#pragma once
#include <ATen/Operators.h>
#include <ATen/functorch/PlumbingHelper.h>

namespace at { namespace functorch {
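
// NOTE: Descriptive comment (not part of the generated output).
// Every *_generated_plumbing function below follows the same pattern:
//   1. Exclude the FuncTorchBatched dispatch key so the fallthrough call
//      below does not re-enter this kernel.
//   2. Look up the current vmap dynamic layer and its level.
//   3. If no argument is batched at this level, redispatch to the plain
//      ATen operator via at::_ops::<op>::call(...).
//   4. Otherwise, unwrap each batched tensor into a (value, optional bdim)
//      pair, invoke the supplied batch rule, and re-wrap the results with
//      makeBatched / makeBatchedVector at the current level.
//
// Illustrative only, as an assumption about how the template parameter is
// instantiated: a batch rule for a unary op such as abs is expected to have
// a signature roughly like
//   std::tuple<Tensor, optional<int64_t>> abs_batch_rule(
//       const Tensor& self, optional<int64_t> self_bdim);
// i.e. it takes the unwrapped value plus its batch dimension and returns the
// result together with the result's batch dimension.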
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Byte_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Byte::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Char_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Char::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Double_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Double::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Float_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Float::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Int_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Int::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Long_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Long::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Short_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Short::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Half_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Half::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _backward_generated_plumbing(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(inputs, cur_level) && !isBatchedAtLevel(gradient, cur_level)) {
    return at::_ops::_backward::call(self, inputs, gradient, retain_graph, create_graph);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> gradient_value;
  optional<int64_t> gradient_bdim;
  if (gradient) {
    std::tie(gradient_value, gradient_bdim) = unwrapTensorAtLevel(gradient.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, inputs, gradient_value, gradient_bdim, retain_graph, create_graph);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void set_data_generated_plumbing(at::Tensor & self, const at::Tensor & new_data) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(new_data, cur_level)) {
    return at::_ops::set_data::call(self, new_data);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor new_data_value;
  optional<int64_t> new_data_bdim;
  std::tie(new_data_value, new_data_bdim) = unwrapTensorAtLevel(new_data, cur_level);
  batch_rule(self_value, self_bdim, new_data_value, new_data_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor data_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::data::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & requires_grad__generated_plumbing(at::Tensor & self, bool requires_grad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::requires_grad_::call(self, requires_grad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, requires_grad);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void retain_grad_generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::retain_grad::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _fw_primal_generated_plumbing(const at::Tensor & self, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_fw_primal::call(self, level);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _make_dual_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
    return at::_ops::_make_dual::call(primal, tangent, level);
  }
  Tensor primal_value;
  optional<int64_t> primal_bdim;
  std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level);
  Tensor tangent_value;
  optional<int64_t> tangent_bdim;
  std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level);
  auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _unpack_dual_generated_plumbing(const at::Tensor & dual, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dual, cur_level)) {
    return at::_ops::_unpack_dual::call(dual, level);
  }
  Tensor dual_value;
  optional<int64_t> dual_bdim;
  std::tie(dual_value, dual_bdim) = unwrapTensorAtLevel(dual, cur_level);
  auto results = batch_rule(dual_value, dual_bdim, level);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _new_zeros_with_same_feature_meta_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, self_num_batch_dims);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rename_generated_plumbing(const at::Tensor & self, c10::optional<at::DimnameList> names) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rename::call(self, names);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, names);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor align_to_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::align_to::call(self, names);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, names);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor align_to_ellipsis_idx_generated_plumbing(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::align_to_ellipsis_idx::call(self, order, ellipsis_idx);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, order, ellipsis_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor align_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::align_as::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> align_tensors_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::align_tensors::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _assert_async_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_assert_async::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _assert_tensor_metadata_generated_plumbing(const at::Tensor & a, at::OptionalIntArrayRef size, at::OptionalIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(a, cur_level)) {
    return at::_ops::_assert_tensor_metadata::call(a, size, stride, dtype);
  }
  Tensor a_value;
  optional<int64_t> a_bdim;
  std::tie(a_value, a_bdim) = unwrapTensorAtLevel(a, cur_level);
  batch_rule(a_value, a_bdim, size, stride, dtype);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor refine_names_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::refine_names::call(self, names);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, names);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
    return at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
  }
  Tensor log_probs_value;
  optional<int64_t> log_probs_bdim;
  std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  Tensor targets_value;
  optional<int64_t> targets_bdim;
  std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, deterministic, zero_infinity);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
    return at::_ops::_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
  }
  Tensor log_probs_value;
  optional<int64_t> log_probs_bdim;
  std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  Tensor targets_value;
  optional<int64_t> targets_bdim;
  std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  Tensor input_lengths_value;
  optional<int64_t> input_lengths_bdim;
  std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
  Tensor target_lengths_value;
  optional<int64_t> target_lengths_bdim;
  std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, deterministic, zero_infinity);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cudnn_rnn_flatten_weight_generated_plumbing(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight_arr, cur_level)) {
    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
  }

  auto results = batch_rule(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
    return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  optional<Tensor> weight_buf_value;
  optional<int64_t> weight_buf_bdim;
  if (weight_buf) {
    std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf.value(), cur_level);
  }
  optional<Tensor> cx_value;
  optional<int64_t> cx_bdim;
  if (cx) {
    std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  optional<Tensor> dropout_state_value;
  optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
    std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_buf_value;
  optional<int64_t> weight_buf_bdim;
  std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  Tensor reserve_value;
  optional<int64_t> reserve_bdim;
  std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
  optional<Tensor> cx_value;
  optional<int64_t> cx_bdim;
  if (cx) {
    std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  optional<Tensor> grad_output_value;
  optional<int64_t> grad_output_bdim;
  if (grad_output) {
    std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  }
  optional<Tensor> grad_hy_value;
  optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  optional<Tensor> grad_cy_value;
  optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  optional<Tensor> dropout_state_value;
  optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
    std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _fused_dropout_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_fused_dropout::call(self, p, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, generator);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _masked_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, double scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_masked_scale::call(self, mask, scale);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, scale);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> native_dropout_generated_plumbing(const at::Tensor & input, double p, c10::optional<bool> train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::native_dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor native_dropout_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::native_dropout_backward::call(grad_output, mask, scale);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, scale);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw_generated_plumbing(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(quasi, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
    return at::_ops::_sobol_engine_draw::call(quasi, n, sobolstate, dimension, num_generated, dtype);
  }
  Tensor quasi_value;
  optional<int64_t> quasi_bdim;
  std::tie(quasi_value, quasi_bdim) = unwrapTensorAtLevel(quasi, cur_level);
  Tensor sobolstate_value;
  optional<int64_t> sobolstate_bdim;
  std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level);
  auto results = batch_rule(quasi_value, quasi_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated, dtype);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _sobol_engine_ff__generated_plumbing(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
    return at::_ops::_sobol_engine_ff_::call(self, n, sobolstate, dimension, num_generated);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor sobolstate_value;
  optional<int64_t> sobolstate_bdim;
  std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level);
  batch_rule(self_value, self_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _sobol_engine_scramble__generated_plumbing(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(ltm, cur_level)) {
    return at::_ops::_sobol_engine_scramble_::call(self, ltm, dimension);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor ltm_value;
  optional<int64_t> ltm_bdim;
  std::tie(ltm_value, ltm_bdim) = unwrapTensorAtLevel(ltm, cur_level);
  batch_rule(self_value, self_bdim, ltm_value, ltm_bdim, dimension);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _sobol_engine_initialize_state__generated_plumbing(at::Tensor & self, int64_t dimension) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_sobol_engine_initialize_state_::call(self, dimension);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, dimension);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _reshape_from_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & shape) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(shape, cur_level)) {
    return at::_ops::_reshape_from_tensor::call(self, shape);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor shape_value;
  optional<int64_t> shape_bdim;
  std::tie(shape_value, shape_bdim) = unwrapTensorAtLevel(shape, cur_level);
  auto results = batch_rule(self_value, self_bdim, shape_value, shape_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _shape_as_tensor_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_shape_as_tensor::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::dropout_::call(self, p, train);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, train);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor feature_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::feature_dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & feature_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::feature_dropout_::call(self, p, train);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, train);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::alpha_dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::alpha_dropout_::call(self, p, train);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, train);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor feature_alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::feature_alpha_dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & feature_alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::feature_alpha_dropout_::call(self, p, train);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, train);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor abs_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::abs::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & abs__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::abs_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor absolute_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::absolute::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & absolute__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::absolute_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor angle_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::angle::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_as_real_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view_as_real::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_as_complex_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view_as_complex::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sgn_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sgn::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sgn__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sgn_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor chalf_generated_plumbing(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::chalf::call(self, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor real_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::real::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor imag_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::imag::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _conj_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_conj::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conj_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::conj::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _conj_physical_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_conj_physical::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conj_physical_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::conj_physical::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & conj_physical__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::conj_physical_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resolve_conj_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::resolve_conj::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resolve_neg_generated_plumbing(const at::Tensor & self) {
1093 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1094 auto maybe_layer = maybeCurrentDynamicLayer();
1095 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1096 int64_t cur_level = maybe_layer->layerId();
1097 if (!isBatchedAtLevel(self, cur_level)) {
1098 return at::_ops::resolve_neg::call(self);
1099 }
1100 Tensor self_value;
1101 optional<int64_t> self_bdim;
1102 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1103 auto results = batch_rule(self_value, self_bdim);
1104 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1105}
1106template <typename batch_rule_t, batch_rule_t batch_rule>
1107at::Tensor _neg_view_generated_plumbing(const at::Tensor & self) {
1108 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1109 auto maybe_layer = maybeCurrentDynamicLayer();
1110 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1111 int64_t cur_level = maybe_layer->layerId();
1112 if (!isBatchedAtLevel(self, cur_level)) {
1113 return at::_ops::_neg_view::call(self);
1114 }
1115 Tensor self_value;
1116 optional<int64_t> self_bdim;
1117 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1118 auto results = batch_rule(self_value, self_bdim);
1119 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1120}
1121template <typename batch_rule_t, batch_rule_t batch_rule>
1122at::Tensor acos_generated_plumbing(const at::Tensor & self) {
1123 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1124 auto maybe_layer = maybeCurrentDynamicLayer();
1125 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1126 int64_t cur_level = maybe_layer->layerId();
1127 if (!isBatchedAtLevel(self, cur_level)) {
1128 return at::_ops::acos::call(self);
1129 }
1130 Tensor self_value;
1131 optional<int64_t> self_bdim;
1132 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1133 auto results = batch_rule(self_value, self_bdim);
1134 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1135}
1136template <typename batch_rule_t, batch_rule_t batch_rule>
1137at::Tensor & acos__generated_plumbing(at::Tensor & self) {
1138 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1139 auto maybe_layer = maybeCurrentDynamicLayer();
1140 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1141 int64_t cur_level = maybe_layer->layerId();
1142 if (!isBatchedAtLevel(self, cur_level)) {
1143 return at::_ops::acos_::call(self);
1144 }
1145 Tensor self_value;
1146 optional<int64_t> self_bdim;
1147 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1148 batch_rule(self_value, self_bdim);
1149 return self;
1150}
1151template <typename batch_rule_t, batch_rule_t batch_rule>
1152at::Tensor arccos_generated_plumbing(const at::Tensor & self) {
1153 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1154 auto maybe_layer = maybeCurrentDynamicLayer();
1155 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1156 int64_t cur_level = maybe_layer->layerId();
1157 if (!isBatchedAtLevel(self, cur_level)) {
1158 return at::_ops::arccos::call(self);
1159 }
1160 Tensor self_value;
1161 optional<int64_t> self_bdim;
1162 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1163 auto results = batch_rule(self_value, self_bdim);
1164 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1165}
1166template <typename batch_rule_t, batch_rule_t batch_rule>
1167at::Tensor & arccos__generated_plumbing(at::Tensor & self) {
1168 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1169 auto maybe_layer = maybeCurrentDynamicLayer();
1170 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1171 int64_t cur_level = maybe_layer->layerId();
1172 if (!isBatchedAtLevel(self, cur_level)) {
1173 return at::_ops::arccos_::call(self);
1174 }
1175 Tensor self_value;
1176 optional<int64_t> self_bdim;
1177 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1178 batch_rule(self_value, self_bdim);
1179 return self;
1180}
1181template <typename batch_rule_t, batch_rule_t batch_rule>
1182at::Tensor avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
1183 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1184 auto maybe_layer = maybeCurrentDynamicLayer();
1185 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1186 int64_t cur_level = maybe_layer->layerId();
1187 if (!isBatchedAtLevel(self, cur_level)) {
1188 return at::_ops::avg_pool1d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
1189 }
1190 Tensor self_value;
1191 optional<int64_t> self_bdim;
1192 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1193 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad);
1194 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1195}
1196template <typename batch_rule_t, batch_rule_t batch_rule>
1197at::Tensor adaptive_avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
1198 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1199 auto maybe_layer = maybeCurrentDynamicLayer();
1200 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1201 int64_t cur_level = maybe_layer->layerId();
1202 if (!isBatchedAtLevel(self, cur_level)) {
1203 return at::_ops::adaptive_avg_pool1d::call(self, output_size);
1204 }
1205 Tensor self_value;
1206 optional<int64_t> self_bdim;
1207 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1208 auto results = batch_rule(self_value, self_bdim, output_size);
1209 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1210}
1211template <typename batch_rule_t, batch_rule_t batch_rule>
1212::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
1213 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1214 auto maybe_layer = maybeCurrentDynamicLayer();
1215 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1216 int64_t cur_level = maybe_layer->layerId();
1217 if (!isBatchedAtLevel(self, cur_level)) {
1218 return at::_ops::adaptive_max_pool1d::call(self, output_size);
1219 }
1220 Tensor self_value;
1221 optional<int64_t> self_bdim;
1222 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1223 auto results = batch_rule(self_value, self_bdim, output_size);
1224 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
1225}
1226template <typename batch_rule_t, batch_rule_t batch_rule>
1227at::Tensor add_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1228 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1229 auto maybe_layer = maybeCurrentDynamicLayer();
1230 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1231 int64_t cur_level = maybe_layer->layerId();
1232 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
1233 return at::_ops::add_Tensor::call(self, other, alpha);
1234 }
1235 Tensor self_value;
1236 optional<int64_t> self_bdim;
1237 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1238 Tensor other_value;
1239 optional<int64_t> other_bdim;
1240 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
1241 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
1242 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1243}
1244template <typename batch_rule_t, batch_rule_t batch_rule>
1245at::Tensor & add__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1246 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1247 auto maybe_layer = maybeCurrentDynamicLayer();
1248 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1249 int64_t cur_level = maybe_layer->layerId();
1250 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
1251 return at::_ops::add__Tensor::call(self, other, alpha);
1252 }
1253 Tensor self_value;
1254 optional<int64_t> self_bdim;
1255 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1256 Tensor other_value;
1257 optional<int64_t> other_bdim;
1258 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
1259 batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
1260 return self;
1261}
1262template <typename batch_rule_t, batch_rule_t batch_rule>
1263at::Tensor _add_relu_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1264 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1265 auto maybe_layer = maybeCurrentDynamicLayer();
1266 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1267 int64_t cur_level = maybe_layer->layerId();
1268 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
1269 return at::_ops::_add_relu_Tensor::call(self, other, alpha);
1270 }
1271 Tensor self_value;
1272 optional<int64_t> self_bdim;
1273 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1274 Tensor other_value;
1275 optional<int64_t> other_bdim;
1276 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
1277 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
1278 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1279}
1280template <typename batch_rule_t, batch_rule_t batch_rule>
1281at::Tensor & _add_relu__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
1282 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1283 auto maybe_layer = maybeCurrentDynamicLayer();
1284 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1285 int64_t cur_level = maybe_layer->layerId();
1286 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
1287 return at::_ops::_add_relu__Tensor::call(self, other, alpha);
1288 }
1289 Tensor self_value;
1290 optional<int64_t> self_bdim;
1291 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1292 Tensor other_value;
1293 optional<int64_t> other_bdim;
1294 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
1295 batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
1296 return self;
1297}
1298template <typename batch_rule_t, batch_rule_t batch_rule>
1299at::Tensor _add_relu_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
1300 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1301 auto maybe_layer = maybeCurrentDynamicLayer();
1302 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1303 int64_t cur_level = maybe_layer->layerId();
1304 if (!isBatchedAtLevel(self, cur_level)) {
1305 return at::_ops::_add_relu_Scalar::call(self, other, alpha);
1306 }
1307 Tensor self_value;
1308 optional<int64_t> self_bdim;
1309 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1310 auto results = batch_rule(self_value, self_bdim, other, alpha);
1311 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1312}
1313template <typename batch_rule_t, batch_rule_t batch_rule>
1314at::Tensor & _add_relu__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
1315 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1316 auto maybe_layer = maybeCurrentDynamicLayer();
1317 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1318 int64_t cur_level = maybe_layer->layerId();
1319 if (!isBatchedAtLevel(self, cur_level)) {
1320 return at::_ops::_add_relu__Scalar::call(self, other, alpha);
1321 }
1322 Tensor self_value;
1323 optional<int64_t> self_bdim;
1324 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1325 batch_rule(self_value, self_bdim, other, alpha);
1326 return self;
1327}
1328template <typename batch_rule_t, batch_rule_t batch_rule>
1329at::Tensor add_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
1330 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1331 auto maybe_layer = maybeCurrentDynamicLayer();
1332 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1333 int64_t cur_level = maybe_layer->layerId();
1334 if (!isBatchedAtLevel(self, cur_level)) {
1335 return at::_ops::add_Scalar::call(self, other, alpha);
1336 }
1337 Tensor self_value;
1338 optional<int64_t> self_bdim;
1339 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1340 auto results = batch_rule(self_value, self_bdim, other, alpha);
1341 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1342}
1343template <typename batch_rule_t, batch_rule_t batch_rule>
1344at::Tensor & add__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
1345 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1346 auto maybe_layer = maybeCurrentDynamicLayer();
1347 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1348 int64_t cur_level = maybe_layer->layerId();
1349 if (!isBatchedAtLevel(self, cur_level)) {
1350 return at::_ops::add__Scalar::call(self, other, alpha);
1351 }
1352 Tensor self_value;
1353 optional<int64_t> self_bdim;
1354 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1355 batch_rule(self_value, self_bdim, other, alpha);
1356 return self;
1357}
1358template <typename batch_rule_t, batch_rule_t batch_rule>
1359at::Tensor addmv_generated_plumbing(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
1360 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1361 auto maybe_layer = maybeCurrentDynamicLayer();
1362 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1363 int64_t cur_level = maybe_layer->layerId();
1364 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
1365 return at::_ops::addmv::call(self, mat, vec, beta, alpha);
1366 }
1367 Tensor self_value;
1368 optional<int64_t> self_bdim;
1369 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1370 Tensor mat_value;
1371 optional<int64_t> mat_bdim;
1372 std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level);
1373 Tensor vec_value;
1374 optional<int64_t> vec_bdim;
1375 std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
1376 auto results = batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
1377 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1378}
1379template <typename batch_rule_t, batch_rule_t batch_rule>
1380at::Tensor & addmv__generated_plumbing(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
1381 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1382 auto maybe_layer = maybeCurrentDynamicLayer();
1383 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1384 int64_t cur_level = maybe_layer->layerId();
1385 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
1386 return at::_ops::addmv_::call(self, mat, vec, beta, alpha);
1387 }
1388 Tensor self_value;
1389 optional<int64_t> self_bdim;
1390 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1391 Tensor mat_value;
1392 optional<int64_t> mat_bdim;
1393 std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level);
1394 Tensor vec_value;
1395 optional<int64_t> vec_bdim;
1396 std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
1397 batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
1398 return self;
1399}
1400template <typename batch_rule_t, batch_rule_t batch_rule>
1401at::Tensor addr_generated_plumbing(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
1402 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1403 auto maybe_layer = maybeCurrentDynamicLayer();
1404 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1405 int64_t cur_level = maybe_layer->layerId();
1406 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
1407 return at::_ops::addr::call(self, vec1, vec2, beta, alpha);
1408 }
1409 Tensor self_value;
1410 optional<int64_t> self_bdim;
1411 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1412 Tensor vec1_value;
1413 optional<int64_t> vec1_bdim;
1414 std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level);
1415 Tensor vec2_value;
1416 optional<int64_t> vec2_bdim;
1417 std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
1418 auto results = batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
1419 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1420}
1421template <typename batch_rule_t, batch_rule_t batch_rule>
1422at::Tensor & addr__generated_plumbing(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
1423 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1424 auto maybe_layer = maybeCurrentDynamicLayer();
1425 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1426 int64_t cur_level = maybe_layer->layerId();
1427 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
1428 return at::_ops::addr_::call(self, vec1, vec2, beta, alpha);
1429 }
1430 Tensor self_value;
1431 optional<int64_t> self_bdim;
1432 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1433 Tensor vec1_value;
1434 optional<int64_t> vec1_bdim;
1435 std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level);
1436 Tensor vec2_value;
1437 optional<int64_t> vec2_bdim;
1438 std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
1439 batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
1440 return self;
1441}
1442template <typename batch_rule_t, batch_rule_t batch_rule>
1443at::Tensor affine_grid_generator_generated_plumbing(const at::Tensor & theta, at::IntArrayRef size, bool align_corners) {
1444 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1445 auto maybe_layer = maybeCurrentDynamicLayer();
1446 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1447 int64_t cur_level = maybe_layer->layerId();
1448 if (!isBatchedAtLevel(theta, cur_level)) {
1449 return at::_ops::affine_grid_generator::call(theta, size, align_corners);
1450 }
1451 Tensor theta_value;
1452 optional<int64_t> theta_bdim;
1453 std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level);
1454 auto results = batch_rule(theta_value, theta_bdim, size, align_corners);
1455 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1456}
1457template <typename batch_rule_t, batch_rule_t batch_rule>
1458at::Tensor affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, at::IntArrayRef size, bool align_corners) {
1459 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1460 auto maybe_layer = maybeCurrentDynamicLayer();
1461 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1462 int64_t cur_level = maybe_layer->layerId();
1463 if (!isBatchedAtLevel(grad, cur_level)) {
1464 return at::_ops::affine_grid_generator_backward::call(grad, size, align_corners);
1465 }
1466 Tensor grad_value;
1467 optional<int64_t> grad_bdim;
1468 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
1469 auto results = batch_rule(grad_value, grad_bdim, size, align_corners);
1470 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1471}
1472template <typename batch_rule_t, batch_rule_t batch_rule>
1473at::Tensor _is_all_true_generated_plumbing(const at::Tensor & self) {
1474 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1475 auto maybe_layer = maybeCurrentDynamicLayer();
1476 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1477 int64_t cur_level = maybe_layer->layerId();
1478 if (!isBatchedAtLevel(self, cur_level)) {
1479 return at::_ops::_is_all_true::call(self);
1480 }
1481 Tensor self_value;
1482 optional<int64_t> self_bdim;
1483 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1484 auto results = batch_rule(self_value, self_bdim);
1485 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1486}
1487template <typename batch_rule_t, batch_rule_t batch_rule>
1488at::Tensor _is_any_true_generated_plumbing(const at::Tensor & self) {
1489 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1490 auto maybe_layer = maybeCurrentDynamicLayer();
1491 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1492 int64_t cur_level = maybe_layer->layerId();
1493 if (!isBatchedAtLevel(self, cur_level)) {
1494 return at::_ops::_is_any_true::call(self);
1495 }
1496 Tensor self_value;
1497 optional<int64_t> self_bdim;
1498 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1499 auto results = batch_rule(self_value, self_bdim);
1500 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1501}
1502template <typename batch_rule_t, batch_rule_t batch_rule>
1503at::Tensor _test_check_tensor_generated_plumbing(const at::Tensor & self) {
1504 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1505 auto maybe_layer = maybeCurrentDynamicLayer();
1506 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1507 int64_t cur_level = maybe_layer->layerId();
1508 if (!isBatchedAtLevel(self, cur_level)) {
1509 return at::_ops::_test_check_tensor::call(self);
1510 }
1511 Tensor self_value;
1512 optional<int64_t> self_bdim;
1513 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1514 auto results = batch_rule(self_value, self_bdim);
1515 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1516}
1517template <typename batch_rule_t, batch_rule_t batch_rule>
1518at::Tensor all_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
1519 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1520 auto maybe_layer = maybeCurrentDynamicLayer();
1521 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1522 int64_t cur_level = maybe_layer->layerId();
1523 if (!isBatchedAtLevel(self, cur_level)) {
1524 return at::_ops::all_dim::call(self, dim, keepdim);
1525 }
1526 Tensor self_value;
1527 optional<int64_t> self_bdim;
1528 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1529 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
1530 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1531}
1532template <typename batch_rule_t, batch_rule_t batch_rule>
1533at::Tensor all_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
1534 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1535 auto maybe_layer = maybeCurrentDynamicLayer();
1536 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1537 int64_t cur_level = maybe_layer->layerId();
1538 if (!isBatchedAtLevel(self, cur_level)) {
1539 return at::_ops::all_dimname::call(self, dim, keepdim);
1540 }
1541 Tensor self_value;
1542 optional<int64_t> self_bdim;
1543 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1544 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
1545 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1546}
1547template <typename batch_rule_t, batch_rule_t batch_rule>
1548at::Tensor any_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
1549 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1550 auto maybe_layer = maybeCurrentDynamicLayer();
1551 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1552 int64_t cur_level = maybe_layer->layerId();
1553 if (!isBatchedAtLevel(self, cur_level)) {
1554 return at::_ops::any_dim::call(self, dim, keepdim);
1555 }
1556 Tensor self_value;
1557 optional<int64_t> self_bdim;
1558 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1559 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
1560 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1561}
1562template <typename batch_rule_t, batch_rule_t batch_rule>
1563at::Tensor any_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
1564 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1565 auto maybe_layer = maybeCurrentDynamicLayer();
1566 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1567 int64_t cur_level = maybe_layer->layerId();
1568 if (!isBatchedAtLevel(self, cur_level)) {
1569 return at::_ops::any_dimname::call(self, dim, keepdim);
1570 }
1571 Tensor self_value;
1572 optional<int64_t> self_bdim;
1573 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1574 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
1575 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1576}
1577template <typename batch_rule_t, batch_rule_t batch_rule>
1578at::Tensor _dim_arange_generated_plumbing(const at::Tensor & like, int64_t dim) {
1579 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1580 auto maybe_layer = maybeCurrentDynamicLayer();
1581 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1582 int64_t cur_level = maybe_layer->layerId();
1583 if (!isBatchedAtLevel(like, cur_level)) {
1584 return at::_ops::_dim_arange::call(like, dim);
1585 }
1586 Tensor like_value;
1587 optional<int64_t> like_bdim;
1588 std::tie(like_value, like_bdim) = unwrapTensorAtLevel(like, cur_level);
1589 auto results = batch_rule(like_value, like_bdim, dim);
1590 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1591}
1592template <typename batch_rule_t, batch_rule_t batch_rule>
1593at::Tensor argmax_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
1594 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1595 auto maybe_layer = maybeCurrentDynamicLayer();
1596 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1597 int64_t cur_level = maybe_layer->layerId();
1598 if (!isBatchedAtLevel(self, cur_level)) {
1599 return at::_ops::argmax::call(self, dim, keepdim);
1600 }
1601 Tensor self_value;
1602 optional<int64_t> self_bdim;
1603 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1604 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
1605 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1606}
1607template <typename batch_rule_t, batch_rule_t batch_rule>
1608at::Tensor argmin_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
1609 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1610 auto maybe_layer = maybeCurrentDynamicLayer();
1611 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1612 int64_t cur_level = maybe_layer->layerId();
1613 if (!isBatchedAtLevel(self, cur_level)) {
1614 return at::_ops::argmin::call(self, dim, keepdim);
1615 }
1616 Tensor self_value;
1617 optional<int64_t> self_bdim;
1618 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1619 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
1620 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1621}
1622template <typename batch_rule_t, batch_rule_t batch_rule>
1623at::Tensor acosh_generated_plumbing(const at::Tensor & self) {
1624 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1625 auto maybe_layer = maybeCurrentDynamicLayer();
1626 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1627 int64_t cur_level = maybe_layer->layerId();
1628 if (!isBatchedAtLevel(self, cur_level)) {
1629 return at::_ops::acosh::call(self);
1630 }
1631 Tensor self_value;
1632 optional<int64_t> self_bdim;
1633 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1634 auto results = batch_rule(self_value, self_bdim);
1635 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1636}
1637template <typename batch_rule_t, batch_rule_t batch_rule>
1638at::Tensor & acosh__generated_plumbing(at::Tensor & self) {
1639 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1640 auto maybe_layer = maybeCurrentDynamicLayer();
1641 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1642 int64_t cur_level = maybe_layer->layerId();
1643 if (!isBatchedAtLevel(self, cur_level)) {
1644 return at::_ops::acosh_::call(self);
1645 }
1646 Tensor self_value;
1647 optional<int64_t> self_bdim;
1648 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1649 batch_rule(self_value, self_bdim);
1650 return self;
1651}
1652template <typename batch_rule_t, batch_rule_t batch_rule>
1653at::Tensor arccosh_generated_plumbing(const at::Tensor & self) {
1654 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1655 auto maybe_layer = maybeCurrentDynamicLayer();
1656 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1657 int64_t cur_level = maybe_layer->layerId();
1658 if (!isBatchedAtLevel(self, cur_level)) {
1659 return at::_ops::arccosh::call(self);
1660 }
1661 Tensor self_value;
1662 optional<int64_t> self_bdim;
1663 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1664 auto results = batch_rule(self_value, self_bdim);
1665 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1666}
1667template <typename batch_rule_t, batch_rule_t batch_rule>
1668at::Tensor & arccosh__generated_plumbing(at::Tensor & self) {
1669 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1670 auto maybe_layer = maybeCurrentDynamicLayer();
1671 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1672 int64_t cur_level = maybe_layer->layerId();
1673 if (!isBatchedAtLevel(self, cur_level)) {
1674 return at::_ops::arccosh_::call(self);
1675 }
1676 Tensor self_value;
1677 optional<int64_t> self_bdim;
1678 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1679 batch_rule(self_value, self_bdim);
1680 return self;
1681}
1682template <typename batch_rule_t, batch_rule_t batch_rule>
1683at::Tensor asinh_generated_plumbing(const at::Tensor & self) {
1684 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1685 auto maybe_layer = maybeCurrentDynamicLayer();
1686 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1687 int64_t cur_level = maybe_layer->layerId();
1688 if (!isBatchedAtLevel(self, cur_level)) {
1689 return at::_ops::asinh::call(self);
1690 }
1691 Tensor self_value;
1692 optional<int64_t> self_bdim;
1693 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1694 auto results = batch_rule(self_value, self_bdim);
1695 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1696}
1697template <typename batch_rule_t, batch_rule_t batch_rule>
1698at::Tensor & asinh__generated_plumbing(at::Tensor & self) {
1699 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1700 auto maybe_layer = maybeCurrentDynamicLayer();
1701 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1702 int64_t cur_level = maybe_layer->layerId();
1703 if (!isBatchedAtLevel(self, cur_level)) {
1704 return at::_ops::asinh_::call(self);
1705 }
1706 Tensor self_value;
1707 optional<int64_t> self_bdim;
1708 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1709 batch_rule(self_value, self_bdim);
1710 return self;
1711}
1712template <typename batch_rule_t, batch_rule_t batch_rule>
1713at::Tensor arcsinh_generated_plumbing(const at::Tensor & self) {
1714 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1715 auto maybe_layer = maybeCurrentDynamicLayer();
1716 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1717 int64_t cur_level = maybe_layer->layerId();
1718 if (!isBatchedAtLevel(self, cur_level)) {
1719 return at::_ops::arcsinh::call(self);
1720 }
1721 Tensor self_value;
1722 optional<int64_t> self_bdim;
1723 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1724 auto results = batch_rule(self_value, self_bdim);
1725 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1726}
1727template <typename batch_rule_t, batch_rule_t batch_rule>
1728at::Tensor & arcsinh__generated_plumbing(at::Tensor & self) {
1729 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1730 auto maybe_layer = maybeCurrentDynamicLayer();
1731 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1732 int64_t cur_level = maybe_layer->layerId();
1733 if (!isBatchedAtLevel(self, cur_level)) {
1734 return at::_ops::arcsinh_::call(self);
1735 }
1736 Tensor self_value;
1737 optional<int64_t> self_bdim;
1738 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1739 batch_rule(self_value, self_bdim);
1740 return self;
1741}
1742template <typename batch_rule_t, batch_rule_t batch_rule>
1743at::Tensor atanh_generated_plumbing(const at::Tensor & self) {
1744 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1745 auto maybe_layer = maybeCurrentDynamicLayer();
1746 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1747 int64_t cur_level = maybe_layer->layerId();
1748 if (!isBatchedAtLevel(self, cur_level)) {
1749 return at::_ops::atanh::call(self);
1750 }
1751 Tensor self_value;
1752 optional<int64_t> self_bdim;
1753 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1754 auto results = batch_rule(self_value, self_bdim);
1755 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1756}
1757template <typename batch_rule_t, batch_rule_t batch_rule>
1758at::Tensor & atanh__generated_plumbing(at::Tensor & self) {
1759 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1760 auto maybe_layer = maybeCurrentDynamicLayer();
1761 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1762 int64_t cur_level = maybe_layer->layerId();
1763 if (!isBatchedAtLevel(self, cur_level)) {
1764 return at::_ops::atanh_::call(self);
1765 }
1766 Tensor self_value;
1767 optional<int64_t> self_bdim;
1768 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1769 batch_rule(self_value, self_bdim);
1770 return self;
1771}
1772template <typename batch_rule_t, batch_rule_t batch_rule>
1773at::Tensor arctanh_generated_plumbing(const at::Tensor & self) {
1774 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1775 auto maybe_layer = maybeCurrentDynamicLayer();
1776 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1777 int64_t cur_level = maybe_layer->layerId();
1778 if (!isBatchedAtLevel(self, cur_level)) {
1779 return at::_ops::arctanh::call(self);
1780 }
1781 Tensor self_value;
1782 optional<int64_t> self_bdim;
1783 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1784 auto results = batch_rule(self_value, self_bdim);
1785 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1786}
1787template <typename batch_rule_t, batch_rule_t batch_rule>
1788at::Tensor & arctanh__generated_plumbing(at::Tensor & self) {
1789 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1790 auto maybe_layer = maybeCurrentDynamicLayer();
1791 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1792 int64_t cur_level = maybe_layer->layerId();
1793 if (!isBatchedAtLevel(self, cur_level)) {
1794 return at::_ops::arctanh_::call(self);
1795 }
1796 Tensor self_value;
1797 optional<int64_t> self_bdim;
1798 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1799 batch_rule(self_value, self_bdim);
1800 return self;
1801}
1802template <typename batch_rule_t, batch_rule_t batch_rule>
1803at::Tensor as_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
1804 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1805 auto maybe_layer = maybeCurrentDynamicLayer();
1806 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1807 int64_t cur_level = maybe_layer->layerId();
1808 if (!isBatchedAtLevel(self, cur_level)) {
1809 return at::_ops::as_strided::call(self, size, stride, storage_offset);
1810 }
1811 Tensor self_value;
1812 optional<int64_t> self_bdim;
1813 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1814 auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
1815 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1816}
1817template <typename batch_rule_t, batch_rule_t batch_rule>
1818at::Tensor asin_generated_plumbing(const at::Tensor & self) {
1819 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1820 auto maybe_layer = maybeCurrentDynamicLayer();
1821 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1822 int64_t cur_level = maybe_layer->layerId();
1823 if (!isBatchedAtLevel(self, cur_level)) {
1824 return at::_ops::asin::call(self);
1825 }
1826 Tensor self_value;
1827 optional<int64_t> self_bdim;
1828 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1829 auto results = batch_rule(self_value, self_bdim);
1830 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1831}
1832template <typename batch_rule_t, batch_rule_t batch_rule>
1833at::Tensor & asin__generated_plumbing(at::Tensor & self) {
1834 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1835 auto maybe_layer = maybeCurrentDynamicLayer();
1836 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1837 int64_t cur_level = maybe_layer->layerId();
1838 if (!isBatchedAtLevel(self, cur_level)) {
1839 return at::_ops::asin_::call(self);
1840 }
1841 Tensor self_value;
1842 optional<int64_t> self_bdim;
1843 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1844 batch_rule(self_value, self_bdim);
1845 return self;
1846}
1847template <typename batch_rule_t, batch_rule_t batch_rule>
1848at::Tensor arcsin_generated_plumbing(const at::Tensor & self) {
1849 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1850 auto maybe_layer = maybeCurrentDynamicLayer();
1851 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1852 int64_t cur_level = maybe_layer->layerId();
1853 if (!isBatchedAtLevel(self, cur_level)) {
1854 return at::_ops::arcsin::call(self);
1855 }
1856 Tensor self_value;
1857 optional<int64_t> self_bdim;
1858 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1859 auto results = batch_rule(self_value, self_bdim);
1860 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1861}
1862template <typename batch_rule_t, batch_rule_t batch_rule>
1863at::Tensor & arcsin__generated_plumbing(at::Tensor & self) {
1864 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1865 auto maybe_layer = maybeCurrentDynamicLayer();
1866 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1867 int64_t cur_level = maybe_layer->layerId();
1868 if (!isBatchedAtLevel(self, cur_level)) {
1869 return at::_ops::arcsin_::call(self);
1870 }
1871 Tensor self_value;
1872 optional<int64_t> self_bdim;
1873 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1874 batch_rule(self_value, self_bdim);
1875 return self;
1876}
1877template <typename batch_rule_t, batch_rule_t batch_rule>
1878at::Tensor atan_generated_plumbing(const at::Tensor & self) {
1879 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1880 auto maybe_layer = maybeCurrentDynamicLayer();
1881 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1882 int64_t cur_level = maybe_layer->layerId();
1883 if (!isBatchedAtLevel(self, cur_level)) {
1884 return at::_ops::atan::call(self);
1885 }
1886 Tensor self_value;
1887 optional<int64_t> self_bdim;
1888 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1889 auto results = batch_rule(self_value, self_bdim);
1890 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1891}
1892template <typename batch_rule_t, batch_rule_t batch_rule>
1893at::Tensor & atan__generated_plumbing(at::Tensor & self) {
1894 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1895 auto maybe_layer = maybeCurrentDynamicLayer();
1896 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1897 int64_t cur_level = maybe_layer->layerId();
1898 if (!isBatchedAtLevel(self, cur_level)) {
1899 return at::_ops::atan_::call(self);
1900 }
1901 Tensor self_value;
1902 optional<int64_t> self_bdim;
1903 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1904 batch_rule(self_value, self_bdim);
1905 return self;
1906}
1907template <typename batch_rule_t, batch_rule_t batch_rule>
1908at::Tensor arctan_generated_plumbing(const at::Tensor & self) {
1909 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1910 auto maybe_layer = maybeCurrentDynamicLayer();
1911 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1912 int64_t cur_level = maybe_layer->layerId();
1913 if (!isBatchedAtLevel(self, cur_level)) {
1914 return at::_ops::arctan::call(self);
1915 }
1916 Tensor self_value;
1917 optional<int64_t> self_bdim;
1918 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1919 auto results = batch_rule(self_value, self_bdim);
1920 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1921}
1922template <typename batch_rule_t, batch_rule_t batch_rule>
1923at::Tensor & arctan__generated_plumbing(at::Tensor & self) {
1924 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1925 auto maybe_layer = maybeCurrentDynamicLayer();
1926 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
1927 int64_t cur_level = maybe_layer->layerId();
1928 if (!isBatchedAtLevel(self, cur_level)) {
1929 return at::_ops::arctan_::call(self);
1930 }
1931 Tensor self_value;
1932 optional<int64_t> self_bdim;
1933 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1934 batch_rule(self_value, self_bdim);
1935 return self;
1936}
1937template <typename batch_rule_t, batch_rule_t batch_rule>
1938at::Tensor atleast_1d_generated_plumbing(const at::Tensor & self) {
1939 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1940 auto maybe_layer = maybeCurrentDynamicLayer();
1941 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1942 int64_t cur_level = maybe_layer->layerId();
1943 if (!isBatchedAtLevel(self, cur_level)) {
1944 return at::_ops::atleast_1d::call(self);
1945 }
1946 Tensor self_value;
1947 optional<int64_t> self_bdim;
1948 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1949 auto results = batch_rule(self_value, self_bdim);
1950 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1951}
1952template <typename batch_rule_t, batch_rule_t batch_rule>
1953::std::vector<at::Tensor> atleast_1d_Sequence_generated_plumbing(at::TensorList tensors) {
1954 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1955 auto maybe_layer = maybeCurrentDynamicLayer();
1956 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1957 int64_t cur_level = maybe_layer->layerId();
1958 if (!isBatchedAtLevel(tensors, cur_level)) {
1959 return at::_ops::atleast_1d_Sequence::call(tensors);
1960 }
1961
1962 auto results = batch_rule(tensors);
1963 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
1964}
1965template <typename batch_rule_t, batch_rule_t batch_rule>
1966at::Tensor atleast_2d_generated_plumbing(const at::Tensor & self) {
1967 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1968 auto maybe_layer = maybeCurrentDynamicLayer();
1969 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1970 int64_t cur_level = maybe_layer->layerId();
1971 if (!isBatchedAtLevel(self, cur_level)) {
1972 return at::_ops::atleast_2d::call(self);
1973 }
1974 Tensor self_value;
1975 optional<int64_t> self_bdim;
1976 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
1977 auto results = batch_rule(self_value, self_bdim);
1978 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
1979}
1980template <typename batch_rule_t, batch_rule_t batch_rule>
1981::std::vector<at::Tensor> atleast_2d_Sequence_generated_plumbing(at::TensorList tensors) {
1982 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1983 auto maybe_layer = maybeCurrentDynamicLayer();
1984 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1985 int64_t cur_level = maybe_layer->layerId();
1986 if (!isBatchedAtLevel(tensors, cur_level)) {
1987 return at::_ops::atleast_2d_Sequence::call(tensors);
1988 }
1989
1990 auto results = batch_rule(tensors);
1991 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
1992}
1993template <typename batch_rule_t, batch_rule_t batch_rule>
1994at::Tensor atleast_3d_generated_plumbing(const at::Tensor & self) {
1995 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
1996 auto maybe_layer = maybeCurrentDynamicLayer();
1997 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
1998 int64_t cur_level = maybe_layer->layerId();
1999 if (!isBatchedAtLevel(self, cur_level)) {
2000 return at::_ops::atleast_3d::call(self);
2001 }
2002 Tensor self_value;
2003 optional<int64_t> self_bdim;
2004 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2005 auto results = batch_rule(self_value, self_bdim);
2006 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2007}
2008template <typename batch_rule_t, batch_rule_t batch_rule>
2009::std::vector<at::Tensor> atleast_3d_Sequence_generated_plumbing(at::TensorList tensors) {
2010 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2011 auto maybe_layer = maybeCurrentDynamicLayer();
2012 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2013 int64_t cur_level = maybe_layer->layerId();
2014 if (!isBatchedAtLevel(tensors, cur_level)) {
2015 return at::_ops::atleast_3d_Sequence::call(tensors);
2016 }
2017
2018 auto results = batch_rule(tensors);
2019 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
2020}
2021template <typename batch_rule_t, batch_rule_t batch_rule>
2022at::Tensor baddbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
2023 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2024 auto maybe_layer = maybeCurrentDynamicLayer();
2025 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2026 int64_t cur_level = maybe_layer->layerId();
2027 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
2028 return at::_ops::baddbmm::call(self, batch1, batch2, beta, alpha);
2029 }
2030 Tensor self_value;
2031 optional<int64_t> self_bdim;
2032 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2033 Tensor batch1_value;
2034 optional<int64_t> batch1_bdim;
2035 std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
2036 Tensor batch2_value;
2037 optional<int64_t> batch2_bdim;
2038 std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
2039 auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
2040 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2041}
2042template <typename batch_rule_t, batch_rule_t batch_rule>
2043at::Tensor & baddbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
2044 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2045 auto maybe_layer = maybeCurrentDynamicLayer();
2046 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2047 int64_t cur_level = maybe_layer->layerId();
2048 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
2049 return at::_ops::baddbmm_::call(self, batch1, batch2, beta, alpha);
2050 }
2051 Tensor self_value;
2052 optional<int64_t> self_bdim;
2053 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2054 Tensor batch1_value;
2055 optional<int64_t> batch1_bdim;
2056 std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
2057 Tensor batch2_value;
2058 optional<int64_t> batch2_bdim;
2059 std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
2060 batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
2061 return self;
2062}
2063template <typename batch_rule_t, batch_rule_t batch_rule>
2064at::Tensor batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
2065 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2066 auto maybe_layer = maybeCurrentDynamicLayer();
2067 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2068 int64_t cur_level = maybe_layer->layerId();
2069 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
2070 return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
2071 }
2072 Tensor input_value;
2073 optional<int64_t> input_bdim;
2074 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
2075 optional<Tensor> weight_value;
2076 optional<int64_t> weight_bdim;
2077 if (weight) {
2078 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
2079 }
2080 optional<Tensor> bias_value;
2081 optional<int64_t> bias_bdim;
2082 if (bias) {
2083 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
2084 }
2085 optional<Tensor> running_mean_value;
2086 optional<int64_t> running_mean_bdim;
2087 if (running_mean) {
2088 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
2089 }
2090 optional<Tensor> running_var_value;
2091 optional<int64_t> running_var_bdim;
2092 if (running_var) {
2093 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
2094 }
2095 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps, cudnn_enabled);
2096 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2097}
2098template <typename batch_rule_t, batch_rule_t batch_rule>
2099at::Tensor quantized_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
2100 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2101 auto maybe_layer = maybeCurrentDynamicLayer();
2102 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2103 int64_t cur_level = maybe_layer->layerId();
2104 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(var, cur_level)) {
2105 return at::_ops::quantized_batch_norm::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
2106 }
2107 Tensor input_value;
2108 optional<int64_t> input_bdim;
2109 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
2110 Tensor mean_value;
2111 optional<int64_t> mean_bdim;
2112 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
2113 Tensor var_value;
2114 optional<int64_t> var_bdim;
2115 std::tie(var_value, var_bdim) = unwrapTensorAtLevel(var, cur_level);
2116 optional<Tensor> weight_value;
2117 optional<int64_t> weight_bdim;
2118 if (weight) {
2119 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
2120 }
2121 optional<Tensor> bias_value;
2122 optional<int64_t> bias_bdim;
2123 if (bias) {
2124 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
2125 }
2126 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, var_value, var_bdim, eps, output_scale, output_zero_point);
2127 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2128}
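// Ops that return several tensors re-wrap each output separately: the batch rule
// yields an interleaved tuple (value0, bdim0, value1, bdim1, ...), and every
// (value, bdim) pair goes back through makeBatched at the current level.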
2129template <typename batch_rule_t, batch_rule_t batch_rule>
2130::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward_generated_plumbing(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
2131 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2132 auto maybe_layer = maybeCurrentDynamicLayer();
2133 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2134 int64_t cur_level = maybe_layer->layerId();
2135 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var_transform, cur_level) && !isBatchedAtLevel(reservedSpace, cur_level)) {
2136 return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
2137 }
2138 Tensor input_value;
2139 optional<int64_t> input_bdim;
2140 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
2141 Tensor grad_output_value;
2142 optional<int64_t> grad_output_bdim;
2143 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
2144 Tensor reservedSpace_value;
2145 optional<int64_t> reservedSpace_bdim;
2146 std::tie(reservedSpace_value, reservedSpace_bdim) = unwrapTensorAtLevel(reservedSpace, cur_level);
2147 optional<Tensor> weight_value;
2148 optional<int64_t> weight_bdim;
2149 if (weight) {
2150 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
2151 }
2152 optional<Tensor> running_mean_value;
2153 optional<int64_t> running_mean_bdim;
2154 if (running_mean) {
2155 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
2156 }
2157 optional<Tensor> running_var_value;
2158 optional<int64_t> running_var_bdim;
2159 if (running_var) {
2160 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
2161 }
2162 optional<Tensor> save_mean_value;
2163 optional<int64_t> save_mean_bdim;
2164 if (save_mean) {
2165 std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
2166 }
2167 optional<Tensor> save_var_transform_value;
2168 optional<int64_t> save_var_transform_bdim;
2169 if (save_var_transform) {
2170 std::tie(save_var_transform_value, save_var_transform_bdim) = unwrapTensorAtLevel(save_var_transform.value(), cur_level);
2171 }
2172 auto results = batch_rule(impl_index, input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_transform_value, save_var_transform_bdim, train, eps, output_mask, reservedSpace_value, reservedSpace_bdim);
2173 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
2174}
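// Random ops forward the optional Generator to the batch rule untouched; how the
// sampled values interact with the batch dimension is left to the rule itself.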
2175template <typename batch_rule_t, batch_rule_t batch_rule>
2176at::Tensor bernoulli_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
2177 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2178 auto maybe_layer = maybeCurrentDynamicLayer();
2179 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2180 int64_t cur_level = maybe_layer->layerId();
2181 if (!isBatchedAtLevel(self, cur_level)) {
2182 return at::_ops::bernoulli::call(self, generator);
2183 }
2184 Tensor self_value;
2185 optional<int64_t> self_bdim;
2186 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2187 auto results = batch_rule(self_value, self_bdim, generator);
2188 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2189}
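// In-place variants (trailing underscore) invoke the batch rule purely for its
// side effect: the rule writes into the unwrapped self_value, which shares
// storage with self, so the original reference is returned without re-wrapping.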
2190template <typename batch_rule_t, batch_rule_t batch_rule>
2191at::Tensor & bernoulli__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
2192 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2193 auto maybe_layer = maybeCurrentDynamicLayer();
2194 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2195 int64_t cur_level = maybe_layer->layerId();
2196 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
2197 return at::_ops::bernoulli__Tensor::call(self, p, generator);
2198 }
2199 Tensor self_value;
2200 optional<int64_t> self_bdim;
2201 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2202 Tensor p_value;
2203 optional<int64_t> p_bdim;
2204 std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level);
2205 batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
2206 return self;
2207}
2208template <typename batch_rule_t, batch_rule_t batch_rule>
2209at::Tensor & bernoulli__float_generated_plumbing(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
2210 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2211 auto maybe_layer = maybeCurrentDynamicLayer();
2212 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2213 int64_t cur_level = maybe_layer->layerId();
2214 if (!isBatchedAtLevel(self, cur_level)) {
2215 return at::_ops::bernoulli__float::call(self, p, generator);
2216 }
2217 Tensor self_value;
2218 optional<int64_t> self_bdim;
2219 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2220 batch_rule(self_value, self_bdim, p, generator);
2221 return self;
2222}
2223template <typename batch_rule_t, batch_rule_t batch_rule>
2224at::Tensor bernoulli_p_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
2225 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2226 auto maybe_layer = maybeCurrentDynamicLayer();
2227 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2228 int64_t cur_level = maybe_layer->layerId();
2229 if (!isBatchedAtLevel(self, cur_level)) {
2230 return at::_ops::bernoulli_p::call(self, p, generator);
2231 }
2232 Tensor self_value;
2233 optional<int64_t> self_bdim;
2234 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2235 auto results = batch_rule(self_value, self_bdim, p, generator);
2236 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2237}
2238template <typename batch_rule_t, batch_rule_t batch_rule>
2239at::Tensor bilinear_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
2240 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2241 auto maybe_layer = maybeCurrentDynamicLayer();
2242 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2243 int64_t cur_level = maybe_layer->layerId();
2244 if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
2245 return at::_ops::bilinear::call(input1, input2, weight, bias);
2246 }
2247 Tensor input1_value;
2248 optional<int64_t> input1_bdim;
2249 std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
2250 Tensor input2_value;
2251 optional<int64_t> input2_bdim;
2252 std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
2253 Tensor weight_value;
2254 optional<int64_t> weight_bdim;
2255 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
2256 optional<Tensor> bias_value;
2257 optional<int64_t> bias_bdim;
2258 if (bias) {
2259 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
2260 }
2261 auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
2262 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2263}
2264template <typename batch_rule_t, batch_rule_t batch_rule>
2265at::Tensor binary_cross_entropy_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
2266 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2267 auto maybe_layer = maybeCurrentDynamicLayer();
2268 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2269 int64_t cur_level = maybe_layer->layerId();
2270 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
2271 return at::_ops::binary_cross_entropy::call(self, target, weight, reduction);
2272 }
2273 Tensor self_value;
2274 optional<int64_t> self_bdim;
2275 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2276 Tensor target_value;
2277 optional<int64_t> target_bdim;
2278 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
2279 optional<Tensor> weight_value;
2280 optional<int64_t> weight_bdim;
2281 if (weight) {
2282 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
2283 }
2284 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
2285 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2286}
2287template <typename batch_rule_t, batch_rule_t batch_rule>
2288at::Tensor binary_cross_entropy_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
2289 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2290 auto maybe_layer = maybeCurrentDynamicLayer();
2291 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2292 int64_t cur_level = maybe_layer->layerId();
2293 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
2294 return at::_ops::binary_cross_entropy_backward::call(grad_output, self, target, weight, reduction);
2295 }
2296 Tensor grad_output_value;
2297 optional<int64_t> grad_output_bdim;
2298 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
2299 Tensor self_value;
2300 optional<int64_t> self_bdim;
2301 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2302 Tensor target_value;
2303 optional<int64_t> target_bdim;
2304 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
2305 optional<Tensor> weight_value;
2306 optional<int64_t> weight_bdim;
2307 if (weight) {
2308 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
2309 }
2310 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
2311 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2312}
2313template <typename batch_rule_t, batch_rule_t batch_rule>
2314at::Tensor binary_cross_entropy_with_logits_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction) {
2315 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2316 auto maybe_layer = maybeCurrentDynamicLayer();
2317 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2318 int64_t cur_level = maybe_layer->layerId();
2319 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(pos_weight, cur_level)) {
2320 return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction);
2321 }
2322 Tensor self_value;
2323 optional<int64_t> self_bdim;
2324 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2325 Tensor target_value;
2326 optional<int64_t> target_bdim;
2327 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
2328 optional<Tensor> weight_value;
2329 optional<int64_t> weight_bdim;
2330 if (weight) {
2331 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
2332 }
2333 optional<Tensor> pos_weight_value;
2334 optional<int64_t> pos_weight_bdim;
2335 if (pos_weight) {
2336 std::tie(pos_weight_value, pos_weight_bdim) = unwrapTensorAtLevel(pos_weight.value(), cur_level);
2337 }
2338 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, pos_weight_value, pos_weight_bdim, reduction);
2339 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2340}
2341template <typename batch_rule_t, batch_rule_t batch_rule>
2342at::Tensor bincount_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength) {
2343 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2344 auto maybe_layer = maybeCurrentDynamicLayer();
2345 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2346 int64_t cur_level = maybe_layer->layerId();
2347 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
2348 return at::_ops::bincount::call(self, weights, minlength);
2349 }
2350 Tensor self_value;
2351 optional<int64_t> self_bdim;
2352 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2353 optional<Tensor> weights_value;
2354 optional<int64_t> weights_bdim;
2355 if (weights) {
2356 std::tie(weights_value, weights_bdim) = unwrapTensorAtLevel(weights.value(), cur_level);
2357 }
2358 auto results = batch_rule(self_value, self_bdim, weights_value, weights_bdim, minlength);
2359 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2360}
2361template <typename batch_rule_t, batch_rule_t batch_rule>
2362at::Tensor bitwise_not_generated_plumbing(const at::Tensor & self) {
2363 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2364 auto maybe_layer = maybeCurrentDynamicLayer();
2365 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2366 int64_t cur_level = maybe_layer->layerId();
2367 if (!isBatchedAtLevel(self, cur_level)) {
2368 return at::_ops::bitwise_not::call(self);
2369 }
2370 Tensor self_value;
2371 optional<int64_t> self_bdim;
2372 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2373 auto results = batch_rule(self_value, self_bdim);
2374 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2375}
2376template <typename batch_rule_t, batch_rule_t batch_rule>
2377at::Tensor & bitwise_not__generated_plumbing(at::Tensor & self) {
2378 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2379 auto maybe_layer = maybeCurrentDynamicLayer();
2380 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2381 int64_t cur_level = maybe_layer->layerId();
2382 if (!isBatchedAtLevel(self, cur_level)) {
2383 return at::_ops::bitwise_not_::call(self);
2384 }
2385 Tensor self_value;
2386 optional<int64_t> self_bdim;
2387 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2388 batch_rule(self_value, self_bdim);
2389 return self;
2390}
2391template <typename batch_rule_t, batch_rule_t batch_rule>
2392at::Tensor copysign_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
2393 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2394 auto maybe_layer = maybeCurrentDynamicLayer();
2395 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2396 int64_t cur_level = maybe_layer->layerId();
2397 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
2398 return at::_ops::copysign_Tensor::call(self, other);
2399 }
2400 Tensor self_value;
2401 optional<int64_t> self_bdim;
2402 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2403 Tensor other_value;
2404 optional<int64_t> other_bdim;
2405 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
2406 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
2407 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2408}
2409template <typename batch_rule_t, batch_rule_t batch_rule>
2410at::Tensor & copysign__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
2411 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2412 auto maybe_layer = maybeCurrentDynamicLayer();
2413 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2414 int64_t cur_level = maybe_layer->layerId();
2415 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
2416 return at::_ops::copysign__Tensor::call(self, other);
2417 }
2418 Tensor self_value;
2419 optional<int64_t> self_bdim;
2420 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2421 Tensor other_value;
2422 optional<int64_t> other_bdim;
2423 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
2424 batch_rule(self_value, self_bdim, other_value, other_bdim);
2425 return self;
2426}
2427template <typename batch_rule_t, batch_rule_t batch_rule>
2428at::Tensor copysign_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
2429 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2430 auto maybe_layer = maybeCurrentDynamicLayer();
2431 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2432 int64_t cur_level = maybe_layer->layerId();
2433 if (!isBatchedAtLevel(self, cur_level)) {
2434 return at::_ops::copysign_Scalar::call(self, other);
2435 }
2436 Tensor self_value;
2437 optional<int64_t> self_bdim;
2438 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2439 auto results = batch_rule(self_value, self_bdim, other);
2440 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2441}
2442template <typename batch_rule_t, batch_rule_t batch_rule>
2443at::Tensor & copysign__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
2444 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2445 auto maybe_layer = maybeCurrentDynamicLayer();
2446 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2447 int64_t cur_level = maybe_layer->layerId();
2448 if (!isBatchedAtLevel(self, cur_level)) {
2449 return at::_ops::copysign__Scalar::call(self, other);
2450 }
2451 Tensor self_value;
2452 optional<int64_t> self_bdim;
2453 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2454 batch_rule(self_value, self_bdim, other);
2455 return self;
2456}
2457template <typename batch_rule_t, batch_rule_t batch_rule>
2458at::Tensor logical_not_generated_plumbing(const at::Tensor & self) {
2459 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2460 auto maybe_layer = maybeCurrentDynamicLayer();
2461 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2462 int64_t cur_level = maybe_layer->layerId();
2463 if (!isBatchedAtLevel(self, cur_level)) {
2464 return at::_ops::logical_not::call(self);
2465 }
2466 Tensor self_value;
2467 optional<int64_t> self_bdim;
2468 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2469 auto results = batch_rule(self_value, self_bdim);
2470 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2471}
2472template <typename batch_rule_t, batch_rule_t batch_rule>
2473at::Tensor & logical_not__generated_plumbing(at::Tensor & self) {
2474 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2475 auto maybe_layer = maybeCurrentDynamicLayer();
2476 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2477 int64_t cur_level = maybe_layer->layerId();
2478 if (!isBatchedAtLevel(self, cur_level)) {
2479 return at::_ops::logical_not_::call(self);
2480 }
2481 Tensor self_value;
2482 optional<int64_t> self_bdim;
2483 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2484 batch_rule(self_value, self_bdim);
2485 return self;
2486}
2487template <typename batch_rule_t, batch_rule_t batch_rule>
2488at::Tensor logical_xor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
2489 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2490 auto maybe_layer = maybeCurrentDynamicLayer();
2491 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2492 int64_t cur_level = maybe_layer->layerId();
2493 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
2494 return at::_ops::logical_xor::call(self, other);
2495 }
2496 Tensor self_value;
2497 optional<int64_t> self_bdim;
2498 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2499 Tensor other_value;
2500 optional<int64_t> other_bdim;
2501 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
2502 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
2503 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2504}
2505template <typename batch_rule_t, batch_rule_t batch_rule>
2506at::Tensor & logical_xor__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
2507 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2508 auto maybe_layer = maybeCurrentDynamicLayer();
2509 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2510 int64_t cur_level = maybe_layer->layerId();
2511 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
2512 return at::_ops::logical_xor_::call(self, other);
2513 }
2514 Tensor self_value;
2515 optional<int64_t> self_bdim;
2516 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2517 Tensor other_value;
2518 optional<int64_t> other_bdim;
2519 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
2520 batch_rule(self_value, self_bdim, other_value, other_bdim);
2521 return self;
2522}
2523template <typename batch_rule_t, batch_rule_t batch_rule>
2524at::Tensor logical_and_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
2525 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2526 auto maybe_layer = maybeCurrentDynamicLayer();
2527 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2528 int64_t cur_level = maybe_layer->layerId();
2529 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
2530 return at::_ops::logical_and::call(self, other);
2531 }
2532 Tensor self_value;
2533 optional<int64_t> self_bdim;
2534 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2535 Tensor other_value;
2536 optional<int64_t> other_bdim;
2537 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
2538 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
2539 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2540}
2541template <typename batch_rule_t, batch_rule_t batch_rule>
2542at::Tensor & logical_and__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
2543 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2544 auto maybe_layer = maybeCurrentDynamicLayer();
2545 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2546 int64_t cur_level = maybe_layer->layerId();
2547 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
2548 return at::_ops::logical_and_::call(self, other);
2549 }
2550 Tensor self_value;
2551 optional<int64_t> self_bdim;
2552 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2553 Tensor other_value;
2554 optional<int64_t> other_bdim;
2555 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
2556 batch_rule(self_value, self_bdim, other_value, other_bdim);
2557 return self;
2558}
2559template <typename batch_rule_t, batch_rule_t batch_rule>
2560at::Tensor logical_or_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
2561 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2562 auto maybe_layer = maybeCurrentDynamicLayer();
2563 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2564 int64_t cur_level = maybe_layer->layerId();
2565 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
2566 return at::_ops::logical_or::call(self, other);
2567 }
2568 Tensor self_value;
2569 optional<int64_t> self_bdim;
2570 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2571 Tensor other_value;
2572 optional<int64_t> other_bdim;
2573 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
2574 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
2575 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2576}
2577template <typename batch_rule_t, batch_rule_t batch_rule>
2578at::Tensor & logical_or__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
2579 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2580 auto maybe_layer = maybeCurrentDynamicLayer();
2581 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2582 int64_t cur_level = maybe_layer->layerId();
2583 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
2584 return at::_ops::logical_or_::call(self, other);
2585 }
2586 Tensor self_value;
2587 optional<int64_t> self_bdim;
2588 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2589 Tensor other_value;
2590 optional<int64_t> other_bdim;
2591 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
2592 batch_rule(self_value, self_bdim, other_value, other_bdim);
2593 return self;
2594}
2595template <typename batch_rule_t, batch_rule_t batch_rule>
2596at::Tensor bmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
2597 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2598 auto maybe_layer = maybeCurrentDynamicLayer();
2599 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2600 int64_t cur_level = maybe_layer->layerId();
2601 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
2602 return at::_ops::bmm::call(self, mat2);
2603 }
2604 Tensor self_value;
2605 optional<int64_t> self_bdim;
2606 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2607 Tensor mat2_value;
2608 optional<int64_t> mat2_bdim;
2609 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
2610 auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
2611 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2612}
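// TensorList ops skip per-argument unwrapping here: the list is passed through
// as-is and the batch rule is responsible for unwrapping its elements. Vector
// results come back through makeBatchedVector instead of makeBatched.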
2613template <typename batch_rule_t, batch_rule_t batch_rule>
2614::std::vector<at::Tensor> broadcast_tensors_generated_plumbing(at::TensorList tensors) {
2615 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2616 auto maybe_layer = maybeCurrentDynamicLayer();
2617 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2618 int64_t cur_level = maybe_layer->layerId();
2619 if (!isBatchedAtLevel(tensors, cur_level)) {
2620 return at::_ops::broadcast_tensors::call(tensors);
2621 }
2622
2623 auto results = batch_rule(tensors);
2624 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
2625}
2626template <typename batch_rule_t, batch_rule_t batch_rule>
2627at::Tensor broadcast_to_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
2628 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2629 auto maybe_layer = maybeCurrentDynamicLayer();
2630 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2631 int64_t cur_level = maybe_layer->layerId();
2632 if (!isBatchedAtLevel(self, cur_level)) {
2633 return at::_ops::broadcast_to::call(self, size);
2634 }
2635 Tensor self_value;
2636 optional<int64_t> self_bdim;
2637 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2638 auto results = batch_rule(self_value, self_bdim, size);
2639 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2640}
2641template <typename batch_rule_t, batch_rule_t batch_rule>
2642at::Tensor _sparse_broadcast_to_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
2643 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2644 auto maybe_layer = maybeCurrentDynamicLayer();
2645 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2646 int64_t cur_level = maybe_layer->layerId();
2647 if (!isBatchedAtLevel(self, cur_level)) {
2648 return at::_ops::_sparse_broadcast_to::call(self, size);
2649 }
2650 Tensor self_value;
2651 optional<int64_t> self_bdim;
2652 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2653 auto results = batch_rule(self_value, self_bdim, size);
2654 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2655}
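// The cat/concat/concatenate variants below all follow the same TensorList pattern.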
2656template <typename batch_rule_t, batch_rule_t batch_rule>
2657at::Tensor cat_generated_plumbing(const at::ITensorListRef & tensors, int64_t dim) {
2658 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2659 auto maybe_layer = maybeCurrentDynamicLayer();
2660 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2661 int64_t cur_level = maybe_layer->layerId();
2662 if (!isBatchedAtLevel(tensors, cur_level)) {
2663 return at::_ops::cat::call(tensors, dim);
2664 }
2665
2666 auto results = batch_rule(tensors, dim);
2667 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2668}
2669template <typename batch_rule_t, batch_rule_t batch_rule>
2670at::Tensor cat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
2671 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2672 auto maybe_layer = maybeCurrentDynamicLayer();
2673 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2674 int64_t cur_level = maybe_layer->layerId();
2675 if (!isBatchedAtLevel(tensors, cur_level)) {
2676 return at::_ops::cat_names::call(tensors, dim);
2677 }
2678
2679 auto results = batch_rule(tensors, dim);
2680 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2681}
2682template <typename batch_rule_t, batch_rule_t batch_rule>
2683at::Tensor concat_generated_plumbing(at::TensorList tensors, int64_t dim) {
2684 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2685 auto maybe_layer = maybeCurrentDynamicLayer();
2686 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2687 int64_t cur_level = maybe_layer->layerId();
2688 if (!isBatchedAtLevel(tensors, cur_level)) {
2689 return at::_ops::concat::call(tensors, dim);
2690 }
2691
2692 auto results = batch_rule(tensors, dim);
2693 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2694}
2695template <typename batch_rule_t, batch_rule_t batch_rule>
2696at::Tensor concat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
2697 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2698 auto maybe_layer = maybeCurrentDynamicLayer();
2699 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2700 int64_t cur_level = maybe_layer->layerId();
2701 if (!isBatchedAtLevel(tensors, cur_level)) {
2702 return at::_ops::concat_names::call(tensors, dim);
2703 }
2704
2705 auto results = batch_rule(tensors, dim);
2706 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2707}
2708template <typename batch_rule_t, batch_rule_t batch_rule>
2709at::Tensor concatenate_generated_plumbing(at::TensorList tensors, int64_t dim) {
2710 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2711 auto maybe_layer = maybeCurrentDynamicLayer();
2712 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2713 int64_t cur_level = maybe_layer->layerId();
2714 if (!isBatchedAtLevel(tensors, cur_level)) {
2715 return at::_ops::concatenate::call(tensors, dim);
2716 }
2717
2718 auto results = batch_rule(tensors, dim);
2719 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2720}
2721template <typename batch_rule_t, batch_rule_t batch_rule>
2722at::Tensor concatenate_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
2723 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2724 auto maybe_layer = maybeCurrentDynamicLayer();
2725 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2726 int64_t cur_level = maybe_layer->layerId();
2727 if (!isBatchedAtLevel(tensors, cur_level)) {
2728 return at::_ops::concatenate_names::call(tensors, dim);
2729 }
2730
2731 auto results = batch_rule(tensors, dim);
2732 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2733}
2734template <typename batch_rule_t, batch_rule_t batch_rule>
2735at::Tensor block_diag_generated_plumbing(at::TensorList tensors) {
2736 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2737 auto maybe_layer = maybeCurrentDynamicLayer();
2738 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2739 int64_t cur_level = maybe_layer->layerId();
2740 if (!isBatchedAtLevel(tensors, cur_level)) {
2741 return at::_ops::block_diag::call(tensors);
2742 }
2743
2744 auto results = batch_rule(tensors);
2745 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2746}
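// For reference, a batch rule matching the unary plumbing below (e.g. ceil) would
// roughly take the shape of this hypothetical sketch; the real rules are defined
// elsewhere in functorch and may differ:
//
//   std::tuple<at::Tensor, c10::optional<int64_t>>
//   ceil_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim) {
//     // Pointwise ops can act on the underlying value directly and keep the
//     // incoming batch dimension unchanged.
//     return std::make_tuple(at::ceil(self), self_bdim);
//   }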
2747template <typename batch_rule_t, batch_rule_t batch_rule>
2748at::Tensor ceil_generated_plumbing(const at::Tensor & self) {
2749 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2750 auto maybe_layer = maybeCurrentDynamicLayer();
2751 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2752 int64_t cur_level = maybe_layer->layerId();
2753 if (!isBatchedAtLevel(self, cur_level)) {
2754 return at::_ops::ceil::call(self);
2755 }
2756 Tensor self_value;
2757 optional<int64_t> self_bdim;
2758 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2759 auto results = batch_rule(self_value, self_bdim);
2760 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2761}
2762template <typename batch_rule_t, batch_rule_t batch_rule>
2763at::Tensor & ceil__generated_plumbing(at::Tensor & self) {
2764 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2765 auto maybe_layer = maybeCurrentDynamicLayer();
2766 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2767 int64_t cur_level = maybe_layer->layerId();
2768 if (!isBatchedAtLevel(self, cur_level)) {
2769 return at::_ops::ceil_::call(self);
2770 }
2771 Tensor self_value;
2772 optional<int64_t> self_bdim;
2773 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2774 batch_rule(self_value, self_bdim);
2775 return self;
2776}
2777template <typename batch_rule_t, batch_rule_t batch_rule>
2778at::Tensor chain_matmul_generated_plumbing(at::TensorList matrices) {
2779 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2780 auto maybe_layer = maybeCurrentDynamicLayer();
2781 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2782 int64_t cur_level = maybe_layer->layerId();
2783 if (!isBatchedAtLevel(matrices, cur_level)) {
2784 return at::_ops::chain_matmul::call(matrices);
2785 }
2786
2787 auto results = batch_rule(matrices);
2788 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2789}
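// chunk/tensor_split style ops return a std::vector<Tensor>: the batch rule hands
// back the tensors together with a batch-dim, and makeBatchedVector re-wraps the
// result at the current level.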
2790template <typename batch_rule_t, batch_rule_t batch_rule>
2791::std::vector<at::Tensor> unsafe_chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
2792 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2793 auto maybe_layer = maybeCurrentDynamicLayer();
2794 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2795 int64_t cur_level = maybe_layer->layerId();
2796 if (!isBatchedAtLevel(self, cur_level)) {
2797 return at::_ops::unsafe_chunk::call(self, chunks, dim);
2798 }
2799 Tensor self_value;
2800 optional<int64_t> self_bdim;
2801 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2802 auto results = batch_rule(self_value, self_bdim, chunks, dim);
2803 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
2804}
2805template <typename batch_rule_t, batch_rule_t batch_rule>
2806::std::vector<at::Tensor> chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
2807 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2808 auto maybe_layer = maybeCurrentDynamicLayer();
2809 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2810 int64_t cur_level = maybe_layer->layerId();
2811 if (!isBatchedAtLevel(self, cur_level)) {
2812 return at::_ops::chunk::call(self, chunks, dim);
2813 }
2814 Tensor self_value;
2815 optional<int64_t> self_bdim;
2816 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2817 auto results = batch_rule(self_value, self_bdim, chunks, dim);
2818 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
2819}
2820template <typename batch_rule_t, batch_rule_t batch_rule>
2821::std::vector<at::Tensor> tensor_split_sections_generated_plumbing(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
2822 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2823 auto maybe_layer = maybeCurrentDynamicLayer();
2824 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2825 int64_t cur_level = maybe_layer->layerId();
2826 if (!isBatchedAtLevel(self, cur_level)) {
2827 return at::_ops::tensor_split_sections::call(self, sections, dim);
2828 }
2829 Tensor self_value;
2830 optional<int64_t> self_bdim;
2831 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2832 auto results = batch_rule(self_value, self_bdim, sections, dim);
2833 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
2834}
2835template <typename batch_rule_t, batch_rule_t batch_rule>
2836::std::vector<at::Tensor> tensor_split_indices_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
2837 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2838 auto maybe_layer = maybeCurrentDynamicLayer();
2839 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2840 int64_t cur_level = maybe_layer->layerId();
2841 if (!isBatchedAtLevel(self, cur_level)) {
2842 return at::_ops::tensor_split_indices::call(self, indices, dim);
2843 }
2844 Tensor self_value;
2845 optional<int64_t> self_bdim;
2846 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2847 auto results = batch_rule(self_value, self_bdim, indices, dim);
2848 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
2849}
2850template <typename batch_rule_t, batch_rule_t batch_rule>
2851::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
2852 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2853 auto maybe_layer = maybeCurrentDynamicLayer();
2854 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2855 int64_t cur_level = maybe_layer->layerId();
2856 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor_indices_or_sections, cur_level)) {
2857 return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim);
2858 }
2859 Tensor self_value;
2860 optional<int64_t> self_bdim;
2861 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2862 Tensor tensor_indices_or_sections_value;
2863 optional<int64_t> tensor_indices_or_sections_bdim;
2864 std::tie(tensor_indices_or_sections_value, tensor_indices_or_sections_bdim) = unwrapTensorAtLevel(tensor_indices_or_sections, cur_level);
2865 auto results = batch_rule(self_value, self_bdim, tensor_indices_or_sections_value, tensor_indices_or_sections_bdim, dim);
2866 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
2867}
2868template <typename batch_rule_t, batch_rule_t batch_rule>
2869at::Tensor clamp_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
2870 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2871 auto maybe_layer = maybeCurrentDynamicLayer();
2872 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2873 int64_t cur_level = maybe_layer->layerId();
2874 if (!isBatchedAtLevel(self, cur_level)) {
2875 return at::_ops::clamp::call(self, min, max);
2876 }
2877 Tensor self_value;
2878 optional<int64_t> self_bdim;
2879 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2880 auto results = batch_rule(self_value, self_bdim, min, max);
2881 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2882}
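// Scalar bounds (min/max as Scalars above) pass through to the batch rule
// unchanged; the *_Tensor overloads below unwrap optional tensor bounds the same
// way other optional tensor arguments are handled.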
2883template <typename batch_rule_t, batch_rule_t batch_rule>
2884at::Tensor clamp_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
2885 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2886 auto maybe_layer = maybeCurrentDynamicLayer();
2887 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2888 int64_t cur_level = maybe_layer->layerId();
2889 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
2890 return at::_ops::clamp_Tensor::call(self, min, max);
2891 }
2892 Tensor self_value;
2893 optional<int64_t> self_bdim;
2894 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2895 optional<Tensor> min_value;
2896 optional<int64_t> min_bdim;
2897 if (min) {
2898 std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
2899 }
2900 optional<Tensor> max_value;
2901 optional<int64_t> max_bdim;
2902 if (max) {
2903 std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
2904 }
2905 auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
2906 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2907}
2908template <typename batch_rule_t, batch_rule_t batch_rule>
2909at::Tensor & clamp__generated_plumbing(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
2910 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2911 auto maybe_layer = maybeCurrentDynamicLayer();
2912 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2913 int64_t cur_level = maybe_layer->layerId();
2914 if (!isBatchedAtLevel(self, cur_level)) {
2915 return at::_ops::clamp_::call(self, min, max);
2916 }
2917 Tensor self_value;
2918 optional<int64_t> self_bdim;
2919 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2920 batch_rule(self_value, self_bdim, min, max);
2921 return self;
2922}
2923template <typename batch_rule_t, batch_rule_t batch_rule>
2924at::Tensor & clamp__Tensor_generated_plumbing(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
2925 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2926 auto maybe_layer = maybeCurrentDynamicLayer();
2927 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2928 int64_t cur_level = maybe_layer->layerId();
2929 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
2930 return at::_ops::clamp__Tensor::call(self, min, max);
2931 }
2932 Tensor self_value;
2933 optional<int64_t> self_bdim;
2934 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2935 optional<Tensor> min_value;
2936 optional<int64_t> min_bdim;
2937 if (min) {
2938 std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
2939 }
2940 optional<Tensor> max_value;
2941 optional<int64_t> max_bdim;
2942 if (max) {
2943 std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
2944 }
2945 batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
2946 return self;
2947}
2948template <typename batch_rule_t, batch_rule_t batch_rule>
2949at::Tensor clamp_max_generated_plumbing(const at::Tensor & self, const at::Scalar & max) {
2950 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2951 auto maybe_layer = maybeCurrentDynamicLayer();
2952 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2953 int64_t cur_level = maybe_layer->layerId();
2954 if (!isBatchedAtLevel(self, cur_level)) {
2955 return at::_ops::clamp_max::call(self, max);
2956 }
2957 Tensor self_value;
2958 optional<int64_t> self_bdim;
2959 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2960 auto results = batch_rule(self_value, self_bdim, max);
2961 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2962}
2963template <typename batch_rule_t, batch_rule_t batch_rule>
2964at::Tensor clamp_max_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & max) {
2965 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2966 auto maybe_layer = maybeCurrentDynamicLayer();
2967 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
2968 int64_t cur_level = maybe_layer->layerId();
2969 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
2970 return at::_ops::clamp_max_Tensor::call(self, max);
2971 }
2972 Tensor self_value;
2973 optional<int64_t> self_bdim;
2974 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2975 Tensor max_value;
2976 optional<int64_t> max_bdim;
2977 std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level);
2978 auto results = batch_rule(self_value, self_bdim, max_value, max_bdim);
2979 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
2980}
2981template <typename batch_rule_t, batch_rule_t batch_rule>
2982at::Tensor & clamp_max__generated_plumbing(at::Tensor & self, const at::Scalar & max) {
2983 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2984 auto maybe_layer = maybeCurrentDynamicLayer();
2985 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
2986 int64_t cur_level = maybe_layer->layerId();
2987 if (!isBatchedAtLevel(self, cur_level)) {
2988 return at::_ops::clamp_max_::call(self, max);
2989 }
2990 Tensor self_value;
2991 optional<int64_t> self_bdim;
2992 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
2993 batch_rule(self_value, self_bdim, max);
2994 return self;
2995}
2996template <typename batch_rule_t, batch_rule_t batch_rule>
2997at::Tensor & clamp_max__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & max) {
2998 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
2999 auto maybe_layer = maybeCurrentDynamicLayer();
3000 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3001 int64_t cur_level = maybe_layer->layerId();
3002 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
3003 return at::_ops::clamp_max__Tensor::call(self, max);
3004 }
3005 Tensor self_value;
3006 optional<int64_t> self_bdim;
3007 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3008 Tensor max_value;
3009 optional<int64_t> max_bdim;
3010 std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level);
3011 batch_rule(self_value, self_bdim, max_value, max_bdim);
3012 return self;
3013}
3014template <typename batch_rule_t, batch_rule_t batch_rule>
3015at::Tensor clamp_min_generated_plumbing(const at::Tensor & self, const at::Scalar & min) {
3016 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3017 auto maybe_layer = maybeCurrentDynamicLayer();
3018 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3019 int64_t cur_level = maybe_layer->layerId();
3020 if (!isBatchedAtLevel(self, cur_level)) {
3021 return at::_ops::clamp_min::call(self, min);
3022 }
3023 Tensor self_value;
3024 optional<int64_t> self_bdim;
3025 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3026 auto results = batch_rule(self_value, self_bdim, min);
3027 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3028}
3029template <typename batch_rule_t, batch_rule_t batch_rule>
3030at::Tensor clamp_min_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & min) {
3031 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3032 auto maybe_layer = maybeCurrentDynamicLayer();
3033 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3034 int64_t cur_level = maybe_layer->layerId();
3035 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
3036 return at::_ops::clamp_min_Tensor::call(self, min);
3037 }
3038 Tensor self_value;
3039 optional<int64_t> self_bdim;
3040 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3041 Tensor min_value;
3042 optional<int64_t> min_bdim;
3043 std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level);
3044 auto results = batch_rule(self_value, self_bdim, min_value, min_bdim);
3045 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3046}
3047template <typename batch_rule_t, batch_rule_t batch_rule>
3048at::Tensor & clamp_min__generated_plumbing(at::Tensor & self, const at::Scalar & min) {
3049 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3050 auto maybe_layer = maybeCurrentDynamicLayer();
3051 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3052 int64_t cur_level = maybe_layer->layerId();
3053 if (!isBatchedAtLevel(self, cur_level)) {
3054 return at::_ops::clamp_min_::call(self, min);
3055 }
3056 Tensor self_value;
3057 optional<int64_t> self_bdim;
3058 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3059 batch_rule(self_value, self_bdim, min);
3060 return self;
3061}
3062template <typename batch_rule_t, batch_rule_t batch_rule>
3063at::Tensor & clamp_min__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & min) {
3064 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3065 auto maybe_layer = maybeCurrentDynamicLayer();
3066 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3067 int64_t cur_level = maybe_layer->layerId();
3068 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
3069 return at::_ops::clamp_min__Tensor::call(self, min);
3070 }
3071 Tensor self_value;
3072 optional<int64_t> self_bdim;
3073 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3074 Tensor min_value;
3075 optional<int64_t> min_bdim;
3076 std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level);
3077 batch_rule(self_value, self_bdim, min_value, min_bdim);
3078 return self;
3079}
3080template <typename batch_rule_t, batch_rule_t batch_rule>
3081at::Tensor clip_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
3082 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3083 auto maybe_layer = maybeCurrentDynamicLayer();
3084 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3085 int64_t cur_level = maybe_layer->layerId();
3086 if (!isBatchedAtLevel(self, cur_level)) {
3087 return at::_ops::clip::call(self, min, max);
3088 }
3089 Tensor self_value;
3090 optional<int64_t> self_bdim;
3091 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3092 auto results = batch_rule(self_value, self_bdim, min, max);
3093 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3094}
3095template <typename batch_rule_t, batch_rule_t batch_rule>
3096at::Tensor clip_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
3097 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3098 auto maybe_layer = maybeCurrentDynamicLayer();
3099 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3100 int64_t cur_level = maybe_layer->layerId();
3101 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
3102 return at::_ops::clip_Tensor::call(self, min, max);
3103 }
3104 Tensor self_value;
3105 optional<int64_t> self_bdim;
3106 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3107 optional<Tensor> min_value;
3108 optional<int64_t> min_bdim;
3109 if (min) {
3110 std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
3111 }
3112 optional<Tensor> max_value;
3113 optional<int64_t> max_bdim;
3114 if (max) {
3115 std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
3116 }
3117 auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
3118 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3119}
3120template <typename batch_rule_t, batch_rule_t batch_rule>
3121at::Tensor & clip__generated_plumbing(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
3122 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3123 auto maybe_layer = maybeCurrentDynamicLayer();
3124 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3125 int64_t cur_level = maybe_layer->layerId();
3126 if (!isBatchedAtLevel(self, cur_level)) {
3127 return at::_ops::clip_::call(self, min, max);
3128 }
3129 Tensor self_value;
3130 optional<int64_t> self_bdim;
3131 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3132 batch_rule(self_value, self_bdim, min, max);
3133 return self;
3134}
3135template <typename batch_rule_t, batch_rule_t batch_rule>
3136at::Tensor & clip__Tensor_generated_plumbing(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
3137 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3138 auto maybe_layer = maybeCurrentDynamicLayer();
3139 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3140 int64_t cur_level = maybe_layer->layerId();
3141 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
3142 return at::_ops::clip__Tensor::call(self, min, max);
3143 }
3144 Tensor self_value;
3145 optional<int64_t> self_bdim;
3146 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3147 optional<Tensor> min_value;
3148 optional<int64_t> min_bdim;
3149 if (min) {
3150 std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
3151 }
3152 optional<Tensor> max_value;
3153 optional<int64_t> max_bdim;
3154 if (max) {
3155 std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
3156 }
3157 batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
3158 return self;
3159}
3160template <typename batch_rule_t, batch_rule_t batch_rule>
3161at::Tensor complex_generated_plumbing(const at::Tensor & real, const at::Tensor & imag) {
3162 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3163 auto maybe_layer = maybeCurrentDynamicLayer();
3164 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3165 int64_t cur_level = maybe_layer->layerId();
3166 if (!isBatchedAtLevel(real, cur_level) && !isBatchedAtLevel(imag, cur_level)) {
3167 return at::_ops::complex::call(real, imag);
3168 }
3169 Tensor real_value;
3170 optional<int64_t> real_bdim;
3171 std::tie(real_value, real_bdim) = unwrapTensorAtLevel(real, cur_level);
3172 Tensor imag_value;
3173 optional<int64_t> imag_bdim;
3174 std::tie(imag_value, imag_bdim) = unwrapTensorAtLevel(imag, cur_level);
3175 auto results = batch_rule(real_value, real_bdim, imag_value, imag_bdim);
3176 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3177}
3178template <typename batch_rule_t, batch_rule_t batch_rule>
3179at::Tensor polar_generated_plumbing(const at::Tensor & abs, const at::Tensor & angle) {
3180 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3181 auto maybe_layer = maybeCurrentDynamicLayer();
3182 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3183 int64_t cur_level = maybe_layer->layerId();
3184 if (!isBatchedAtLevel(abs, cur_level) && !isBatchedAtLevel(angle, cur_level)) {
3185 return at::_ops::polar::call(abs, angle);
3186 }
3187 Tensor abs_value;
3188 optional<int64_t> abs_bdim;
3189 std::tie(abs_value, abs_bdim) = unwrapTensorAtLevel(abs, cur_level);
3190 Tensor angle_value;
3191 optional<int64_t> angle_bdim;
3192 std::tie(angle_value, angle_bdim) = unwrapTensorAtLevel(angle, cur_level);
3193 auto results = batch_rule(abs_value, abs_bdim, angle_value, angle_bdim);
3194 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3195}
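// Non-tensor arguments (here the SymInt pad list and the fill value) are never
// unwrapped; they are forwarded verbatim to both the unbatched fallback call
// and the batch rule.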
3196template <typename batch_rule_t, batch_rule_t batch_rule>
3197at::Tensor constant_pad_nd_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
3198 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3199 auto maybe_layer = maybeCurrentDynamicLayer();
3200 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3201 int64_t cur_level = maybe_layer->layerId();
3202 if (!isBatchedAtLevel(self, cur_level)) {
3203 return at::_ops::constant_pad_nd::call(self, pad, value);
3204 }
3205 Tensor self_value;
3206 optional<int64_t> self_bdim;
3207 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3208 auto results = batch_rule(self_value, self_bdim, pad, value);
3209 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3210}
3211template <typename batch_rule_t, batch_rule_t batch_rule>
3212at::Tensor contiguous_generated_plumbing(const at::Tensor & self, at::MemoryFormat memory_format) {
3213 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3214 auto maybe_layer = maybeCurrentDynamicLayer();
3215 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3216 int64_t cur_level = maybe_layer->layerId();
3217 if (!isBatchedAtLevel(self, cur_level)) {
3218 return at::_ops::contiguous::call(self, memory_format);
3219 }
3220 Tensor self_value;
3221 optional<int64_t> self_bdim;
3222 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3223 auto results = batch_rule(self_value, self_bdim, memory_format);
3224 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3225}
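// convolution and the ops that follow share the general plumbing shape: test
// whether any tensor argument is batched at the current level, fall back to
// at::_ops::<op>::call if not, otherwise unwrap every tensor and pass
// (value, bdim) pairs plus the remaining arguments to the batch rule. Purely
// as an illustration of the template parameters (hypothetical rule name, not
// functorch's actual registration code), an instantiation would look like
//   m.impl("convolution",
//          convolution_generated_plumbing<decltype(&my_conv_batch_rule),
//                                         &my_conv_batch_rule>);
// inside a TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) block.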
3226template <typename batch_rule_t, batch_rule_t batch_rule>
3227at::Tensor convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups) {
3228 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3229 auto maybe_layer = maybeCurrentDynamicLayer();
3230 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3231 int64_t cur_level = maybe_layer->layerId();
3232 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3233 return at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
3234 }
3235 Tensor input_value;
3236 optional<int64_t> input_bdim;
3237 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3238 Tensor weight_value;
3239 optional<int64_t> weight_bdim;
3240 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3241 optional<Tensor> bias_value;
3242 optional<int64_t> bias_bdim;
3243 if (bias) {
3244 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3245 }
3246 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
3247 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3248}
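// Multi-output ops: the batch rule returns interleaved (value, bdim) pairs,
// and the plumbing re-wraps slots (0,1), (2,3), (4,5) into the std::tuple of
// batched outputs via makeBatched.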
3249template <typename batch_rule_t, batch_rule_t batch_rule>
3250::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
3251 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3252 auto maybe_layer = maybeCurrentDynamicLayer();
3253 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3254 int64_t cur_level = maybe_layer->layerId();
3255 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
3256 return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
3257 }
3258 Tensor grad_output_value;
3259 optional<int64_t> grad_output_bdim;
3260 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
3261 Tensor input_value;
3262 optional<int64_t> input_bdim;
3263 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3264 Tensor weight_value;
3265 optional<int64_t> weight_bdim;
3266 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3267 auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
3268 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
3269}
3270template <typename batch_rule_t, batch_rule_t batch_rule>
3271at::Tensor convolution_overrideable_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
3272 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3273 auto maybe_layer = maybeCurrentDynamicLayer();
3274 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3275 int64_t cur_level = maybe_layer->layerId();
3276 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3277 return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
3278 }
3279 Tensor input_value;
3280 optional<int64_t> input_bdim;
3281 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3282 Tensor weight_value;
3283 optional<int64_t> weight_bdim;
3284 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3285 optional<Tensor> bias_value;
3286 optional<int64_t> bias_bdim;
3287 if (bias) {
3288 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3289 }
3290 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
3291 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3292}
3293template <typename batch_rule_t, batch_rule_t batch_rule>
3294::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
3295 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3296 auto maybe_layer = maybeCurrentDynamicLayer();
3297 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3298 int64_t cur_level = maybe_layer->layerId();
3299 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
3300 return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
3301 }
3302 Tensor grad_output_value;
3303 optional<int64_t> grad_output_bdim;
3304 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
3305 Tensor input_value;
3306 optional<int64_t> input_bdim;
3307 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3308 Tensor weight_value;
3309 optional<int64_t> weight_bdim;
3310 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3311 auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
3312 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
3313}
3314template <typename batch_rule_t, batch_rule_t batch_rule>
3315at::Tensor _convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
3316 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3317 auto maybe_layer = maybeCurrentDynamicLayer();
3318 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3319 int64_t cur_level = maybe_layer->layerId();
3320 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3321 return at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
3322 }
3323 Tensor input_value;
3324 optional<int64_t> input_bdim;
3325 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3326 Tensor weight_value;
3327 optional<int64_t> weight_bdim;
3328 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3329 optional<Tensor> bias_value;
3330 optional<int64_t> bias_bdim;
3331 if (bias) {
3332 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3333 }
3334 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
3335 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3336}
3337template <typename batch_rule_t, batch_rule_t batch_rule>
3338at::Tensor _convolution_deprecated_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
3339 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3340 auto maybe_layer = maybeCurrentDynamicLayer();
3341 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3342 int64_t cur_level = maybe_layer->layerId();
3343 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3344 return at::_ops::_convolution_deprecated::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
3345 }
3346 Tensor input_value;
3347 optional<int64_t> input_bdim;
3348 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3349 Tensor weight_value;
3350 optional<int64_t> weight_bdim;
3351 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3352 optional<Tensor> bias_value;
3353 optional<int64_t> bias_bdim;
3354 if (bias) {
3355 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3356 }
3357 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
3358 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3359}
3360template <typename batch_rule_t, batch_rule_t batch_rule>
3361at::Tensor _convolution_mode_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
3362 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3363 auto maybe_layer = maybeCurrentDynamicLayer();
3364 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3365 int64_t cur_level = maybe_layer->layerId();
3366 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3367 return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
3368 }
3369 Tensor input_value;
3370 optional<int64_t> input_bdim;
3371 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3372 Tensor weight_value;
3373 optional<int64_t> weight_bdim;
3374 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3375 optional<Tensor> bias_value;
3376 optional<int64_t> bias_bdim;
3377 if (bias) {
3378 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3379 }
3380 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3381 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3382}
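// _convolution_double_backward lists the optional gradients ggI/ggW/ggb first
// in the operator signature, but the plumbing unwraps the required tensors
// (gO, weight, self) before the optionals; the batch_rule call below restores
// the operator's original argument order.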
3383template <typename batch_rule_t, batch_rule_t batch_rule>
3384::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_generated_plumbing(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
3385 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3386 auto maybe_layer = maybeCurrentDynamicLayer();
3387 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3388 int64_t cur_level = maybe_layer->layerId();
3389 if (!isBatchedAtLevel(ggI, cur_level) && !isBatchedAtLevel(ggW, cur_level) && !isBatchedAtLevel(ggb, cur_level) && !isBatchedAtLevel(gO, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(self, cur_level)) {
3390 return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
3391 }
3392 Tensor gO_value;
3393 optional<int64_t> gO_bdim;
3394 std::tie(gO_value, gO_bdim) = unwrapTensorAtLevel(gO, cur_level);
3395 Tensor weight_value;
3396 optional<int64_t> weight_bdim;
3397 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3398 Tensor self_value;
3399 optional<int64_t> self_bdim;
3400 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3401 optional<Tensor> ggI_value;
3402 optional<int64_t> ggI_bdim;
3403 if (ggI) {
3404 std::tie(ggI_value, ggI_bdim) = unwrapTensorAtLevel(ggI.value(), cur_level);
3405 }
3406 optional<Tensor> ggW_value;
3407 optional<int64_t> ggW_bdim;
3408 if (ggW) {
3409 std::tie(ggW_value, ggW_bdim) = unwrapTensorAtLevel(ggW.value(), cur_level);
3410 }
3411 optional<Tensor> ggb_value;
3412 optional<int64_t> ggb_bdim;
3413 if (ggb) {
3414 std::tie(ggb_value, ggb_bdim) = unwrapTensorAtLevel(ggb.value(), cur_level);
3415 }
3416 auto results = batch_rule(ggI_value, ggI_bdim, ggW_value, ggW_bdim, ggb_value, ggb_bdim, gO_value, gO_bdim, weight_value, weight_bdim, self_value, self_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
3417 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
3418}
3419template <typename batch_rule_t, batch_rule_t batch_rule>
3420at::Tensor conv1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
3421 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3422 auto maybe_layer = maybeCurrentDynamicLayer();
3423 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3424 int64_t cur_level = maybe_layer->layerId();
3425 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3426 return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups);
3427 }
3428 Tensor input_value;
3429 optional<int64_t> input_bdim;
3430 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3431 Tensor weight_value;
3432 optional<int64_t> weight_bdim;
3433 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3434 optional<Tensor> bias_value;
3435 optional<int64_t> bias_bdim;
3436 if (bias) {
3437 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3438 }
3439 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3440 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3441}
3442template <typename batch_rule_t, batch_rule_t batch_rule>
3443at::Tensor conv2d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
3444 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3445 auto maybe_layer = maybeCurrentDynamicLayer();
3446 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3447 int64_t cur_level = maybe_layer->layerId();
3448 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3449 return at::_ops::conv2d::call(input, weight, bias, stride, padding, dilation, groups);
3450 }
3451 Tensor input_value;
3452 optional<int64_t> input_bdim;
3453 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3454 Tensor weight_value;
3455 optional<int64_t> weight_bdim;
3456 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3457 optional<Tensor> bias_value;
3458 optional<int64_t> bias_bdim;
3459 if (bias) {
3460 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3461 }
3462 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3463 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3464}
3465template <typename batch_rule_t, batch_rule_t batch_rule>
3466at::Tensor conv3d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
3467 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3468 auto maybe_layer = maybeCurrentDynamicLayer();
3469 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3470 int64_t cur_level = maybe_layer->layerId();
3471 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3472 return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
3473 }
3474 Tensor input_value;
3475 optional<int64_t> input_bdim;
3476 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3477 Tensor weight_value;
3478 optional<int64_t> weight_bdim;
3479 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3480 optional<Tensor> bias_value;
3481 optional<int64_t> bias_bdim;
3482 if (bias) {
3483 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3484 }
3485 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3486 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3487}
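// The *.padding overloads of conv1d/conv2d/conv3d accept the padding as a
// c10::string_view (e.g. "same" or "valid") instead of an IntArrayRef; like
// any non-tensor argument it is passed through to the batch rule unchanged.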
3488template <typename batch_rule_t, batch_rule_t batch_rule>
3489at::Tensor conv1d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
3490 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3491 auto maybe_layer = maybeCurrentDynamicLayer();
3492 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3493 int64_t cur_level = maybe_layer->layerId();
3494 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3495 return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups);
3496 }
3497 Tensor input_value;
3498 optional<int64_t> input_bdim;
3499 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3500 Tensor weight_value;
3501 optional<int64_t> weight_bdim;
3502 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3503 optional<Tensor> bias_value;
3504 optional<int64_t> bias_bdim;
3505 if (bias) {
3506 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3507 }
3508 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3509 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3510}
3511template <typename batch_rule_t, batch_rule_t batch_rule>
3512at::Tensor conv2d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
3513 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3514 auto maybe_layer = maybeCurrentDynamicLayer();
3515 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3516 int64_t cur_level = maybe_layer->layerId();
3517 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3518 return at::_ops::conv2d_padding::call(input, weight, bias, stride, padding, dilation, groups);
3519 }
3520 Tensor input_value;
3521 optional<int64_t> input_bdim;
3522 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3523 Tensor weight_value;
3524 optional<int64_t> weight_bdim;
3525 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3526 optional<Tensor> bias_value;
3527 optional<int64_t> bias_bdim;
3528 if (bias) {
3529 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3530 }
3531 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3532 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3533}
3534template <typename batch_rule_t, batch_rule_t batch_rule>
3535at::Tensor conv3d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) {
3536 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3537 auto maybe_layer = maybeCurrentDynamicLayer();
3538 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3539 int64_t cur_level = maybe_layer->layerId();
3540 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3541 return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
3542 }
3543 Tensor input_value;
3544 optional<int64_t> input_bdim;
3545 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3546 Tensor weight_value;
3547 optional<int64_t> weight_bdim;
3548 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3549 optional<Tensor> bias_value;
3550 optional<int64_t> bias_bdim;
3551 if (bias) {
3552 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3553 }
3554 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
3555 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3556}
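// conv_tbc takes a required (non-optional) bias tensor, so bias is unwrapped
// unconditionally here, unlike the optional-bias handling in the ops above.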
3557template <typename batch_rule_t, batch_rule_t batch_rule>
3558at::Tensor conv_tbc_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
3559 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3560 auto maybe_layer = maybeCurrentDynamicLayer();
3561 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3562 int64_t cur_level = maybe_layer->layerId();
3563 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3564 return at::_ops::conv_tbc::call(self, weight, bias, pad);
3565 }
3566 Tensor self_value;
3567 optional<int64_t> self_bdim;
3568 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3569 Tensor weight_value;
3570 optional<int64_t> weight_bdim;
3571 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3572 Tensor bias_value;
3573 optional<int64_t> bias_bdim;
3574 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
3575 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
3576 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3577}
3578template <typename batch_rule_t, batch_rule_t batch_rule>
3579::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
3580 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3581 auto maybe_layer = maybeCurrentDynamicLayer();
3582 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3583 int64_t cur_level = maybe_layer->layerId();
3584 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3585 return at::_ops::conv_tbc_backward::call(self, input, weight, bias, pad);
3586 }
3587 Tensor self_value;
3588 optional<int64_t> self_bdim;
3589 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3590 Tensor input_value;
3591 optional<int64_t> input_bdim;
3592 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3593 Tensor weight_value;
3594 optional<int64_t> weight_bdim;
3595 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3596 Tensor bias_value;
3597 optional<int64_t> bias_bdim;
3598 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
3599 auto results = batch_rule(self_value, self_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
3600 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
3601}
3602template <typename batch_rule_t, batch_rule_t batch_rule>
3603at::Tensor conv_transpose1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
3604 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3605 auto maybe_layer = maybeCurrentDynamicLayer();
3606 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3607 int64_t cur_level = maybe_layer->layerId();
3608 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3609 return at::_ops::conv_transpose1d::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
3610 }
3611 Tensor input_value;
3612 optional<int64_t> input_bdim;
3613 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3614 Tensor weight_value;
3615 optional<int64_t> weight_bdim;
3616 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3617 optional<Tensor> bias_value;
3618 optional<int64_t> bias_bdim;
3619 if (bias) {
3620 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3621 }
3622 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
3623 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3624}
3625template <typename batch_rule_t, batch_rule_t batch_rule>
3626at::Tensor conv_transpose2d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
3627 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3628 auto maybe_layer = maybeCurrentDynamicLayer();
3629 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3630 int64_t cur_level = maybe_layer->layerId();
3631 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3632 return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
3633 }
3634 Tensor input_value;
3635 optional<int64_t> input_bdim;
3636 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3637 Tensor weight_value;
3638 optional<int64_t> weight_bdim;
3639 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3640 optional<Tensor> bias_value;
3641 optional<int64_t> bias_bdim;
3642 if (bias) {
3643 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3644 }
3645 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
3646 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3647}
3648template <typename batch_rule_t, batch_rule_t batch_rule>
3649at::Tensor conv_transpose3d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation) {
3650 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3651 auto maybe_layer = maybeCurrentDynamicLayer();
3652 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3653 int64_t cur_level = maybe_layer->layerId();
3654 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
3655 return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
3656 }
3657 Tensor input_value;
3658 optional<int64_t> input_bdim;
3659 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3660 Tensor weight_value;
3661 optional<int64_t> weight_bdim;
3662 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3663 optional<Tensor> bias_value;
3664 optional<int64_t> bias_bdim;
3665 if (bias) {
3666 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3667 }
3668 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
3669 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3670}
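// copy (functional) vs. copy_ (in-place): the functional form re-wraps the
// batch rule's result with makeBatched, while the in-place form relies on the
// batch rule mutating the unwrapped self_value and simply returns self.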
3671template <typename batch_rule_t, batch_rule_t batch_rule>
3672at::Tensor copy_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
3673 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3674 auto maybe_layer = maybeCurrentDynamicLayer();
3675 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3676 int64_t cur_level = maybe_layer->layerId();
3677 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
3678 return at::_ops::copy::call(self, src, non_blocking);
3679 }
3680 Tensor self_value;
3681 optional<int64_t> self_bdim;
3682 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3683 Tensor src_value;
3684 optional<int64_t> src_bdim;
3685 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
3686 auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
3687 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3688}
3689template <typename batch_rule_t, batch_rule_t batch_rule>
3690at::Tensor & copy__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
3691 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3692 auto maybe_layer = maybeCurrentDynamicLayer();
3693 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3694 int64_t cur_level = maybe_layer->layerId();
3695 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
3696 return at::_ops::copy_::call(self, src, non_blocking);
3697 }
3698 Tensor self_value;
3699 optional<int64_t> self_bdim;
3700 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3701 Tensor src_value;
3702 optional<int64_t> src_bdim;
3703 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
3704 batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
3705 return self;
3706}
3707template <typename batch_rule_t, batch_rule_t batch_rule>
3708at::Tensor _copy_from_generated_plumbing(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {
3709 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3710 auto maybe_layer = maybeCurrentDynamicLayer();
3711 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3712 int64_t cur_level = maybe_layer->layerId();
3713 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
3714 return at::_ops::_copy_from::call(self, dst, non_blocking);
3715 }
3716 Tensor self_value;
3717 optional<int64_t> self_bdim;
3718 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3719 Tensor dst_value;
3720 optional<int64_t> dst_bdim;
3721 std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level);
3722 auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim, non_blocking);
3723 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3724}
3725template <typename batch_rule_t, batch_rule_t batch_rule>
3726at::Tensor _copy_from_and_resize_generated_plumbing(const at::Tensor & self, const at::Tensor & dst) {
3727 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3728 auto maybe_layer = maybeCurrentDynamicLayer();
3729 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3730 int64_t cur_level = maybe_layer->layerId();
3731 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
3732 return at::_ops::_copy_from_and_resize::call(self, dst);
3733 }
3734 Tensor self_value;
3735 optional<int64_t> self_bdim;
3736 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3737 Tensor dst_value;
3738 optional<int64_t> dst_bdim;
3739 std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level);
3740 auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim);
3741 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3742}
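// [Illustrative sketch only; not part of the generated plumbing and not the
// rule functorch actually registers.] A batch rule compatible with the unary
// plumbing below takes the unwrapped tensor plus its optional batch dimension
// and returns the (value, bdim) pair that makeBatched consumes. For a
// pointwise op such as cos the batch dimension carries through unchanged:
inline ::std::tuple<at::Tensor, c10::optional<int64_t>> example_cos_batch_rule_sketch(
    const at::Tensor & self, c10::optional<int64_t> self_bdim) {
  // Pointwise op: apply cos to the unwrapped value and keep the batch dim.
  return std::make_tuple(at::_ops::cos::call(self), self_bdim);
}
// cos_generated_plumbing<decltype(&example_cos_batch_rule_sketch),
//                        &example_cos_batch_rule_sketch> would then be a valid
// instantiation of the template that follows.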
3743template <typename batch_rule_t, batch_rule_t batch_rule>
3744at::Tensor cos_generated_plumbing(const at::Tensor & self) {
3745 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3746 auto maybe_layer = maybeCurrentDynamicLayer();
3747 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3748 int64_t cur_level = maybe_layer->layerId();
3749 if (!isBatchedAtLevel(self, cur_level)) {
3750 return at::_ops::cos::call(self);
3751 }
3752 Tensor self_value;
3753 optional<int64_t> self_bdim;
3754 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3755 auto results = batch_rule(self_value, self_bdim);
3756 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3757}
3758template <typename batch_rule_t, batch_rule_t batch_rule>
3759at::Tensor & cos__generated_plumbing(at::Tensor & self) {
3760 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3761 auto maybe_layer = maybeCurrentDynamicLayer();
3762 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3763 int64_t cur_level = maybe_layer->layerId();
3764 if (!isBatchedAtLevel(self, cur_level)) {
3765 return at::_ops::cos_::call(self);
3766 }
3767 Tensor self_value;
3768 optional<int64_t> self_bdim;
3769 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3770 batch_rule(self_value, self_bdim);
3771 return self;
3772}
3773template <typename batch_rule_t, batch_rule_t batch_rule>
3774at::Tensor cosh_generated_plumbing(const at::Tensor & self) {
3775 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3776 auto maybe_layer = maybeCurrentDynamicLayer();
3777 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3778 int64_t cur_level = maybe_layer->layerId();
3779 if (!isBatchedAtLevel(self, cur_level)) {
3780 return at::_ops::cosh::call(self);
3781 }
3782 Tensor self_value;
3783 optional<int64_t> self_bdim;
3784 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3785 auto results = batch_rule(self_value, self_bdim);
3786 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3787}
3788template <typename batch_rule_t, batch_rule_t batch_rule>
3789at::Tensor & cosh__generated_plumbing(at::Tensor & self) {
3790 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3791 auto maybe_layer = maybeCurrentDynamicLayer();
3792 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
3793 int64_t cur_level = maybe_layer->layerId();
3794 if (!isBatchedAtLevel(self, cur_level)) {
3795 return at::_ops::cosh_::call(self);
3796 }
3797 Tensor self_value;
3798 optional<int64_t> self_bdim;
3799 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3800 batch_rule(self_value, self_bdim);
3801 return self;
3802}
3803template <typename batch_rule_t, batch_rule_t batch_rule>
3804at::Tensor cosine_embedding_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
3805 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3806 auto maybe_layer = maybeCurrentDynamicLayer();
3807 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3808 int64_t cur_level = maybe_layer->layerId();
3809 if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
3810 return at::_ops::cosine_embedding_loss::call(input1, input2, target, margin, reduction);
3811 }
3812 Tensor input1_value;
3813 optional<int64_t> input1_bdim;
3814 std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
3815 Tensor input2_value;
3816 optional<int64_t> input2_bdim;
3817 std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
3818 Tensor target_value;
3819 optional<int64_t> target_bdim;
3820 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
3821 auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
3822 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3823}
3824template <typename batch_rule_t, batch_rule_t batch_rule>
3825at::Tensor count_nonzero_dim_IntList_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
3826 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3827 auto maybe_layer = maybeCurrentDynamicLayer();
3828 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3829 int64_t cur_level = maybe_layer->layerId();
3830 if (!isBatchedAtLevel(self, cur_level)) {
3831 return at::_ops::count_nonzero_dim_IntList::call(self, dim);
3832 }
3833 Tensor self_value;
3834 optional<int64_t> self_bdim;
3835 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3836 auto results = batch_rule(self_value, self_bdim, dim);
3837 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3838}
3839template <typename batch_rule_t, batch_rule_t batch_rule>
3840at::Tensor count_nonzero_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim) {
3841 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3842 auto maybe_layer = maybeCurrentDynamicLayer();
3843 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3844 int64_t cur_level = maybe_layer->layerId();
3845 if (!isBatchedAtLevel(self, cur_level)) {
3846 return at::_ops::count_nonzero::call(self, dim);
3847 }
3848 Tensor self_value;
3849 optional<int64_t> self_bdim;
3850 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3851 auto results = batch_rule(self_value, self_bdim, dim);
3852 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3853}
3854template <typename batch_rule_t, batch_rule_t batch_rule>
3855at::Tensor cov_generated_plumbing(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) {
3856 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3857 auto maybe_layer = maybeCurrentDynamicLayer();
3858 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3859 int64_t cur_level = maybe_layer->layerId();
3860 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(fweights, cur_level) && !isBatchedAtLevel(aweights, cur_level)) {
3861 return at::_ops::cov::call(self, correction, fweights, aweights);
3862 }
3863 Tensor self_value;
3864 optional<int64_t> self_bdim;
3865 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3866 optional<Tensor> fweights_value;
3867 optional<int64_t> fweights_bdim;
3868 if (fweights) {
3869 std::tie(fweights_value, fweights_bdim) = unwrapTensorAtLevel(fweights.value(), cur_level);
3870 }
3871 optional<Tensor> aweights_value;
3872 optional<int64_t> aweights_bdim;
3873 if (aweights) {
3874 std::tie(aweights_value, aweights_bdim) = unwrapTensorAtLevel(aweights.value(), cur_level);
3875 }
3876 auto results = batch_rule(self_value, self_bdim, correction, fweights_value, fweights_bdim, aweights_value, aweights_bdim);
3877 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3878}
3879template <typename batch_rule_t, batch_rule_t batch_rule>
3880at::Tensor corrcoef_generated_plumbing(const at::Tensor & self) {
3881 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3882 auto maybe_layer = maybeCurrentDynamicLayer();
3883 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3884 int64_t cur_level = maybe_layer->layerId();
3885 if (!isBatchedAtLevel(self, cur_level)) {
3886 return at::_ops::corrcoef::call(self);
3887 }
3888 Tensor self_value;
3889 optional<int64_t> self_bdim;
3890 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
3891 auto results = batch_rule(self_value, self_bdim);
3892 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3893}
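// Backend-specific ops (cuDNN, MPS) receive the same generated plumbing as
// everything else; when no argument is batched at the current level they fall
// straight through to the regular at::_ops::<op>::call dispatch.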
3894template <typename batch_rule_t, batch_rule_t batch_rule>
3895at::Tensor cudnn_affine_grid_generator_generated_plumbing(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
3896 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3897 auto maybe_layer = maybeCurrentDynamicLayer();
3898 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3899 int64_t cur_level = maybe_layer->layerId();
3900 if (!isBatchedAtLevel(theta, cur_level)) {
3901 return at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W);
3902 }
3903 Tensor theta_value;
3904 optional<int64_t> theta_bdim;
3905 std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level);
3906 auto results = batch_rule(theta_value, theta_bdim, N, C, H, W);
3907 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3908}
3909template <typename batch_rule_t, batch_rule_t batch_rule>
3910at::Tensor cudnn_affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
3911 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3912 auto maybe_layer = maybeCurrentDynamicLayer();
3913 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3914 int64_t cur_level = maybe_layer->layerId();
3915 if (!isBatchedAtLevel(grad, cur_level)) {
3916 return at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W);
3917 }
3918 Tensor grad_value;
3919 optional<int64_t> grad_bdim;
3920 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
3921 auto results = batch_rule(grad_value, grad_bdim, N, C, H, W);
3922 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
3923}
3924template <typename batch_rule_t, batch_rule_t batch_rule>
3925::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
3926 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3927 auto maybe_layer = maybeCurrentDynamicLayer();
3928 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3929 int64_t cur_level = maybe_layer->layerId();
3930 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
3931 return at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
3932 }
3933 Tensor input_value;
3934 optional<int64_t> input_bdim;
3935 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3936 Tensor weight_value;
3937 optional<int64_t> weight_bdim;
3938 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3939 optional<Tensor> bias_value;
3940 optional<int64_t> bias_bdim;
3941 if (bias) {
3942 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
3943 }
3944 optional<Tensor> running_mean_value;
3945 optional<int64_t> running_mean_bdim;
3946 if (running_mean) {
3947 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
3948 }
3949 optional<Tensor> running_var_value;
3950 optional<int64_t> running_var_bdim;
3951 if (running_var) {
3952 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
3953 }
3954 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
3955 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
3956}
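// cudnn_batch_norm_backward mixes required tensors (input, grad_output,
// weight, reserveSpace) with optional running/saved statistics; only the
// optional ones are guarded by presence checks before unwrapping.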
3957template <typename batch_rule_t, batch_rule_t batch_rule>
3958::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
3959 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
3960 auto maybe_layer = maybeCurrentDynamicLayer();
3961 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
3962 int64_t cur_level = maybe_layer->layerId();
3963 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level) && !isBatchedAtLevel(reserveSpace, cur_level)) {
3964 return at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
3965 }
3966 Tensor input_value;
3967 optional<int64_t> input_bdim;
3968 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
3969 Tensor grad_output_value;
3970 optional<int64_t> grad_output_bdim;
3971 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
3972 Tensor weight_value;
3973 optional<int64_t> weight_bdim;
3974 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
3975 Tensor reserveSpace_value;
3976 optional<int64_t> reserveSpace_bdim;
3977 std::tie(reserveSpace_value, reserveSpace_bdim) = unwrapTensorAtLevel(reserveSpace, cur_level);
3978 optional<Tensor> running_mean_value;
3979 optional<int64_t> running_mean_bdim;
3980 if (running_mean) {
3981 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
3982 }
3983 optional<Tensor> running_var_value;
3984 optional<int64_t> running_var_bdim;
3985 if (running_var) {
3986 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
3987 }
3988 optional<Tensor> save_mean_value;
3989 optional<int64_t> save_mean_bdim;
3990 if (save_mean) {
3991 std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
3992 }
3993 optional<Tensor> save_var_value;
3994 optional<int64_t> save_var_bdim;
3995 if (save_var) {
3996 std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
3997 }
3998 auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon, reserveSpace_value, reserveSpace_bdim);
3999 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
4000}
4001template <typename batch_rule_t, batch_rule_t batch_rule>
4002at::Tensor cudnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
4003 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4004 auto maybe_layer = maybeCurrentDynamicLayer();
4005 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4006 int64_t cur_level = maybe_layer->layerId();
4007 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
4008 return at::_ops::cudnn_convolution::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
4009 }
4010 Tensor self_value;
4011 optional<int64_t> self_bdim;
4012 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4013 Tensor weight_value;
4014 optional<int64_t> weight_bdim;
4015 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
4016 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
4017 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4018}
4019template <typename batch_rule_t, batch_rule_t batch_rule>
4020at::Tensor cudnn_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
4021 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4022 auto maybe_layer = maybeCurrentDynamicLayer();
4023 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4024 int64_t cur_level = maybe_layer->layerId();
4025 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
4026 return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
4027 }
4028 Tensor self_value;
4029 optional<int64_t> self_bdim;
4030 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4031 Tensor weight_value;
4032 optional<int64_t> weight_bdim;
4033 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
4034 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
4035 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4036}
4037template <typename batch_rule_t, batch_rule_t batch_rule>
4038at::Tensor _mps_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
4039 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4040 auto maybe_layer = maybeCurrentDynamicLayer();
4041 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4042 int64_t cur_level = maybe_layer->layerId();
4043 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
4044 return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups);
4045 }
4046 Tensor self_value;
4047 optional<int64_t> self_bdim;
4048 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4049 Tensor weight_value;
4050 optional<int64_t> weight_bdim;
4051 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
4052 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups);
4053 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4054}
4055template <typename batch_rule_t, batch_rule_t batch_rule>
4056::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) {
4057 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4058 auto maybe_layer = maybeCurrentDynamicLayer();
4059 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4060 int64_t cur_level = maybe_layer->layerId();
4061 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
4062 return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
4063 }
4064 Tensor self_value;
4065 optional<int64_t> self_bdim;
4066 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4067 Tensor grad_output_value;
4068 optional<int64_t> grad_output_bdim;
4069 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
4070 Tensor weight_value;
4071 optional<int64_t> weight_bdim;
4072 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
4073 auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, output_mask);
4074 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
4075}
4076template <typename batch_rule_t, batch_rule_t batch_rule>
4077at::Tensor cudnn_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
4078 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4079 auto maybe_layer = maybeCurrentDynamicLayer();
4080 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4081 int64_t cur_level = maybe_layer->layerId();
4082 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
4083 return at::_ops::cudnn_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
4084 }
4085 Tensor self_value;
4086 optional<int64_t> self_bdim;
4087 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4088 Tensor weight_value;
4089 optional<int64_t> weight_bdim;
4090 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
4091 optional<Tensor> bias_value;
4092 optional<int64_t> bias_bdim;
4093 if (bias) {
4094 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
4095 }
4096 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
4097 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4098}
4099template <typename batch_rule_t, batch_rule_t batch_rule>
4100at::Tensor cudnn_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
4101 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4102 auto maybe_layer = maybeCurrentDynamicLayer();
4103 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4104 int64_t cur_level = maybe_layer->layerId();
4105 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
4106 return at::_ops::cudnn_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
4107 }
4108 Tensor self_value;
4109 optional<int64_t> self_bdim;
4110 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4111 Tensor weight_value;
4112 optional<int64_t> weight_bdim;
4113 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
4114 Tensor z_value;
4115 optional<int64_t> z_bdim;
4116 std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level);
4117 optional<Tensor> bias_value;
4118 optional<int64_t> bias_bdim;
4119 if (bias) {
4120 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
4121 }
4122 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
4123 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4124}
4125template <typename batch_rule_t, batch_rule_t batch_rule>
4126at::Tensor cudnn_grid_sampler_generated_plumbing(const at::Tensor & self, const at::Tensor & grid) {
4127 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4128 auto maybe_layer = maybeCurrentDynamicLayer();
4129 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4130 int64_t cur_level = maybe_layer->layerId();
4131 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
4132 return at::_ops::cudnn_grid_sampler::call(self, grid);
4133 }
4134 Tensor self_value;
4135 optional<int64_t> self_bdim;
4136 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4137 Tensor grid_value;
4138 optional<int64_t> grid_bdim;
4139 std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
4140 auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim);
4141 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4142}
4143template <typename batch_rule_t, batch_rule_t batch_rule>
4144::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
4145 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4146 auto maybe_layer = maybeCurrentDynamicLayer();
4147 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4148 int64_t cur_level = maybe_layer->layerId();
4149 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level) && !isBatchedAtLevel(grad_output, cur_level)) {
4150 return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
4151 }
4152 Tensor self_value;
4153 optional<int64_t> self_bdim;
4154 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4155 Tensor grid_value;
4156 optional<int64_t> grid_bdim;
4157 std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
4158 Tensor grad_output_value;
4159 optional<int64_t> grad_output_bdim;
4160 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
4161 auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim, grad_output_value, grad_output_bdim);
4162 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
4163}
4164template <typename batch_rule_t, batch_rule_t batch_rule>
4165::std::tuple<at::Tensor,at::Tensor> cummax_generated_plumbing(const at::Tensor & self, int64_t dim) {
4166 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4167 auto maybe_layer = maybeCurrentDynamicLayer();
4168 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4169 int64_t cur_level = maybe_layer->layerId();
4170 if (!isBatchedAtLevel(self, cur_level)) {
4171 return at::_ops::cummax::call(self, dim);
4172 }
4173 Tensor self_value;
4174 optional<int64_t> self_bdim;
4175 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4176 auto results = batch_rule(self_value, self_bdim, dim);
4177 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
4178}
4179template <typename batch_rule_t, batch_rule_t batch_rule>
4180::std::tuple<at::Tensor,at::Tensor> cummax_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
4181 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4182 auto maybe_layer = maybeCurrentDynamicLayer();
4183 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4184 int64_t cur_level = maybe_layer->layerId();
4185 if (!isBatchedAtLevel(self, cur_level)) {
4186 return at::_ops::cummax_dimname::call(self, dim);
4187 }
4188 Tensor self_value;
4189 optional<int64_t> self_bdim;
4190 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4191 auto results = batch_rule(self_value, self_bdim, dim);
4192 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
4193}
4194template <typename batch_rule_t, batch_rule_t batch_rule>
4195void _cummax_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
4196 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4197 auto maybe_layer = maybeCurrentDynamicLayer();
4198 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
4199 int64_t cur_level = maybe_layer->layerId();
4200 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
4201 return at::_ops::_cummax_helper::call(self, values, indices, dim);
4202 }
4203 Tensor self_value;
4204 optional<int64_t> self_bdim;
4205 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4206 Tensor values_value;
4207 optional<int64_t> values_bdim;
4208 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
4209 Tensor indices_value;
4210 optional<int64_t> indices_bdim;
4211 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
4212 batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
4213}
4214template <typename batch_rule_t, batch_rule_t batch_rule>
4215::std::tuple<at::Tensor,at::Tensor> cummin_generated_plumbing(const at::Tensor & self, int64_t dim) {
4216 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4217 auto maybe_layer = maybeCurrentDynamicLayer();
4218 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4219 int64_t cur_level = maybe_layer->layerId();
4220 if (!isBatchedAtLevel(self, cur_level)) {
4221 return at::_ops::cummin::call(self, dim);
4222 }
4223 Tensor self_value;
4224 optional<int64_t> self_bdim;
4225 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4226 auto results = batch_rule(self_value, self_bdim, dim);
4227 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
4228}
4229template <typename batch_rule_t, batch_rule_t batch_rule>
4230::std::tuple<at::Tensor,at::Tensor> cummin_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
4231 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4232 auto maybe_layer = maybeCurrentDynamicLayer();
4233 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4234 int64_t cur_level = maybe_layer->layerId();
4235 if (!isBatchedAtLevel(self, cur_level)) {
4236 return at::_ops::cummin_dimname::call(self, dim);
4237 }
4238 Tensor self_value;
4239 optional<int64_t> self_bdim;
4240 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4241 auto results = batch_rule(self_value, self_bdim, dim);
4242 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
4243}
4244template <typename batch_rule_t, batch_rule_t batch_rule>
4245void _cummin_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
4246 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4247 auto maybe_layer = maybeCurrentDynamicLayer();
4248 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
4249 int64_t cur_level = maybe_layer->layerId();
4250 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
4251 return at::_ops::_cummin_helper::call(self, values, indices, dim);
4252 }
4253 Tensor self_value;
4254 optional<int64_t> self_bdim;
4255 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4256 Tensor values_value;
4257 optional<int64_t> values_bdim;
4258 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
4259 Tensor indices_value;
4260 optional<int64_t> indices_bdim;
4261 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
4262 batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
4263}
4264template <typename batch_rule_t, batch_rule_t batch_rule>
4265at::Tensor cummaxmin_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
4266 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4267 auto maybe_layer = maybeCurrentDynamicLayer();
4268 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4269 int64_t cur_level = maybe_layer->layerId();
4270 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
4271 return at::_ops::cummaxmin_backward::call(grad, input, indices, dim);
4272 }
4273 Tensor grad_value;
4274 optional<int64_t> grad_bdim;
4275 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
4276 Tensor input_value;
4277 optional<int64_t> input_bdim;
4278 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
4279 Tensor indices_value;
4280 optional<int64_t> indices_bdim;
4281 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
4282 auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, indices_value, indices_bdim, dim);
4283 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4284}
4285template <typename batch_rule_t, batch_rule_t batch_rule>
4286at::Tensor cumprod_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
4287 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4288 auto maybe_layer = maybeCurrentDynamicLayer();
4289 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4290 int64_t cur_level = maybe_layer->layerId();
4291 if (!isBatchedAtLevel(self, cur_level)) {
4292 return at::_ops::cumprod::call(self, dim, dtype);
4293 }
4294 Tensor self_value;
4295 optional<int64_t> self_bdim;
4296 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4297 auto results = batch_rule(self_value, self_bdim, dim, dtype);
4298 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4299}
4300template <typename batch_rule_t, batch_rule_t batch_rule>
4301at::Tensor & cumprod__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
4302 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4303 auto maybe_layer = maybeCurrentDynamicLayer();
4304 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4305 int64_t cur_level = maybe_layer->layerId();
4306 if (!isBatchedAtLevel(self, cur_level)) {
4307 return at::_ops::cumprod_::call(self, dim, dtype);
4308 }
4309 Tensor self_value;
4310 optional<int64_t> self_bdim;
4311 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4312 batch_rule(self_value, self_bdim, dim, dtype);
4313 return self;
4314}
4315template <typename batch_rule_t, batch_rule_t batch_rule>
4316at::Tensor cumprod_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
4317 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4318 auto maybe_layer = maybeCurrentDynamicLayer();
4319 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4320 int64_t cur_level = maybe_layer->layerId();
4321 if (!isBatchedAtLevel(self, cur_level)) {
4322 return at::_ops::cumprod_dimname::call(self, dim, dtype);
4323 }
4324 Tensor self_value;
4325 optional<int64_t> self_bdim;
4326 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4327 auto results = batch_rule(self_value, self_bdim, dim, dtype);
4328 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4329}
4330template <typename batch_rule_t, batch_rule_t batch_rule>
4331at::Tensor & cumprod__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
4332 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4333 auto maybe_layer = maybeCurrentDynamicLayer();
4334 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4335 int64_t cur_level = maybe_layer->layerId();
4336 if (!isBatchedAtLevel(self, cur_level)) {
4337 return at::_ops::cumprod__dimname::call(self, dim, dtype);
4338 }
4339 Tensor self_value;
4340 optional<int64_t> self_bdim;
4341 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4342 batch_rule(self_value, self_bdim, dim, dtype);
4343 return self;
4344}
4345template <typename batch_rule_t, batch_rule_t batch_rule>
4346at::Tensor cumprod_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
4347 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4348 auto maybe_layer = maybeCurrentDynamicLayer();
4349 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4350 int64_t cur_level = maybe_layer->layerId();
4351 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) {
4352 return at::_ops::cumprod_backward::call(grad, input, dim, output);
4353 }
4354 Tensor grad_value;
4355 optional<int64_t> grad_bdim;
4356 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
4357 Tensor input_value;
4358 optional<int64_t> input_bdim;
4359 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
4360 Tensor output_value;
4361 optional<int64_t> output_bdim;
4362 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
4363 auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, dim, output_value, output_bdim);
4364 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4365}
4366template <typename batch_rule_t, batch_rule_t batch_rule>
4367at::Tensor cumsum_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
4368 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4369 auto maybe_layer = maybeCurrentDynamicLayer();
4370 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4371 int64_t cur_level = maybe_layer->layerId();
4372 if (!isBatchedAtLevel(self, cur_level)) {
4373 return at::_ops::cumsum::call(self, dim, dtype);
4374 }
4375 Tensor self_value;
4376 optional<int64_t> self_bdim;
4377 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4378 auto results = batch_rule(self_value, self_bdim, dim, dtype);
4379 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4380}
4381template <typename batch_rule_t, batch_rule_t batch_rule>
4382at::Tensor & cumsum__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
4383 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4384 auto maybe_layer = maybeCurrentDynamicLayer();
4385 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4386 int64_t cur_level = maybe_layer->layerId();
4387 if (!isBatchedAtLevel(self, cur_level)) {
4388 return at::_ops::cumsum_::call(self, dim, dtype);
4389 }
4390 Tensor self_value;
4391 optional<int64_t> self_bdim;
4392 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4393 batch_rule(self_value, self_bdim, dim, dtype);
4394 return self;
4395}
4396template <typename batch_rule_t, batch_rule_t batch_rule>
4397at::Tensor cumsum_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
4398 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4399 auto maybe_layer = maybeCurrentDynamicLayer();
4400 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4401 int64_t cur_level = maybe_layer->layerId();
4402 if (!isBatchedAtLevel(self, cur_level)) {
4403 return at::_ops::cumsum_dimname::call(self, dim, dtype);
4404 }
4405 Tensor self_value;
4406 optional<int64_t> self_bdim;
4407 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4408 auto results = batch_rule(self_value, self_bdim, dim, dtype);
4409 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4410}
4411template <typename batch_rule_t, batch_rule_t batch_rule>
4412at::Tensor & cumsum__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
4413 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4414 auto maybe_layer = maybeCurrentDynamicLayer();
4415 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4416 int64_t cur_level = maybe_layer->layerId();
4417 if (!isBatchedAtLevel(self, cur_level)) {
4418 return at::_ops::cumsum__dimname::call(self, dim, dtype);
4419 }
4420 Tensor self_value;
4421 optional<int64_t> self_bdim;
4422 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4423 batch_rule(self_value, self_bdim, dim, dtype);
4424 return self;
4425}
4426template <typename batch_rule_t, batch_rule_t batch_rule>
4427at::Tensor cumulative_trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
4428 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4429 auto maybe_layer = maybeCurrentDynamicLayer();
4430 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4431 int64_t cur_level = maybe_layer->layerId();
4432 if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
4433 return at::_ops::cumulative_trapezoid_x::call(y, x, dim);
4434 }
4435 Tensor y_value;
4436 optional<int64_t> y_bdim;
4437 std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
4438 Tensor x_value;
4439 optional<int64_t> x_bdim;
4440 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
4441 auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
4442 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4443}
4444template <typename batch_rule_t, batch_rule_t batch_rule>
4445at::Tensor cumulative_trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
4446 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4447 auto maybe_layer = maybeCurrentDynamicLayer();
4448 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4449 int64_t cur_level = maybe_layer->layerId();
4450 if (!isBatchedAtLevel(y, cur_level)) {
4451 return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim);
4452 }
4453 Tensor y_value;
4454 optional<int64_t> y_bdim;
4455 std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
4456 auto results = batch_rule(y_value, y_bdim, dx, dim);
4457 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4458}
4459template <typename batch_rule_t, batch_rule_t batch_rule>
4460at::Tensor ctc_loss_IntList_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
4461 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4462 auto maybe_layer = maybeCurrentDynamicLayer();
4463 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4464 int64_t cur_level = maybe_layer->layerId();
4465 if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
4466 return at::_ops::ctc_loss_IntList::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
4467 }
4468 Tensor log_probs_value;
4469 optional<int64_t> log_probs_bdim;
4470 std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
4471 Tensor targets_value;
4472 optional<int64_t> targets_bdim;
4473 std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
4474 auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, reduction, zero_infinity);
4475 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4476}
4477template <typename batch_rule_t, batch_rule_t batch_rule>
4478at::Tensor ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
4479 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4480 auto maybe_layer = maybeCurrentDynamicLayer();
4481 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4482 int64_t cur_level = maybe_layer->layerId();
4483 if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
4484 return at::_ops::ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
4485 }
4486 Tensor log_probs_value;
4487 optional<int64_t> log_probs_bdim;
4488 std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
4489 Tensor targets_value;
4490 optional<int64_t> targets_bdim;
4491 std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
4492 Tensor input_lengths_value;
4493 optional<int64_t> input_lengths_bdim;
4494 std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
4495 Tensor target_lengths_value;
4496 optional<int64_t> target_lengths_bdim;
4497 std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
4498 auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, reduction, zero_infinity);
4499 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4500}
4501template <typename batch_rule_t, batch_rule_t batch_rule>
4502::std::tuple<at::Tensor,at::Tensor> _ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
4503 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4504 auto maybe_layer = maybeCurrentDynamicLayer();
4505 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4506 int64_t cur_level = maybe_layer->layerId();
4507 if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
4508 return at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
4509 }
4510 Tensor log_probs_value;
4511 optional<int64_t> log_probs_bdim;
4512 std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
4513 Tensor targets_value;
4514 optional<int64_t> targets_bdim;
4515 std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
4516 auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, zero_infinity);
4517 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
4518}
4519template <typename batch_rule_t, batch_rule_t batch_rule>
4520::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
4521 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4522 auto maybe_layer = maybeCurrentDynamicLayer();
4523 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4524 int64_t cur_level = maybe_layer->layerId();
4525 if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
4526 return at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
4527 }
4528 Tensor log_probs_value;
4529 optional<int64_t> log_probs_bdim;
4530 std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
4531 Tensor targets_value;
4532 optional<int64_t> targets_bdim;
4533 std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
4534 Tensor input_lengths_value;
4535 optional<int64_t> input_lengths_bdim;
4536 std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
4537 Tensor target_lengths_value;
4538 optional<int64_t> target_lengths_bdim;
4539 std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
4540 auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, zero_infinity);
4541 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
4542}
4543template <typename batch_rule_t, batch_rule_t batch_rule>
4544at::Tensor _ctc_loss_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
4545 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4546 auto maybe_layer = maybeCurrentDynamicLayer();
4547 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4548 int64_t cur_level = maybe_layer->layerId();
4549 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
4550 return at::_ops::_ctc_loss_backward::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
4551 }
4552 Tensor grad_value;
4553 optional<int64_t> grad_bdim;
4554 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
4555 Tensor log_probs_value;
4556 optional<int64_t> log_probs_bdim;
4557 std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
4558 Tensor targets_value;
4559 optional<int64_t> targets_bdim;
4560 std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
4561 Tensor neg_log_likelihood_value;
4562 optional<int64_t> neg_log_likelihood_bdim;
4563 std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
4564 Tensor log_alpha_value;
4565 optional<int64_t> log_alpha_bdim;
4566 std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level);
4567 auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
4568 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4569}
4570template <typename batch_rule_t, batch_rule_t batch_rule>
4571at::Tensor _ctc_loss_backward_Tensor_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
4572 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4573 auto maybe_layer = maybeCurrentDynamicLayer();
4574 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4575 int64_t cur_level = maybe_layer->layerId();
4576 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
4577 return at::_ops::_ctc_loss_backward_Tensor::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
4578 }
4579 Tensor grad_value;
4580 optional<int64_t> grad_bdim;
4581 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
4582 Tensor log_probs_value;
4583 optional<int64_t> log_probs_bdim;
4584 std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
4585 Tensor targets_value;
4586 optional<int64_t> targets_bdim;
4587 std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
4588 Tensor input_lengths_value;
4589 optional<int64_t> input_lengths_bdim;
4590 std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
4591 Tensor target_lengths_value;
4592 optional<int64_t> target_lengths_bdim;
4593 std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
4594 Tensor neg_log_likelihood_value;
4595 optional<int64_t> neg_log_likelihood_bdim;
4596 std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
4597 Tensor log_alpha_value;
4598 optional<int64_t> log_alpha_bdim;
4599 std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level);
4600 auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
4601 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4602}
4603template <typename batch_rule_t, batch_rule_t batch_rule>
4604at::Tensor diag_embed_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
4605 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4606 auto maybe_layer = maybeCurrentDynamicLayer();
4607 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4608 int64_t cur_level = maybe_layer->layerId();
4609 if (!isBatchedAtLevel(self, cur_level)) {
4610 return at::_ops::diag_embed::call(self, offset, dim1, dim2);
4611 }
4612 Tensor self_value;
4613 optional<int64_t> self_bdim;
4614 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4615 auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
4616 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4617}
4618template <typename batch_rule_t, batch_rule_t batch_rule>
4619at::Tensor diagflat_generated_plumbing(const at::Tensor & self, int64_t offset) {
4620 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4621 auto maybe_layer = maybeCurrentDynamicLayer();
4622 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4623 int64_t cur_level = maybe_layer->layerId();
4624 if (!isBatchedAtLevel(self, cur_level)) {
4625 return at::_ops::diagflat::call(self, offset);
4626 }
4627 Tensor self_value;
4628 optional<int64_t> self_bdim;
4629 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4630 auto results = batch_rule(self_value, self_bdim, offset);
4631 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4632}
4633template <typename batch_rule_t, batch_rule_t batch_rule>
4634at::Tensor diagonal_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
4635 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4636 auto maybe_layer = maybeCurrentDynamicLayer();
4637 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4638 int64_t cur_level = maybe_layer->layerId();
4639 if (!isBatchedAtLevel(self, cur_level)) {
4640 return at::_ops::diagonal::call(self, offset, dim1, dim2);
4641 }
4642 Tensor self_value;
4643 optional<int64_t> self_bdim;
4644 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4645 auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
4646 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4647}
4648template <typename batch_rule_t, batch_rule_t batch_rule>
4649at::Tensor linalg_diagonal_generated_plumbing(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
4650 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4651 auto maybe_layer = maybeCurrentDynamicLayer();
4652 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4653 int64_t cur_level = maybe_layer->layerId();
4654 if (!isBatchedAtLevel(A, cur_level)) {
4655 return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2);
4656 }
4657 Tensor A_value;
4658 optional<int64_t> A_bdim;
4659 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
4660 auto results = batch_rule(A_value, A_bdim, offset, dim1, dim2);
4661 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4662}
4663template <typename batch_rule_t, batch_rule_t batch_rule>
4664at::Tensor diagonal_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
4665 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4666 auto maybe_layer = maybeCurrentDynamicLayer();
4667 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4668 int64_t cur_level = maybe_layer->layerId();
4669 if (!isBatchedAtLevel(self, cur_level)) {
4670 return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset);
4671 }
4672 Tensor self_value;
4673 optional<int64_t> self_bdim;
4674 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4675 auto results = batch_rule(self_value, self_bdim, outdim, dim1, dim2, offset);
4676 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4677}
4678template <typename batch_rule_t, batch_rule_t batch_rule>
4679at::Tensor diagonal_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
4680 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4681 auto maybe_layer = maybeCurrentDynamicLayer();
4682 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4683 int64_t cur_level = maybe_layer->layerId();
4684 if (!isBatchedAtLevel(grad_output, cur_level)) {
4685 return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
4686 }
4687 Tensor grad_output_value;
4688 optional<int64_t> grad_output_bdim;
4689 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
4690 auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, offset, dim1, dim2);
4691 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4692}
4693template <typename batch_rule_t, batch_rule_t batch_rule>
4694at::Tensor & fill_diagonal__generated_plumbing(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
4695 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4696 auto maybe_layer = maybeCurrentDynamicLayer();
4697 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4698 int64_t cur_level = maybe_layer->layerId();
4699 if (!isBatchedAtLevel(self, cur_level)) {
4700 return at::_ops::fill_diagonal_::call(self, fill_value, wrap);
4701 }
4702 Tensor self_value;
4703 optional<int64_t> self_bdim;
4704 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4705 batch_rule(self_value, self_bdim, fill_value, wrap);
4706 return self;
4707}
4708template <typename batch_rule_t, batch_rule_t batch_rule>
4709at::Tensor diff_generated_plumbing(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
4710 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4711 auto maybe_layer = maybeCurrentDynamicLayer();
4712 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4713 int64_t cur_level = maybe_layer->layerId();
4714 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(prepend, cur_level) && !isBatchedAtLevel(append, cur_level)) {
4715 return at::_ops::diff::call(self, n, dim, prepend, append);
4716 }
4717 Tensor self_value;
4718 optional<int64_t> self_bdim;
4719 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4720 optional<Tensor> prepend_value;
4721 optional<int64_t> prepend_bdim;
4722 if (prepend) {
4723 std::tie(prepend_value, prepend_bdim) = unwrapTensorAtLevel(prepend.value(), cur_level);
4724 }
4725 optional<Tensor> append_value;
4726 optional<int64_t> append_bdim;
4727 if (append) {
4728 std::tie(append_value, append_bdim) = unwrapTensorAtLevel(append.value(), cur_level);
4729 }
4730 auto results = batch_rule(self_value, self_bdim, n, dim, prepend_value, prepend_bdim, append_value, append_bdim);
4731 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4732}
4733template <typename batch_rule_t, batch_rule_t batch_rule>
4734::std::vector<at::Tensor> gradient_scalarint_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & spacing, c10::optional<int64_t> dim, int64_t edge_order) {
4735 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4736 auto maybe_layer = maybeCurrentDynamicLayer();
4737 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4738 int64_t cur_level = maybe_layer->layerId();
4739 if (!isBatchedAtLevel(self, cur_level)) {
4740 return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order);
4741 }
4742 Tensor self_value;
4743 optional<int64_t> self_bdim;
4744 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4745 auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4746 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4747}
4748template <typename batch_rule_t, batch_rule_t batch_rule>
4749::std::vector<at::Tensor> gradient_scalararray_generated_plumbing(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
4750 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4751 auto maybe_layer = maybeCurrentDynamicLayer();
4752 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4753 int64_t cur_level = maybe_layer->layerId();
4754 if (!isBatchedAtLevel(self, cur_level)) {
4755 return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order);
4756 }
4757 Tensor self_value;
4758 optional<int64_t> self_bdim;
4759 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4760 auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4761 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4762}
4763template <typename batch_rule_t, batch_rule_t batch_rule>
4764::std::vector<at::Tensor> gradient_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
4765 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4766 auto maybe_layer = maybeCurrentDynamicLayer();
4767 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4768 int64_t cur_level = maybe_layer->layerId();
4769 if (!isBatchedAtLevel(self, cur_level)) {
4770 return at::_ops::gradient_array::call(self, dim, edge_order);
4771 }
4772 Tensor self_value;
4773 optional<int64_t> self_bdim;
4774 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4775 auto results = batch_rule(self_value, self_bdim, dim, edge_order);
4776 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4777}
4778template <typename batch_rule_t, batch_rule_t batch_rule>
4779::std::vector<at::Tensor> gradient_scalarrayint_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order) {
4780 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4781 auto maybe_layer = maybeCurrentDynamicLayer();
4782 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4783 int64_t cur_level = maybe_layer->layerId();
4784 if (!isBatchedAtLevel(self, cur_level)) {
4785 return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order);
4786 }
4787 Tensor self_value;
4788 optional<int64_t> self_bdim;
4789 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4790 auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4791 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4792}
4793template <typename batch_rule_t, batch_rule_t batch_rule>
4794::std::vector<at::Tensor> gradient_scalarrayarray_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
4795 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4796 auto maybe_layer = maybeCurrentDynamicLayer();
4797 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4798 int64_t cur_level = maybe_layer->layerId();
4799 if (!isBatchedAtLevel(self, cur_level)) {
4800 return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order);
4801 }
4802 Tensor self_value;
4803 optional<int64_t> self_bdim;
4804 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4805 auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4806 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4807}
4808template <typename batch_rule_t, batch_rule_t batch_rule>
4809::std::vector<at::Tensor> gradient_tensorarrayint_generated_plumbing(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order) {
4810 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4811 auto maybe_layer = maybeCurrentDynamicLayer();
4812 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4813 int64_t cur_level = maybe_layer->layerId();
4814 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
4815 return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order);
4816 }
4817 Tensor self_value;
4818 optional<int64_t> self_bdim;
4819 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4820 auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4821 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4822}
4823template <typename batch_rule_t, batch_rule_t batch_rule>
4824::std::vector<at::Tensor> gradient_tensorarray_generated_plumbing(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
4825 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4826 auto maybe_layer = maybeCurrentDynamicLayer();
4827 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4828 int64_t cur_level = maybe_layer->layerId();
4829 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
4830 return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order);
4831 }
4832 Tensor self_value;
4833 optional<int64_t> self_bdim;
4834 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4835 auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
4836 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
4837}
4838template <typename batch_rule_t, batch_rule_t batch_rule>
4839at::Tensor div_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
4840 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4841 auto maybe_layer = maybeCurrentDynamicLayer();
4842 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4843 int64_t cur_level = maybe_layer->layerId();
4844 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4845 return at::_ops::div_Tensor::call(self, other);
4846 }
4847 Tensor self_value;
4848 optional<int64_t> self_bdim;
4849 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4850 Tensor other_value;
4851 optional<int64_t> other_bdim;
4852 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
4853 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
4854 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4855}
4856template <typename batch_rule_t, batch_rule_t batch_rule>
4857at::Tensor & div__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
4858 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4859 auto maybe_layer = maybeCurrentDynamicLayer();
4860 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4861 int64_t cur_level = maybe_layer->layerId();
4862 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4863 return at::_ops::div__Tensor::call(self, other);
4864 }
4865 Tensor self_value;
4866 optional<int64_t> self_bdim;
4867 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4868 Tensor other_value;
4869 optional<int64_t> other_bdim;
4870 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
4871 batch_rule(self_value, self_bdim, other_value, other_bdim);
4872 return self;
4873}
4874template <typename batch_rule_t, batch_rule_t batch_rule>
4875at::Tensor div_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
4876 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4877 auto maybe_layer = maybeCurrentDynamicLayer();
4878 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4879 int64_t cur_level = maybe_layer->layerId();
4880 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4881 return at::_ops::div_Tensor_mode::call(self, other, rounding_mode);
4882 }
4883 Tensor self_value;
4884 optional<int64_t> self_bdim;
4885 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4886 Tensor other_value;
4887 optional<int64_t> other_bdim;
4888 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
4889 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
4890 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4891}
4892template <typename batch_rule_t, batch_rule_t batch_rule>
4893at::Tensor & div__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
4894 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4895 auto maybe_layer = maybeCurrentDynamicLayer();
4896 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4897 int64_t cur_level = maybe_layer->layerId();
4898 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4899 return at::_ops::div__Tensor_mode::call(self, other, rounding_mode);
4900 }
4901 Tensor self_value;
4902 optional<int64_t> self_bdim;
4903 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4904 Tensor other_value;
4905 optional<int64_t> other_bdim;
4906 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
4907 batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
4908 return self;
4909}
4910template <typename batch_rule_t, batch_rule_t batch_rule>
4911at::Tensor div_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
4912 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4913 auto maybe_layer = maybeCurrentDynamicLayer();
4914 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4915 int64_t cur_level = maybe_layer->layerId();
4916 if (!isBatchedAtLevel(self, cur_level)) {
4917 return at::_ops::div_Scalar::call(self, other);
4918 }
4919 Tensor self_value;
4920 optional<int64_t> self_bdim;
4921 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4922 auto results = batch_rule(self_value, self_bdim, other);
4923 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4924}
4925template <typename batch_rule_t, batch_rule_t batch_rule>
4926at::Tensor & div__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
4927 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4928 auto maybe_layer = maybeCurrentDynamicLayer();
4929 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4930 int64_t cur_level = maybe_layer->layerId();
4931 if (!isBatchedAtLevel(self, cur_level)) {
4932 return at::_ops::div__Scalar::call(self, other);
4933 }
4934 Tensor self_value;
4935 optional<int64_t> self_bdim;
4936 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4937 batch_rule(self_value, self_bdim, other);
4938 return self;
4939}
4940template <typename batch_rule_t, batch_rule_t batch_rule>
4941at::Tensor div_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
4942 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4943 auto maybe_layer = maybeCurrentDynamicLayer();
4944 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4945 int64_t cur_level = maybe_layer->layerId();
4946 if (!isBatchedAtLevel(self, cur_level)) {
4947 return at::_ops::div_Scalar_mode::call(self, other, rounding_mode);
4948 }
4949 Tensor self_value;
4950 optional<int64_t> self_bdim;
4951 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4952 auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
4953 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4954}
4955template <typename batch_rule_t, batch_rule_t batch_rule>
4956at::Tensor & div__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
4957 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4958 auto maybe_layer = maybeCurrentDynamicLayer();
4959 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4960 int64_t cur_level = maybe_layer->layerId();
4961 if (!isBatchedAtLevel(self, cur_level)) {
4962 return at::_ops::div__Scalar_mode::call(self, other, rounding_mode);
4963 }
4964 Tensor self_value;
4965 optional<int64_t> self_bdim;
4966 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4967 batch_rule(self_value, self_bdim, other, rounding_mode);
4968 return self;
4969}
4970template <typename batch_rule_t, batch_rule_t batch_rule>
4971at::Tensor divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
4972 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4973 auto maybe_layer = maybeCurrentDynamicLayer();
4974 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
4975 int64_t cur_level = maybe_layer->layerId();
4976 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4977 return at::_ops::divide_Tensor::call(self, other);
4978 }
4979 Tensor self_value;
4980 optional<int64_t> self_bdim;
4981 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
4982 Tensor other_value;
4983 optional<int64_t> other_bdim;
4984 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
4985 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
4986 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
4987}
4988template <typename batch_rule_t, batch_rule_t batch_rule>
4989at::Tensor & divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
4990 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
4991 auto maybe_layer = maybeCurrentDynamicLayer();
4992 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
4993 int64_t cur_level = maybe_layer->layerId();
4994 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
4995 return at::_ops::divide__Tensor::call(self, other);
4996 }
4997 Tensor self_value;
4998 optional<int64_t> self_bdim;
4999 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5000 Tensor other_value;
5001 optional<int64_t> other_bdim;
5002 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
5003 batch_rule(self_value, self_bdim, other_value, other_bdim);
5004 return self;
5005}
5006template <typename batch_rule_t, batch_rule_t batch_rule>
5007at::Tensor divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
5008 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5009 auto maybe_layer = maybeCurrentDynamicLayer();
5010 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5011 int64_t cur_level = maybe_layer->layerId();
5012 if (!isBatchedAtLevel(self, cur_level)) {
5013 return at::_ops::divide_Scalar::call(self, other);
5014 }
5015 Tensor self_value;
5016 optional<int64_t> self_bdim;
5017 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5018 auto results = batch_rule(self_value, self_bdim, other);
5019 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5020}
5021template <typename batch_rule_t, batch_rule_t batch_rule>
5022at::Tensor & divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
5023 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5024 auto maybe_layer = maybeCurrentDynamicLayer();
5025 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5026 int64_t cur_level = maybe_layer->layerId();
5027 if (!isBatchedAtLevel(self, cur_level)) {
5028 return at::_ops::divide__Scalar::call(self, other);
5029 }
5030 Tensor self_value;
5031 optional<int64_t> self_bdim;
5032 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5033 batch_rule(self_value, self_bdim, other);
5034 return self;
5035}
5036template <typename batch_rule_t, batch_rule_t batch_rule>
5037at::Tensor divide_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
5038 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5039 auto maybe_layer = maybeCurrentDynamicLayer();
5040 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5041 int64_t cur_level = maybe_layer->layerId();
5042 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5043 return at::_ops::divide_Tensor_mode::call(self, other, rounding_mode);
5044 }
5045 Tensor self_value;
5046 optional<int64_t> self_bdim;
5047 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5048 Tensor other_value;
5049 optional<int64_t> other_bdim;
5050 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
5051 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
5052 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5053}
5054template <typename batch_rule_t, batch_rule_t batch_rule>
5055at::Tensor & divide__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
5056 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5057 auto maybe_layer = maybeCurrentDynamicLayer();
5058 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5059 int64_t cur_level = maybe_layer->layerId();
5060 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5061 return at::_ops::divide__Tensor_mode::call(self, other, rounding_mode);
5062 }
5063 Tensor self_value;
5064 optional<int64_t> self_bdim;
5065 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5066 Tensor other_value;
5067 optional<int64_t> other_bdim;
5068 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
5069 batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
5070 return self;
5071}
5072template <typename batch_rule_t, batch_rule_t batch_rule>
5073at::Tensor divide_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
5074 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5075 auto maybe_layer = maybeCurrentDynamicLayer();
5076 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5077 int64_t cur_level = maybe_layer->layerId();
5078 if (!isBatchedAtLevel(self, cur_level)) {
5079 return at::_ops::divide_Scalar_mode::call(self, other, rounding_mode);
5080 }
5081 Tensor self_value;
5082 optional<int64_t> self_bdim;
5083 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5084 auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
5085 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5086}
5087template <typename batch_rule_t, batch_rule_t batch_rule>
5088at::Tensor & divide__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
5089 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5090 auto maybe_layer = maybeCurrentDynamicLayer();
5091 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5092 int64_t cur_level = maybe_layer->layerId();
5093 if (!isBatchedAtLevel(self, cur_level)) {
5094 return at::_ops::divide__Scalar_mode::call(self, other, rounding_mode);
5095 }
5096 Tensor self_value;
5097 optional<int64_t> self_bdim;
5098 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5099 batch_rule(self_value, self_bdim, other, rounding_mode);
5100 return self;
5101}
5102template <typename batch_rule_t, batch_rule_t batch_rule>
5103at::Tensor true_divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
5104 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5105 auto maybe_layer = maybeCurrentDynamicLayer();
5106 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5107 int64_t cur_level = maybe_layer->layerId();
5108 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5109 return at::_ops::true_divide_Tensor::call(self, other);
5110 }
5111 Tensor self_value;
5112 optional<int64_t> self_bdim;
5113 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5114 Tensor other_value;
5115 optional<int64_t> other_bdim;
5116 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
5117 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
5118 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5119}
5120template <typename batch_rule_t, batch_rule_t batch_rule>
5121at::Tensor & true_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
5122 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5123 auto maybe_layer = maybeCurrentDynamicLayer();
5124 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5125 int64_t cur_level = maybe_layer->layerId();
5126 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5127 return at::_ops::true_divide__Tensor::call(self, other);
5128 }
5129 Tensor self_value;
5130 optional<int64_t> self_bdim;
5131 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5132 Tensor other_value;
5133 optional<int64_t> other_bdim;
5134 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
5135 batch_rule(self_value, self_bdim, other_value, other_bdim);
5136 return self;
5137}
5138template <typename batch_rule_t, batch_rule_t batch_rule>
5139at::Tensor true_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
5140 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5141 auto maybe_layer = maybeCurrentDynamicLayer();
5142 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5143 int64_t cur_level = maybe_layer->layerId();
5144 if (!isBatchedAtLevel(self, cur_level)) {
5145 return at::_ops::true_divide_Scalar::call(self, other);
5146 }
5147 Tensor self_value;
5148 optional<int64_t> self_bdim;
5149 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5150 auto results = batch_rule(self_value, self_bdim, other);
5151 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5152}
5153template <typename batch_rule_t, batch_rule_t batch_rule>
5154at::Tensor & true_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
5155 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5156 auto maybe_layer = maybeCurrentDynamicLayer();
5157 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5158 int64_t cur_level = maybe_layer->layerId();
5159 if (!isBatchedAtLevel(self, cur_level)) {
5160 return at::_ops::true_divide__Scalar::call(self, other);
5161 }
5162 Tensor self_value;
5163 optional<int64_t> self_bdim;
5164 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5165 batch_rule(self_value, self_bdim, other);
5166 return self;
5167}
5168template <typename batch_rule_t, batch_rule_t batch_rule>
5169at::Tensor dot_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor) {
5170 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5171 auto maybe_layer = maybeCurrentDynamicLayer();
5172 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5173 int64_t cur_level = maybe_layer->layerId();
5174 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor, cur_level)) {
5175 return at::_ops::dot::call(self, tensor);
5176 }
5177 Tensor self_value;
5178 optional<int64_t> self_bdim;
5179 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5180 Tensor tensor_value;
5181 optional<int64_t> tensor_bdim;
5182 std::tie(tensor_value, tensor_bdim) = unwrapTensorAtLevel(tensor, cur_level);
5183 auto results = batch_rule(self_value, self_bdim, tensor_value, tensor_bdim);
5184 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5185}
5186template <typename batch_rule_t, batch_rule_t batch_rule>
5187at::Tensor vdot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
5188 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5189 auto maybe_layer = maybeCurrentDynamicLayer();
5190 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5191 int64_t cur_level = maybe_layer->layerId();
5192 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5193 return at::_ops::vdot::call(self, other);
5194 }
5195 Tensor self_value;
5196 optional<int64_t> self_bdim;
5197 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5198 Tensor other_value;
5199 optional<int64_t> other_bdim;
5200 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
5201 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
5202 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5203}
5204template <typename batch_rule_t, batch_rule_t batch_rule>
5205at::Tensor einsum_generated_plumbing(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
5206 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5207 auto maybe_layer = maybeCurrentDynamicLayer();
5208 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5209 int64_t cur_level = maybe_layer->layerId();
5210 if (!isBatchedAtLevel(tensors, cur_level)) {
5211 return at::_ops::einsum::call(equation, tensors, path);
5212 }
5213
5214 auto results = batch_rule(equation, tensors, path);
5215 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5216}
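// Ops that take a TensorList (einsum above, row_stack further below) skip the per-tensor
// unwrapping step: the possibly-batched list is forwarded to the batch rule as-is, which
// is why no unwrapTensorAtLevel calls appear between the level check and the batch_rule
// invocation.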
5217template <typename batch_rule_t, batch_rule_t batch_rule>
5218at::Tensor embedding_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
5219 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5220 auto maybe_layer = maybeCurrentDynamicLayer();
5221 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5222 int64_t cur_level = maybe_layer->layerId();
5223 if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
5224 return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
5225 }
5226 Tensor weight_value;
5227 optional<int64_t> weight_bdim;
5228 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
5229 Tensor indices_value;
5230 optional<int64_t> indices_bdim;
5231 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5232 auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, padding_idx, scale_grad_by_freq, sparse);
5233 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5234}
5235template <typename batch_rule_t, batch_rule_t batch_rule>
5236at::Tensor embedding_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
5237 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5238 auto maybe_layer = maybeCurrentDynamicLayer();
5239 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5240 int64_t cur_level = maybe_layer->layerId();
5241 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
5242 return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
5243 }
5244 Tensor grad_value;
5245 optional<int64_t> grad_bdim;
5246 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
5247 Tensor indices_value;
5248 optional<int64_t> indices_bdim;
5249 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5250 auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq, sparse);
5251 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5252}
5253template <typename batch_rule_t, batch_rule_t batch_rule>
5254at::Tensor embedding_dense_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
5255 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5256 auto maybe_layer = maybeCurrentDynamicLayer();
5257 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5258 int64_t cur_level = maybe_layer->layerId();
5259 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
5260 return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
5261 }
5262 Tensor grad_output_value;
5263 optional<int64_t> grad_output_bdim;
5264 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
5265 Tensor indices_value;
5266 optional<int64_t> indices_bdim;
5267 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5268 auto results = batch_rule(grad_output_value, grad_output_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
5269 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5270}
5271template <typename batch_rule_t, batch_rule_t batch_rule>
5272at::Tensor & embedding_renorm__generated_plumbing(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
5273 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5274 auto maybe_layer = maybeCurrentDynamicLayer();
5275 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5276 int64_t cur_level = maybe_layer->layerId();
5277 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
5278 return at::_ops::embedding_renorm_::call(self, indices, max_norm, norm_type);
5279 }
5280 Tensor self_value;
5281 optional<int64_t> self_bdim;
5282 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5283 Tensor indices_value;
5284 optional<int64_t> indices_bdim;
5285 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5286 batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
5287 return self;
5288}
5289template <typename batch_rule_t, batch_rule_t batch_rule>
5290at::Tensor embedding_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
5291 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5292 auto maybe_layer = maybeCurrentDynamicLayer();
5293 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5294 int64_t cur_level = maybe_layer->layerId();
5295 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
5296 return at::_ops::embedding_sparse_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
5297 }
5298 Tensor grad_value;
5299 optional<int64_t> grad_bdim;
5300 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
5301 Tensor indices_value;
5302 optional<int64_t> indices_bdim;
5303 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5304 auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
5305 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5306}
5307template <typename batch_rule_t, batch_rule_t batch_rule>
5308::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
5309 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5310 auto maybe_layer = maybeCurrentDynamicLayer();
5311 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5312 int64_t cur_level = maybe_layer->layerId();
5313 if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
5314 return at::_ops::_embedding_bag_forward_only::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
5315 }
5316 Tensor weight_value;
5317 optional<int64_t> weight_bdim;
5318 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
5319 Tensor indices_value;
5320 optional<int64_t> indices_bdim;
5321 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5322 Tensor offsets_value;
5323 optional<int64_t> offsets_bdim;
5324 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
5325 optional<Tensor> per_sample_weights_value;
5326 optional<int64_t> per_sample_weights_bdim;
5327 if (per_sample_weights) {
5328 std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
5329 }
5330 auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
5331 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
5332}
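// Multi-output ops follow the same pattern element-wise: the batch rule returns a
// flattened tuple of (value, bdim) pairs, and each pair is re-wrapped with makeBatched
// before being packed into the op's declared return tuple. Optional tensor arguments
// such as per_sample_weights are unwrapped only when present; otherwise an empty
// optional value/bdim pair is passed through to the batch rule.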
5333template <typename batch_rule_t, batch_rule_t batch_rule>
5334::std::tuple<at::Tensor,at::Tensor> _rowwise_prune_generated_plumbing(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
5335 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5336 auto maybe_layer = maybeCurrentDynamicLayer();
5337 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5338 int64_t cur_level = maybe_layer->layerId();
5339 if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
5340 return at::_ops::_rowwise_prune::call(weight, mask, compressed_indices_dtype);
5341 }
5342 Tensor weight_value;
5343 optional<int64_t> weight_bdim;
5344 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
5345 Tensor mask_value;
5346 optional<int64_t> mask_bdim;
5347 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
5348 auto results = batch_rule(weight_value, weight_bdim, mask_value, mask_bdim, compressed_indices_dtype);
5349 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
5350}
5351template <typename batch_rule_t, batch_rule_t batch_rule>
5352at::Tensor row_stack_generated_plumbing(at::TensorList tensors) {
5353 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5354 auto maybe_layer = maybeCurrentDynamicLayer();
5355 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5356 int64_t cur_level = maybe_layer->layerId();
5357 if (!isBatchedAtLevel(tensors, cur_level)) {
5358 return at::_ops::row_stack::call(tensors);
5359 }
5360
5361 auto results = batch_rule(tensors);
5362 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5363}
5364template <typename batch_rule_t, batch_rule_t batch_rule>
5365::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
5366 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5367 auto maybe_layer = maybeCurrentDynamicLayer();
5368 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5369 int64_t cur_level = maybe_layer->layerId();
5370 if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
5371 return at::_ops::embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
5372 }
5373 Tensor weight_value;
5374 optional<int64_t> weight_bdim;
5375 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
5376 Tensor indices_value;
5377 optional<int64_t> indices_bdim;
5378 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5379 Tensor offsets_value;
5380 optional<int64_t> offsets_bdim;
5381 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
5382 optional<Tensor> per_sample_weights_value;
5383 optional<int64_t> per_sample_weights_bdim;
5384 if (per_sample_weights) {
5385 std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
5386 }
5387 auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset);
5388 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
5389}
5390template <typename batch_rule_t, batch_rule_t batch_rule>
5391::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {
5392 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5393 auto maybe_layer = maybeCurrentDynamicLayer();
5394 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5395 int64_t cur_level = maybe_layer->layerId();
5396 if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
5397 return at::_ops::embedding_bag_padding_idx::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
5398 }
5399 Tensor weight_value;
5400 optional<int64_t> weight_bdim;
5401 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
5402 Tensor indices_value;
5403 optional<int64_t> indices_bdim;
5404 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5405 Tensor offsets_value;
5406 optional<int64_t> offsets_bdim;
5407 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
5408 optional<Tensor> per_sample_weights_value;
5409 optional<int64_t> per_sample_weights_bdim;
5410 if (per_sample_weights) {
5411 std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
5412 }
5413 auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
5414 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
5415}
5416template <typename batch_rule_t, batch_rule_t batch_rule>
5417::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
5418 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5419 auto maybe_layer = maybeCurrentDynamicLayer();
5420 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5421 int64_t cur_level = maybe_layer->layerId();
5422 if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
5423 return at::_ops::_embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
5424 }
5425 Tensor weight_value;
5426 optional<int64_t> weight_bdim;
5427 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
5428 Tensor indices_value;
5429 optional<int64_t> indices_bdim;
5430 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5431 Tensor offsets_value;
5432 optional<int64_t> offsets_bdim;
5433 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
5434 optional<Tensor> per_sample_weights_value;
5435 optional<int64_t> per_sample_weights_bdim;
5436 if (per_sample_weights) {
5437 std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
5438 }
5439 auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
5440 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
5441}
5442template <typename batch_rule_t, batch_rule_t batch_rule>
5443at::Tensor _embedding_bag_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
5444 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5445 auto maybe_layer = maybeCurrentDynamicLayer();
5446 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5447 int64_t cur_level = maybe_layer->layerId();
5448 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
5449 return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
5450 }
5451 Tensor grad_value;
5452 optional<int64_t> grad_bdim;
5453 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
5454 Tensor indices_value;
5455 optional<int64_t> indices_bdim;
5456 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5457 Tensor offsets_value;
5458 optional<int64_t> offsets_bdim;
5459 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
5460 Tensor offset2bag_value;
5461 optional<int64_t> offset2bag_bdim;
5462 std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
5463 Tensor bag_size_value;
5464 optional<int64_t> bag_size_bdim;
5465 std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
5466 Tensor maximum_indices_value;
5467 optional<int64_t> maximum_indices_bdim;
5468 std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level);
5469 optional<Tensor> per_sample_weights_value;
5470 optional<int64_t> per_sample_weights_bdim;
5471 if (per_sample_weights) {
5472 std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
5473 }
5474 auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
5475 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5476}
5477template <typename batch_rule_t, batch_rule_t batch_rule>
5478at::Tensor _embedding_bag_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
5479 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5480 auto maybe_layer = maybeCurrentDynamicLayer();
5481 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5482 int64_t cur_level = maybe_layer->layerId();
5483 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
5484 return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
5485 }
5486 Tensor grad_value;
5487 optional<int64_t> grad_bdim;
5488 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
5489 Tensor indices_value;
5490 optional<int64_t> indices_bdim;
5491 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5492 Tensor offsets_value;
5493 optional<int64_t> offsets_bdim;
5494 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
5495 Tensor offset2bag_value;
5496 optional<int64_t> offset2bag_bdim;
5497 std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
5498 Tensor bag_size_value;
5499 optional<int64_t> bag_size_bdim;
5500 std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
5501 optional<Tensor> per_sample_weights_value;
5502 optional<int64_t> per_sample_weights_bdim;
5503 if (per_sample_weights) {
5504 std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
5505 }
5506 auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
5507 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5508}
5509template <typename batch_rule_t, batch_rule_t batch_rule>
5510at::Tensor _embedding_bag_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
5511 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5512 auto maybe_layer = maybeCurrentDynamicLayer();
5513 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5514 int64_t cur_level = maybe_layer->layerId();
5515 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
5516 return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
5517 }
5518 Tensor grad_value;
5519 optional<int64_t> grad_bdim;
5520 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
5521 Tensor indices_value;
5522 optional<int64_t> indices_bdim;
5523 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5524 Tensor offset2bag_value;
5525 optional<int64_t> offset2bag_bdim;
5526 std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
5527 Tensor bag_size_value;
5528 optional<int64_t> bag_size_bdim;
5529 std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
5530 Tensor maximum_indices_value;
5531 optional<int64_t> maximum_indices_bdim;
5532 std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level);
5533 optional<Tensor> per_sample_weights_value;
5534 optional<int64_t> per_sample_weights_bdim;
5535 if (per_sample_weights) {
5536 std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
5537 }
5538 auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
5539 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5540}
5541template <typename batch_rule_t, batch_rule_t batch_rule>
5542at::Tensor _embedding_bag_per_sample_weights_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
5543 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5544 auto maybe_layer = maybeCurrentDynamicLayer();
5545 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5546 int64_t cur_level = maybe_layer->layerId();
5547 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level)) {
5548 return at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
5549 }
5550 Tensor grad_value;
5551 optional<int64_t> grad_bdim;
5552 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
5553 Tensor weight_value;
5554 optional<int64_t> weight_bdim;
5555 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
5556 Tensor indices_value;
5557 optional<int64_t> indices_bdim;
5558 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
5559 Tensor offsets_value;
5560 optional<int64_t> offsets_bdim;
5561 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
5562 Tensor offset2bag_value;
5563 optional<int64_t> offset2bag_bdim;
5564 std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
5565 auto results = batch_rule(grad_value, grad_bdim, weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, mode, padding_idx);
5566 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5567}
5568template <typename batch_rule_t, batch_rule_t batch_rule>
5569at::Tensor new_empty_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5570 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5571 auto maybe_layer = maybeCurrentDynamicLayer();
5572 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5573 int64_t cur_level = maybe_layer->layerId();
5574 if (!isBatchedAtLevel(self, cur_level)) {
5575 return at::_ops::new_empty::call(self, size, dtype, layout, device, pin_memory);
5576 }
5577 Tensor self_value;
5578 optional<int64_t> self_bdim;
5579 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5580 auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
5581 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5582}
5583template <typename batch_rule_t, batch_rule_t batch_rule>
5584at::Tensor new_empty_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5585 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5586 auto maybe_layer = maybeCurrentDynamicLayer();
5587 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5588 int64_t cur_level = maybe_layer->layerId();
5589 if (!isBatchedAtLevel(self, cur_level)) {
5590 return at::_ops::new_empty_strided::call(self, size, stride, dtype, layout, device, pin_memory);
5591 }
5592 Tensor self_value;
5593 optional<int64_t> self_bdim;
5594 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5595 auto results = batch_rule(self_value, self_bdim, size, stride, dtype, layout, device, pin_memory);
5596 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5597}
5598template <typename batch_rule_t, batch_rule_t batch_rule>
5599at::Tensor new_full_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5600 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5601 auto maybe_layer = maybeCurrentDynamicLayer();
5602 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5603 int64_t cur_level = maybe_layer->layerId();
5604 if (!isBatchedAtLevel(self, cur_level)) {
5605 return at::_ops::new_full::call(self, size, fill_value, dtype, layout, device, pin_memory);
5606 }
5607 Tensor self_value;
5608 optional<int64_t> self_bdim;
5609 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5610 auto results = batch_rule(self_value, self_bdim, size, fill_value, dtype, layout, device, pin_memory);
5611 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5612}
5613template <typename batch_rule_t, batch_rule_t batch_rule>
5614at::Tensor new_zeros_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5615 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5616 auto maybe_layer = maybeCurrentDynamicLayer();
5617 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5618 int64_t cur_level = maybe_layer->layerId();
5619 if (!isBatchedAtLevel(self, cur_level)) {
5620 return at::_ops::new_zeros::call(self, size, dtype, layout, device, pin_memory);
5621 }
5622 Tensor self_value;
5623 optional<int64_t> self_bdim;
5624 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5625 auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
5626 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5627}
5628template <typename batch_rule_t, batch_rule_t batch_rule>
5629at::Tensor new_ones_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
5630 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5631 auto maybe_layer = maybeCurrentDynamicLayer();
5632 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5633 int64_t cur_level = maybe_layer->layerId();
5634 if (!isBatchedAtLevel(self, cur_level)) {
5635 return at::_ops::new_ones::call(self, size, dtype, layout, device, pin_memory);
5636 }
5637 Tensor self_value;
5638 optional<int64_t> self_bdim;
5639 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5640 auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
5641 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5642}
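// The Tensor.new_* factory ops above unwrap only `self`; the TensorOptions components
// (dtype, layout, device, pin_memory) and the symbolic sizes are forwarded to the batch
// rule unchanged.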
5643template <typename batch_rule_t, batch_rule_t batch_rule>
5644at::Tensor _empty_per_channel_affine_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
5645 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5646 auto maybe_layer = maybeCurrentDynamicLayer();
5647 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5648 int64_t cur_level = maybe_layer->layerId();
5649 if (!isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
5650 return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
5651 }
5652 Tensor scales_value;
5653 optional<int64_t> scales_bdim;
5654 std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
5655 Tensor zero_points_value;
5656 optional<int64_t> zero_points_bdim;
5657 std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
5658 auto results = batch_rule(size, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype, layout, device, pin_memory, memory_format);
5659 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5660}
5661template <typename batch_rule_t, batch_rule_t batch_rule>
5662const at::Tensor & _resize_output__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
5663 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5664 auto maybe_layer = maybeCurrentDynamicLayer();
5665 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5666 int64_t cur_level = maybe_layer->layerId();
5667 if (!isBatchedAtLevel(self, cur_level)) {
5668 return at::_ops::_resize_output_::call(self, size, device);
5669 }
5670 Tensor self_value;
5671 optional<int64_t> self_bdim;
5672 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5673 batch_rule(self_value, self_bdim, size, device);
5674 return self;
5675}
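// _resize_output_ is declared with a `const Tensor&` self but still uses the in-place
// template: the batch rule is invoked purely for its side effect on the unwrapped
// self_value, and the original `self` reference is returned unchanged.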
5676template <typename batch_rule_t, batch_rule_t batch_rule>
5677at::Tensor empty_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
5678 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5679 auto maybe_layer = maybeCurrentDynamicLayer();
5680 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5681 int64_t cur_level = maybe_layer->layerId();
5682 if (!isBatchedAtLevel(qtensor, cur_level)) {
5683 return at::_ops::empty_quantized::call(size, qtensor, dtype, layout, device, pin_memory, memory_format);
5684 }
5685 Tensor qtensor_value;
5686 optional<int64_t> qtensor_bdim;
5687 std::tie(qtensor_value, qtensor_bdim) = unwrapTensorAtLevel(qtensor, cur_level);
5688 auto results = batch_rule(size, qtensor_value, qtensor_bdim, dtype, layout, device, pin_memory, memory_format);
5689 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5690}
5691template <typename batch_rule_t, batch_rule_t batch_rule>
5692at::Tensor empty_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
5693 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5694 auto maybe_layer = maybeCurrentDynamicLayer();
5695 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5696 int64_t cur_level = maybe_layer->layerId();
5697 if (!isBatchedAtLevel(self, cur_level)) {
5698 return at::_ops::empty_like::call(self, dtype, layout, device, pin_memory, memory_format);
5699 }
5700 Tensor self_value;
5701 optional<int64_t> self_bdim;
5702 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5703 auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
5704 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5705}
5706template <typename batch_rule_t, batch_rule_t batch_rule>
5707at::Tensor erf_generated_plumbing(const at::Tensor & self) {
5708 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5709 auto maybe_layer = maybeCurrentDynamicLayer();
5710 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5711 int64_t cur_level = maybe_layer->layerId();
5712 if (!isBatchedAtLevel(self, cur_level)) {
5713 return at::_ops::erf::call(self);
5714 }
5715 Tensor self_value;
5716 optional<int64_t> self_bdim;
5717 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5718 auto results = batch_rule(self_value, self_bdim);
5719 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5720}
5721template <typename batch_rule_t, batch_rule_t batch_rule>
5722at::Tensor & erf__generated_plumbing(at::Tensor & self) {
5723 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5724 auto maybe_layer = maybeCurrentDynamicLayer();
5725 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5726 int64_t cur_level = maybe_layer->layerId();
5727 if (!isBatchedAtLevel(self, cur_level)) {
5728 return at::_ops::erf_::call(self);
5729 }
5730 Tensor self_value;
5731 optional<int64_t> self_bdim;
5732 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5733 batch_rule(self_value, self_bdim);
5734 return self;
5735}
5736template <typename batch_rule_t, batch_rule_t batch_rule>
5737at::Tensor erfc_generated_plumbing(const at::Tensor & self) {
5738 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5739 auto maybe_layer = maybeCurrentDynamicLayer();
5740 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5741 int64_t cur_level = maybe_layer->layerId();
5742 if (!isBatchedAtLevel(self, cur_level)) {
5743 return at::_ops::erfc::call(self);
5744 }
5745 Tensor self_value;
5746 optional<int64_t> self_bdim;
5747 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5748 auto results = batch_rule(self_value, self_bdim);
5749 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5750}
5751template <typename batch_rule_t, batch_rule_t batch_rule>
5752at::Tensor & erfc__generated_plumbing(at::Tensor & self) {
5753 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5754 auto maybe_layer = maybeCurrentDynamicLayer();
5755 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5756 int64_t cur_level = maybe_layer->layerId();
5757 if (!isBatchedAtLevel(self, cur_level)) {
5758 return at::_ops::erfc_::call(self);
5759 }
5760 Tensor self_value;
5761 optional<int64_t> self_bdim;
5762 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5763 batch_rule(self_value, self_bdim);
5764 return self;
5765}
5766template <typename batch_rule_t, batch_rule_t batch_rule>
5767at::Tensor exp_generated_plumbing(const at::Tensor & self) {
5768 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5769 auto maybe_layer = maybeCurrentDynamicLayer();
5770 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5771 int64_t cur_level = maybe_layer->layerId();
5772 if (!isBatchedAtLevel(self, cur_level)) {
5773 return at::_ops::exp::call(self);
5774 }
5775 Tensor self_value;
5776 optional<int64_t> self_bdim;
5777 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5778 auto results = batch_rule(self_value, self_bdim);
5779 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5780}
5781template <typename batch_rule_t, batch_rule_t batch_rule>
5782at::Tensor & exp__generated_plumbing(at::Tensor & self) {
5783 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5784 auto maybe_layer = maybeCurrentDynamicLayer();
5785 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5786 int64_t cur_level = maybe_layer->layerId();
5787 if (!isBatchedAtLevel(self, cur_level)) {
5788 return at::_ops::exp_::call(self);
5789 }
5790 Tensor self_value;
5791 optional<int64_t> self_bdim;
5792 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5793 batch_rule(self_value, self_bdim);
5794 return self;
5795}
5796template <typename batch_rule_t, batch_rule_t batch_rule>
5797at::Tensor exp2_generated_plumbing(const at::Tensor & self) {
5798 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5799 auto maybe_layer = maybeCurrentDynamicLayer();
5800 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5801 int64_t cur_level = maybe_layer->layerId();
5802 if (!isBatchedAtLevel(self, cur_level)) {
5803 return at::_ops::exp2::call(self);
5804 }
5805 Tensor self_value;
5806 optional<int64_t> self_bdim;
5807 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5808 auto results = batch_rule(self_value, self_bdim);
5809 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5810}
5811template <typename batch_rule_t, batch_rule_t batch_rule>
5812at::Tensor & exp2__generated_plumbing(at::Tensor & self) {
5813 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5814 auto maybe_layer = maybeCurrentDynamicLayer();
5815 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5816 int64_t cur_level = maybe_layer->layerId();
5817 if (!isBatchedAtLevel(self, cur_level)) {
5818 return at::_ops::exp2_::call(self);
5819 }
5820 Tensor self_value;
5821 optional<int64_t> self_bdim;
5822 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5823 batch_rule(self_value, self_bdim);
5824 return self;
5825}
5826template <typename batch_rule_t, batch_rule_t batch_rule>
5827at::Tensor expm1_generated_plumbing(const at::Tensor & self) {
5828 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5829 auto maybe_layer = maybeCurrentDynamicLayer();
5830 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5831 int64_t cur_level = maybe_layer->layerId();
5832 if (!isBatchedAtLevel(self, cur_level)) {
5833 return at::_ops::expm1::call(self);
5834 }
5835 Tensor self_value;
5836 optional<int64_t> self_bdim;
5837 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5838 auto results = batch_rule(self_value, self_bdim);
5839 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5840}
5841template <typename batch_rule_t, batch_rule_t batch_rule>
5842at::Tensor & expm1__generated_plumbing(at::Tensor & self) {
5843 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5844 auto maybe_layer = maybeCurrentDynamicLayer();
5845 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
5846 int64_t cur_level = maybe_layer->layerId();
5847 if (!isBatchedAtLevel(self, cur_level)) {
5848 return at::_ops::expm1_::call(self);
5849 }
5850 Tensor self_value;
5851 optional<int64_t> self_bdim;
5852 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5853 batch_rule(self_value, self_bdim);
5854 return self;
5855}
5856template <typename batch_rule_t, batch_rule_t batch_rule>
5857at::Tensor expand_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
5858 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5859 auto maybe_layer = maybeCurrentDynamicLayer();
5860 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5861 int64_t cur_level = maybe_layer->layerId();
5862 if (!isBatchedAtLevel(self, cur_level)) {
5863 return at::_ops::expand::call(self, size, implicit);
5864 }
5865 Tensor self_value;
5866 optional<int64_t> self_bdim;
5867 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5868 auto results = batch_rule(self_value, self_bdim, size, implicit);
5869 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5870}
5871template <typename batch_rule_t, batch_rule_t batch_rule>
5872at::Tensor expand_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
5873 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5874 auto maybe_layer = maybeCurrentDynamicLayer();
5875 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5876 int64_t cur_level = maybe_layer->layerId();
5877 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
5878 return at::_ops::expand_as::call(self, other);
5879 }
5880 Tensor self_value;
5881 optional<int64_t> self_bdim;
5882 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5883 Tensor other_value;
5884 optional<int64_t> other_bdim;
5885 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
5886 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
5887 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5888}
5889template <typename batch_rule_t, batch_rule_t batch_rule>
5890at::Tensor flatten_using_ints_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
5891 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5892 auto maybe_layer = maybeCurrentDynamicLayer();
5893 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5894 int64_t cur_level = maybe_layer->layerId();
5895 if (!isBatchedAtLevel(self, cur_level)) {
5896 return at::_ops::flatten_using_ints::call(self, start_dim, end_dim);
5897 }
5898 Tensor self_value;
5899 optional<int64_t> self_bdim;
5900 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5901 auto results = batch_rule(self_value, self_bdim, start_dim, end_dim);
5902 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5903}
5904template <typename batch_rule_t, batch_rule_t batch_rule>
5905at::Tensor flatten_named_out_dim_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
5906 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5907 auto maybe_layer = maybeCurrentDynamicLayer();
5908 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5909 int64_t cur_level = maybe_layer->layerId();
5910 if (!isBatchedAtLevel(self, cur_level)) {
5911 return at::_ops::flatten_named_out_dim::call(self, start_dim, end_dim, out_dim);
5912 }
5913 Tensor self_value;
5914 optional<int64_t> self_bdim;
5915 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5916 auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
5917 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5918}
5919template <typename batch_rule_t, batch_rule_t batch_rule>
5920at::Tensor flatten_using_names_generated_plumbing(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
5921 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5922 auto maybe_layer = maybeCurrentDynamicLayer();
5923 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5924 int64_t cur_level = maybe_layer->layerId();
5925 if (!isBatchedAtLevel(self, cur_level)) {
5926 return at::_ops::flatten_using_names::call(self, start_dim, end_dim, out_dim);
5927 }
5928 Tensor self_value;
5929 optional<int64_t> self_bdim;
5930 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5931 auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
5932 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5933}
5934template <typename batch_rule_t, batch_rule_t batch_rule>
5935at::Tensor flatten_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
5936 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5937 auto maybe_layer = maybeCurrentDynamicLayer();
5938 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5939 int64_t cur_level = maybe_layer->layerId();
5940 if (!isBatchedAtLevel(self, cur_level)) {
5941 return at::_ops::flatten_DimnameList::call(self, dims, out_dim);
5942 }
5943 Tensor self_value;
5944 optional<int64_t> self_bdim;
5945 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5946 auto results = batch_rule(self_value, self_bdim, dims, out_dim);
5947 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5948}
5949template <typename batch_rule_t, batch_rule_t batch_rule>
5950at::Tensor unflatten_int_generated_plumbing(const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) {
5951 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5952 auto maybe_layer = maybeCurrentDynamicLayer();
5953 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5954 int64_t cur_level = maybe_layer->layerId();
5955 if (!isBatchedAtLevel(self, cur_level)) {
5956 return at::_ops::unflatten_int::call(self, dim, sizes);
5957 }
5958 Tensor self_value;
5959 optional<int64_t> self_bdim;
5960 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5961 auto results = batch_rule(self_value, self_bdim, dim, sizes);
5962 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5963}
5964template <typename batch_rule_t, batch_rule_t batch_rule>
5965at::Tensor unflatten_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) {
5966 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5967 auto maybe_layer = maybeCurrentDynamicLayer();
5968 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5969 int64_t cur_level = maybe_layer->layerId();
5970 if (!isBatchedAtLevel(self, cur_level)) {
5971 return at::_ops::unflatten_Dimname::call(self, dim, sizes, names);
5972 }
5973 Tensor self_value;
5974 optional<int64_t> self_bdim;
5975 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5976 auto results = batch_rule(self_value, self_bdim, dim, sizes, names);
5977 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5978}
5979template <typename batch_rule_t, batch_rule_t batch_rule>
5980at::Tensor fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & value) {
5981 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5982 auto maybe_layer = maybeCurrentDynamicLayer();
5983 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5984 int64_t cur_level = maybe_layer->layerId();
5985 if (!isBatchedAtLevel(self, cur_level)) {
5986 return at::_ops::fill_Scalar::call(self, value);
5987 }
5988 Tensor self_value;
5989 optional<int64_t> self_bdim;
5990 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
5991 auto results = batch_rule(self_value, self_bdim, value);
5992 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
5993}
5994template <typename batch_rule_t, batch_rule_t batch_rule>
5995at::Tensor fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & value) {
5996 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
5997 auto maybe_layer = maybeCurrentDynamicLayer();
5998 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
5999 int64_t cur_level = maybe_layer->layerId();
6000 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
6001 return at::_ops::fill_Tensor::call(self, value);
6002 }
6003 Tensor self_value;
6004 optional<int64_t> self_bdim;
6005 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6006 Tensor value_value;
6007 optional<int64_t> value_bdim;
6008 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
6009 auto results = batch_rule(self_value, self_bdim, value_value, value_bdim);
6010 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6011}
6012template <typename batch_rule_t, batch_rule_t batch_rule>
6013at::Tensor & fill__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & value) {
6014 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6015 auto maybe_layer = maybeCurrentDynamicLayer();
6016 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6017 int64_t cur_level = maybe_layer->layerId();
6018 if (!isBatchedAtLevel(self, cur_level)) {
6019 return at::_ops::fill__Scalar::call(self, value);
6020 }
6021 Tensor self_value;
6022 optional<int64_t> self_bdim;
6023 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6024 batch_rule(self_value, self_bdim, value);
6025 return self;
6026}
6027template <typename batch_rule_t, batch_rule_t batch_rule>
6028at::Tensor & fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & value) {
6029 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6030 auto maybe_layer = maybeCurrentDynamicLayer();
6031 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6032 int64_t cur_level = maybe_layer->layerId();
6033 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
6034 return at::_ops::fill__Tensor::call(self, value);
6035 }
6036 Tensor self_value;
6037 optional<int64_t> self_bdim;
6038 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6039 Tensor value_value;
6040 optional<int64_t> value_bdim;
6041 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
6042 batch_rule(self_value, self_bdim, value_value, value_bdim);
6043 return self;
6044}
6045template <typename batch_rule_t, batch_rule_t batch_rule>
6046at::Tensor floor_generated_plumbing(const at::Tensor & self) {
6047 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6048 auto maybe_layer = maybeCurrentDynamicLayer();
6049 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6050 int64_t cur_level = maybe_layer->layerId();
6051 if (!isBatchedAtLevel(self, cur_level)) {
6052 return at::_ops::floor::call(self);
6053 }
6054 Tensor self_value;
6055 optional<int64_t> self_bdim;
6056 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6057 auto results = batch_rule(self_value, self_bdim);
6058 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6059}
6060template <typename batch_rule_t, batch_rule_t batch_rule>
6061at::Tensor & floor__generated_plumbing(at::Tensor & self) {
6062 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6063 auto maybe_layer = maybeCurrentDynamicLayer();
6064 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6065 int64_t cur_level = maybe_layer->layerId();
6066 if (!isBatchedAtLevel(self, cur_level)) {
6067 return at::_ops::floor_::call(self);
6068 }
6069 Tensor self_value;
6070 optional<int64_t> self_bdim;
6071 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6072 batch_rule(self_value, self_bdim);
6073 return self;
6074}
6075template <typename batch_rule_t, batch_rule_t batch_rule>
6076at::Tensor floor_divide_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
6077 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6078 auto maybe_layer = maybeCurrentDynamicLayer();
6079 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6080 int64_t cur_level = maybe_layer->layerId();
6081 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6082 return at::_ops::floor_divide::call(self, other);
6083 }
6084 Tensor self_value;
6085 optional<int64_t> self_bdim;
6086 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6087 Tensor other_value;
6088 optional<int64_t> other_bdim;
6089 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
6090 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
6091 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6092}
6093template <typename batch_rule_t, batch_rule_t batch_rule>
6094at::Tensor & floor_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
6095 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6096 auto maybe_layer = maybeCurrentDynamicLayer();
6097 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6098 int64_t cur_level = maybe_layer->layerId();
6099 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6100 return at::_ops::floor_divide__Tensor::call(self, other);
6101 }
6102 Tensor self_value;
6103 optional<int64_t> self_bdim;
6104 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6105 Tensor other_value;
6106 optional<int64_t> other_bdim;
6107 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
6108 batch_rule(self_value, self_bdim, other_value, other_bdim);
6109 return self;
6110}
6111template <typename batch_rule_t, batch_rule_t batch_rule>
6112at::Tensor floor_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
6113 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6114 auto maybe_layer = maybeCurrentDynamicLayer();
6115 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6116 int64_t cur_level = maybe_layer->layerId();
6117 if (!isBatchedAtLevel(self, cur_level)) {
6118 return at::_ops::floor_divide_Scalar::call(self, other);
6119 }
6120 Tensor self_value;
6121 optional<int64_t> self_bdim;
6122 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6123 auto results = batch_rule(self_value, self_bdim, other);
6124 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6125}
6126template <typename batch_rule_t, batch_rule_t batch_rule>
6127at::Tensor & floor_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
6128 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6129 auto maybe_layer = maybeCurrentDynamicLayer();
6130 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6131 int64_t cur_level = maybe_layer->layerId();
6132 if (!isBatchedAtLevel(self, cur_level)) {
6133 return at::_ops::floor_divide__Scalar::call(self, other);
6134 }
6135 Tensor self_value;
6136 optional<int64_t> self_bdim;
6137 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6138 batch_rule(self_value, self_bdim, other);
6139 return self;
6140}
6141template <typename batch_rule_t, batch_rule_t batch_rule>
6142at::Tensor frac_generated_plumbing(const at::Tensor & self) {
6143 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6144 auto maybe_layer = maybeCurrentDynamicLayer();
6145 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6146 int64_t cur_level = maybe_layer->layerId();
6147 if (!isBatchedAtLevel(self, cur_level)) {
6148 return at::_ops::frac::call(self);
6149 }
6150 Tensor self_value;
6151 optional<int64_t> self_bdim;
6152 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6153 auto results = batch_rule(self_value, self_bdim);
6154 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6155}
6156template <typename batch_rule_t, batch_rule_t batch_rule>
6157at::Tensor & frac__generated_plumbing(at::Tensor & self) {
6158 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6159 auto maybe_layer = maybeCurrentDynamicLayer();
6160 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6161 int64_t cur_level = maybe_layer->layerId();
6162 if (!isBatchedAtLevel(self, cur_level)) {
6163 return at::_ops::frac_::call(self);
6164 }
6165 Tensor self_value;
6166 optional<int64_t> self_bdim;
6167 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6168 batch_rule(self_value, self_bdim);
6169 return self;
6170}
6171template <typename batch_rule_t, batch_rule_t batch_rule>
6172at::Tensor full_like_generated_plumbing(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
6173 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6174 auto maybe_layer = maybeCurrentDynamicLayer();
6175 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6176 int64_t cur_level = maybe_layer->layerId();
6177 if (!isBatchedAtLevel(self, cur_level)) {
6178 return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
6179 }
6180 Tensor self_value;
6181 optional<int64_t> self_bdim;
6182 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6183 auto results = batch_rule(self_value, self_bdim, fill_value, dtype, layout, device, pin_memory, memory_format);
6184 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6185}
6186template <typename batch_rule_t, batch_rule_t batch_rule>
6187at::Tensor gcd_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
6188 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6189 auto maybe_layer = maybeCurrentDynamicLayer();
6190 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6191 int64_t cur_level = maybe_layer->layerId();
6192 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6193 return at::_ops::gcd::call(self, other);
6194 }
6195 Tensor self_value;
6196 optional<int64_t> self_bdim;
6197 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6198 Tensor other_value;
6199 optional<int64_t> other_bdim;
6200 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
6201 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
6202 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6203}
6204template <typename batch_rule_t, batch_rule_t batch_rule>
6205at::Tensor & gcd__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
6206 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6207 auto maybe_layer = maybeCurrentDynamicLayer();
6208 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6209 int64_t cur_level = maybe_layer->layerId();
6210 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6211 return at::_ops::gcd_::call(self, other);
6212 }
6213 Tensor self_value;
6214 optional<int64_t> self_bdim;
6215 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6216 Tensor other_value;
6217 optional<int64_t> other_bdim;
6218 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
6219 batch_rule(self_value, self_bdim, other_value, other_bdim);
6220 return self;
6221}
6222template <typename batch_rule_t, batch_rule_t batch_rule>
6223at::Tensor lcm_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
6224 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6225 auto maybe_layer = maybeCurrentDynamicLayer();
6226 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6227 int64_t cur_level = maybe_layer->layerId();
6228 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6229 return at::_ops::lcm::call(self, other);
6230 }
6231 Tensor self_value;
6232 optional<int64_t> self_bdim;
6233 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6234 Tensor other_value;
6235 optional<int64_t> other_bdim;
6236 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
6237 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
6238 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6239}
6240template <typename batch_rule_t, batch_rule_t batch_rule>
6241at::Tensor & lcm__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
6242 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6243 auto maybe_layer = maybeCurrentDynamicLayer();
6244 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6245 int64_t cur_level = maybe_layer->layerId();
6246 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6247 return at::_ops::lcm_::call(self, other);
6248 }
6249 Tensor self_value;
6250 optional<int64_t> self_bdim;
6251 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6252 Tensor other_value;
6253 optional<int64_t> other_bdim;
6254 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
6255 batch_rule(self_value, self_bdim, other_value, other_bdim);
6256 return self;
6257}
6258template <typename batch_rule_t, batch_rule_t batch_rule>
6259at::Tensor grid_sampler_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
6260 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6261 auto maybe_layer = maybeCurrentDynamicLayer();
6262 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6263 int64_t cur_level = maybe_layer->layerId();
6264 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
6265 return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners);
6266 }
6267 Tensor input_value;
6268 optional<int64_t> input_bdim;
6269 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6270 Tensor grid_value;
6271 optional<int64_t> grid_bdim;
6272 std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
6273 auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
6274 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6275}
6276template <typename batch_rule_t, batch_rule_t batch_rule>
6277at::Tensor grid_sampler_2d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
6278 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6279 auto maybe_layer = maybeCurrentDynamicLayer();
6280 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6281 int64_t cur_level = maybe_layer->layerId();
6282 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
6283 return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners);
6284 }
6285 Tensor input_value;
6286 optional<int64_t> input_bdim;
6287 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6288 Tensor grid_value;
6289 optional<int64_t> grid_bdim;
6290 std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
6291 auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
6292 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6293}
6294template <typename batch_rule_t, batch_rule_t batch_rule>
6295::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
6296 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6297 auto maybe_layer = maybeCurrentDynamicLayer();
6298 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6299 int64_t cur_level = maybe_layer->layerId();
6300 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
6301 return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
6302 }
6303 Tensor grad_output_value;
6304 optional<int64_t> grad_output_bdim;
6305 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
6306 Tensor input_value;
6307 optional<int64_t> input_bdim;
6308 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6309 Tensor grid_value;
6310 optional<int64_t> grid_bdim;
6311 std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
6312 auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
6313 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6314}
6315template <typename batch_rule_t, batch_rule_t batch_rule>
6316at::Tensor _grid_sampler_2d_cpu_fallback_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
6317 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6318 auto maybe_layer = maybeCurrentDynamicLayer();
6319 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6320 int64_t cur_level = maybe_layer->layerId();
6321 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
6322 return at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners);
6323 }
6324 Tensor input_value;
6325 optional<int64_t> input_bdim;
6326 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6327 Tensor grid_value;
6328 optional<int64_t> grid_bdim;
6329 std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
6330 auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
6331 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6332}
6333template <typename batch_rule_t, batch_rule_t batch_rule>
6334::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
6335 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6336 auto maybe_layer = maybeCurrentDynamicLayer();
6337 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6338 int64_t cur_level = maybe_layer->layerId();
6339 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
6340 return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
6341 }
6342 Tensor grad_output_value;
6343 optional<int64_t> grad_output_bdim;
6344 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
6345 Tensor input_value;
6346 optional<int64_t> input_bdim;
6347 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6348 Tensor grid_value;
6349 optional<int64_t> grid_bdim;
6350 std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
6351 auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
6352 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6353}
6354template <typename batch_rule_t, batch_rule_t batch_rule>
6355at::Tensor grid_sampler_3d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
6356 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6357 auto maybe_layer = maybeCurrentDynamicLayer();
6358 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6359 int64_t cur_level = maybe_layer->layerId();
6360 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
6361 return at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners);
6362 }
6363 Tensor input_value;
6364 optional<int64_t> input_bdim;
6365 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6366 Tensor grid_value;
6367 optional<int64_t> grid_bdim;
6368 std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
6369 auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
6370 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6371}
6372template <typename batch_rule_t, batch_rule_t batch_rule>
6373::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
6374 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6375 auto maybe_layer = maybeCurrentDynamicLayer();
6376 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6377 int64_t cur_level = maybe_layer->layerId();
6378 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
6379 return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
6380 }
6381 Tensor grad_output_value;
6382 optional<int64_t> grad_output_bdim;
6383 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
6384 Tensor input_value;
6385 optional<int64_t> input_bdim;
6386 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6387 Tensor grid_value;
6388 optional<int64_t> grid_bdim;
6389 std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
6390 auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
6391 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6392}
6393template <typename batch_rule_t, batch_rule_t batch_rule>
6394at::Tensor hinge_embedding_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
6395 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6396 auto maybe_layer = maybeCurrentDynamicLayer();
6397 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6398 int64_t cur_level = maybe_layer->layerId();
6399 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
6400 return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction);
6401 }
6402 Tensor self_value;
6403 optional<int64_t> self_bdim;
6404 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6405 Tensor target_value;
6406 optional<int64_t> target_bdim;
6407 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
6408 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, margin, reduction);
6409 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6410}
6411template <typename batch_rule_t, batch_rule_t batch_rule>
6412at::Tensor group_norm_generated_plumbing(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
6413 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6414 auto maybe_layer = maybeCurrentDynamicLayer();
6415 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6416 int64_t cur_level = maybe_layer->layerId();
6417 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6418 return at::_ops::group_norm::call(input, num_groups, weight, bias, eps, cudnn_enabled);
6419 }
6420 Tensor input_value;
6421 optional<int64_t> input_bdim;
6422 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6423 optional<Tensor> weight_value;
6424 optional<int64_t> weight_bdim;
6425 if (weight) {
6426 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
6427 }
6428 optional<Tensor> bias_value;
6429 optional<int64_t> bias_bdim;
6430 if (bias) {
6431 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6432 }
6433 auto results = batch_rule(input_value, input_bdim, num_groups, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enabled);
6434 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6435}
6436template <typename batch_rule_t, batch_rule_t batch_rule>
6437::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
6438 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6439 auto maybe_layer = maybeCurrentDynamicLayer();
6440 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6441 int64_t cur_level = maybe_layer->layerId();
6442 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6443 return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps);
6444 }
6445 Tensor input_value;
6446 optional<int64_t> input_bdim;
6447 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6448 optional<Tensor> weight_value;
6449 optional<int64_t> weight_bdim;
6450 if (weight) {
6451 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
6452 }
6453 optional<Tensor> bias_value;
6454 optional<int64_t> bias_bdim;
6455 if (bias) {
6456 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6457 }
6458 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, N, C, HxW, group, eps);
6459 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
6460}
6461template <typename batch_rule_t, batch_rule_t batch_rule>
6462::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
6463 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6464 auto maybe_layer = maybeCurrentDynamicLayer();
6465 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6466 int64_t cur_level = maybe_layer->layerId();
6467 if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
6468 return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
6469 }
6470 Tensor grad_out_value;
6471 optional<int64_t> grad_out_bdim;
6472 std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
6473 Tensor input_value;
6474 optional<int64_t> input_bdim;
6475 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6476 Tensor mean_value;
6477 optional<int64_t> mean_bdim;
6478 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
6479 Tensor rstd_value;
6480 optional<int64_t> rstd_bdim;
6481 std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level);
6482 optional<Tensor> weight_value;
6483 optional<int64_t> weight_bdim;
6484 if (weight) {
6485 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
6486 }
6487 auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, N, C, HxW, group, output_mask);
6488 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
6489}
6490template <typename batch_rule_t, batch_rule_t batch_rule>
6491at::Tensor _fft_r2c_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
6492 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6493 auto maybe_layer = maybeCurrentDynamicLayer();
6494 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6495 int64_t cur_level = maybe_layer->layerId();
6496 if (!isBatchedAtLevel(self, cur_level)) {
6497 return at::_ops::_fft_r2c::call(self, dim, normalization, onesided);
6498 }
6499 Tensor self_value;
6500 optional<int64_t> self_bdim;
6501 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6502 auto results = batch_rule(self_value, self_bdim, dim, normalization, onesided);
6503 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6504}
6505template <typename batch_rule_t, batch_rule_t batch_rule>
6506at::Tensor _fft_c2r_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
6507 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6508 auto maybe_layer = maybeCurrentDynamicLayer();
6509 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6510 int64_t cur_level = maybe_layer->layerId();
6511 if (!isBatchedAtLevel(self, cur_level)) {
6512 return at::_ops::_fft_c2r::call(self, dim, normalization, last_dim_size);
6513 }
6514 Tensor self_value;
6515 optional<int64_t> self_bdim;
6516 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6517 auto results = batch_rule(self_value, self_bdim, dim, normalization, last_dim_size);
6518 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6519}
6520template <typename batch_rule_t, batch_rule_t batch_rule>
6521at::Tensor _fft_c2c_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
6522 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6523 auto maybe_layer = maybeCurrentDynamicLayer();
6524 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6525 int64_t cur_level = maybe_layer->layerId();
6526 if (!isBatchedAtLevel(self, cur_level)) {
6527 return at::_ops::_fft_c2c::call(self, dim, normalization, forward);
6528 }
6529 Tensor self_value;
6530 optional<int64_t> self_bdim;
6531 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6532 auto results = batch_rule(self_value, self_bdim, dim, normalization, forward);
6533 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6534}
6535template <typename batch_rule_t, batch_rule_t batch_rule>
6536void _validate_compressed_sparse_indices_generated_plumbing(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
6537 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6538 auto maybe_layer = maybeCurrentDynamicLayer();
6539 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
6540 int64_t cur_level = maybe_layer->layerId();
6541 if (!isBatchedAtLevel(compressed_idx, cur_level) && !isBatchedAtLevel(plain_idx, cur_level)) {
6542 return at::_ops::_validate_compressed_sparse_indices::call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
6543 }
6544 Tensor compressed_idx_value;
6545 optional<int64_t> compressed_idx_bdim;
6546 std::tie(compressed_idx_value, compressed_idx_bdim) = unwrapTensorAtLevel(compressed_idx, cur_level);
6547 Tensor plain_idx_value;
6548 optional<int64_t> plain_idx_bdim;
6549 std::tie(plain_idx_value, plain_idx_bdim) = unwrapTensorAtLevel(plain_idx, cur_level);
6550 batch_rule(is_crow, compressed_idx_value, compressed_idx_bdim, plain_idx_value, plain_idx_bdim, cdim, dim, nnz);
6551}
6552template <typename batch_rule_t, batch_rule_t batch_rule>
6553at::Tensor index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
6554 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6555 auto maybe_layer = maybeCurrentDynamicLayer();
6556 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6557 int64_t cur_level = maybe_layer->layerId();
6558 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
6559 return at::_ops::index_Tensor::call(self, indices);
6560 }
6561 Tensor self_value;
6562 optional<int64_t> self_bdim;
6563 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6564 auto results = batch_rule(self_value, self_bdim, indices);
6565 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6566}
6567template <typename batch_rule_t, batch_rule_t batch_rule>
6568at::Tensor & index_copy__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
6569 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6570 auto maybe_layer = maybeCurrentDynamicLayer();
6571 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6572 int64_t cur_level = maybe_layer->layerId();
6573 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
6574 return at::_ops::index_copy_::call(self, dim, index, source);
6575 }
6576 Tensor self_value;
6577 optional<int64_t> self_bdim;
6578 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6579 Tensor index_value;
6580 optional<int64_t> index_bdim;
6581 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
6582 Tensor source_value;
6583 optional<int64_t> source_bdim;
6584 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
6585 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
6586 return self;
6587}
6588template <typename batch_rule_t, batch_rule_t batch_rule>
6589at::Tensor index_copy_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
6590 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6591 auto maybe_layer = maybeCurrentDynamicLayer();
6592 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6593 int64_t cur_level = maybe_layer->layerId();
6594 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
6595 return at::_ops::index_copy::call(self, dim, index, source);
6596 }
6597 Tensor self_value;
6598 optional<int64_t> self_bdim;
6599 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6600 Tensor index_value;
6601 optional<int64_t> index_bdim;
6602 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
6603 Tensor source_value;
6604 optional<int64_t> source_bdim;
6605 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
6606 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
6607 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6608}
6609template <typename batch_rule_t, batch_rule_t batch_rule>
6610at::Tensor & index_copy__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
6611 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6612 auto maybe_layer = maybeCurrentDynamicLayer();
6613 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6614 int64_t cur_level = maybe_layer->layerId();
6615 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
6616 return at::_ops::index_copy__dimname::call(self, dim, index, source);
6617 }
6618 Tensor self_value;
6619 optional<int64_t> self_bdim;
6620 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6621 Tensor index_value;
6622 optional<int64_t> index_bdim;
6623 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
6624 Tensor source_value;
6625 optional<int64_t> source_bdim;
6626 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
6627 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
6628 return self;
6629}
6630template <typename batch_rule_t, batch_rule_t batch_rule>
6631at::Tensor index_copy_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
6632 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6633 auto maybe_layer = maybeCurrentDynamicLayer();
6634 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6635 int64_t cur_level = maybe_layer->layerId();
6636 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
6637 return at::_ops::index_copy_dimname::call(self, dim, index, source);
6638 }
6639 Tensor self_value;
6640 optional<int64_t> self_bdim;
6641 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6642 Tensor index_value;
6643 optional<int64_t> index_bdim;
6644 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
6645 Tensor source_value;
6646 optional<int64_t> source_bdim;
6647 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
6648 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
6649 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6650}
6651template <typename batch_rule_t, batch_rule_t batch_rule>
6652at::Tensor & index_put__generated_plumbing(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
6653 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6654 auto maybe_layer = maybeCurrentDynamicLayer();
6655 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6656 int64_t cur_level = maybe_layer->layerId();
6657 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
6658 return at::_ops::index_put_::call(self, indices, values, accumulate);
6659 }
6660 Tensor self_value;
6661 optional<int64_t> self_bdim;
6662 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6663 Tensor values_value;
6664 optional<int64_t> values_bdim;
6665 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
6666 batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
6667 return self;
6668}
6669template <typename batch_rule_t, batch_rule_t batch_rule>
6670at::Tensor index_put_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
6671 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6672 auto maybe_layer = maybeCurrentDynamicLayer();
6673 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6674 int64_t cur_level = maybe_layer->layerId();
6675 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
6676 return at::_ops::index_put::call(self, indices, values, accumulate);
6677 }
6678 Tensor self_value;
6679 optional<int64_t> self_bdim;
6680 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6681 Tensor values_value;
6682 optional<int64_t> values_bdim;
6683 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
6684 auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
6685 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6686}
6687template <typename batch_rule_t, batch_rule_t batch_rule>
6688at::Tensor & _index_put_impl__generated_plumbing(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
6689 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6690 auto maybe_layer = maybeCurrentDynamicLayer();
6691 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
6692 int64_t cur_level = maybe_layer->layerId();
6693 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
6694 return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe);
6695 }
6696 Tensor self_value;
6697 optional<int64_t> self_bdim;
6698 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6699 Tensor values_value;
6700 optional<int64_t> values_bdim;
6701 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
6702 batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
6703 return self;
6704}
6705template <typename batch_rule_t, batch_rule_t batch_rule>
6706at::Tensor instance_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
6707 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6708 auto maybe_layer = maybeCurrentDynamicLayer();
6709 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6710 int64_t cur_level = maybe_layer->layerId();
6711 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
6712 return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
6713 }
6714 Tensor input_value;
6715 optional<int64_t> input_bdim;
6716 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6717 optional<Tensor> weight_value;
6718 optional<int64_t> weight_bdim;
6719 if (weight) {
6720 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
6721 }
6722 optional<Tensor> bias_value;
6723 optional<int64_t> bias_bdim;
6724 if (bias) {
6725 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6726 }
6727 optional<Tensor> running_mean_value;
6728 optional<int64_t> running_mean_bdim;
6729 if (running_mean) {
6730 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
6731 }
6732 optional<Tensor> running_var_value;
6733 optional<int64_t> running_var_bdim;
6734 if (running_var) {
6735 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
6736 }
6737 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, use_input_stats, momentum, eps, cudnn_enabled);
6738 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6739}
6740template <typename batch_rule_t, batch_rule_t batch_rule>
6741at::Tensor isclose_generated_plumbing(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
6742 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6743 auto maybe_layer = maybeCurrentDynamicLayer();
6744 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6745 int64_t cur_level = maybe_layer->layerId();
6746 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6747 return at::_ops::isclose::call(self, other, rtol, atol, equal_nan);
6748 }
6749 Tensor self_value;
6750 optional<int64_t> self_bdim;
6751 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6752 Tensor other_value;
6753 optional<int64_t> other_bdim;
6754 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
6755 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rtol, atol, equal_nan);
6756 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6757}
6758template <typename batch_rule_t, batch_rule_t batch_rule>
6759at::Tensor isin_Tensor_Tensor_generated_plumbing(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
6760 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6761 auto maybe_layer = maybeCurrentDynamicLayer();
6762 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6763 int64_t cur_level = maybe_layer->layerId();
6764 if (!isBatchedAtLevel(elements, cur_level) && !isBatchedAtLevel(test_elements, cur_level)) {
6765 return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert);
6766 }
6767 Tensor elements_value;
6768 optional<int64_t> elements_bdim;
6769 std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level);
6770 Tensor test_elements_value;
6771 optional<int64_t> test_elements_bdim;
6772 std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level);
6773 auto results = batch_rule(elements_value, elements_bdim, test_elements_value, test_elements_bdim, assume_unique, invert);
6774 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6775}
6776template <typename batch_rule_t, batch_rule_t batch_rule>
6777at::Tensor isin_Tensor_Scalar_generated_plumbing(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
6778 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6779 auto maybe_layer = maybeCurrentDynamicLayer();
6780 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6781 int64_t cur_level = maybe_layer->layerId();
6782 if (!isBatchedAtLevel(elements, cur_level)) {
6783 return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert);
6784 }
6785 Tensor elements_value;
6786 optional<int64_t> elements_bdim;
6787 std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level);
6788 auto results = batch_rule(elements_value, elements_bdim, test_element, assume_unique, invert);
6789 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6790}
6791template <typename batch_rule_t, batch_rule_t batch_rule>
6792at::Tensor isin_Scalar_Tensor_generated_plumbing(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
6793 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6794 auto maybe_layer = maybeCurrentDynamicLayer();
6795 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6796 int64_t cur_level = maybe_layer->layerId();
6797 if (!isBatchedAtLevel(test_elements, cur_level)) {
6798 return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert);
6799 }
6800 Tensor test_elements_value;
6801 optional<int64_t> test_elements_bdim;
6802 std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level);
6803 auto results = batch_rule(element, test_elements_value, test_elements_bdim, assume_unique, invert);
6804 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6805}
6806template <typename batch_rule_t, batch_rule_t batch_rule>
6807at::Tensor isnan_generated_plumbing(const at::Tensor & self) {
6808 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6809 auto maybe_layer = maybeCurrentDynamicLayer();
6810 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6811 int64_t cur_level = maybe_layer->layerId();
6812 if (!isBatchedAtLevel(self, cur_level)) {
6813 return at::_ops::isnan::call(self);
6814 }
6815 Tensor self_value;
6816 optional<int64_t> self_bdim;
6817 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6818 auto results = batch_rule(self_value, self_bdim);
6819 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6820}
6821template <typename batch_rule_t, batch_rule_t batch_rule>
6822at::Tensor isreal_generated_plumbing(const at::Tensor & self) {
6823 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6824 auto maybe_layer = maybeCurrentDynamicLayer();
6825 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6826 int64_t cur_level = maybe_layer->layerId();
6827 if (!isBatchedAtLevel(self, cur_level)) {
6828 return at::_ops::isreal::call(self);
6829 }
6830 Tensor self_value;
6831 optional<int64_t> self_bdim;
6832 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6833 auto results = batch_rule(self_value, self_bdim);
6834 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6835}
6836template <typename batch_rule_t, batch_rule_t batch_rule>
6837at::Tensor kl_div_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
6838 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6839 auto maybe_layer = maybeCurrentDynamicLayer();
6840 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6841 int64_t cur_level = maybe_layer->layerId();
6842 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
6843 return at::_ops::kl_div::call(self, target, reduction, log_target);
6844 }
6845 Tensor self_value;
6846 optional<int64_t> self_bdim;
6847 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6848 Tensor target_value;
6849 optional<int64_t> target_bdim;
6850 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
6851 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, log_target);
6852 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6853}
6854template <typename batch_rule_t, batch_rule_t batch_rule>
6855at::Tensor kron_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
6856 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6857 auto maybe_layer = maybeCurrentDynamicLayer();
6858 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6859 int64_t cur_level = maybe_layer->layerId();
6860 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
6861 return at::_ops::kron::call(self, other);
6862 }
6863 Tensor self_value;
6864 optional<int64_t> self_bdim;
6865 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6866 Tensor other_value;
6867 optional<int64_t> other_bdim;
6868 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
6869 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
6870 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6871}
6872template <typename batch_rule_t, batch_rule_t batch_rule>
6873::std::tuple<at::Tensor,at::Tensor> kthvalue_generated_plumbing(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
6874 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6875 auto maybe_layer = maybeCurrentDynamicLayer();
6876 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6877 int64_t cur_level = maybe_layer->layerId();
6878 if (!isBatchedAtLevel(self, cur_level)) {
6879 return at::_ops::kthvalue::call(self, k, dim, keepdim);
6880 }
6881 Tensor self_value;
6882 optional<int64_t> self_bdim;
6883 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6884 auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
6885 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6886}
6887template <typename batch_rule_t, batch_rule_t batch_rule>
6888::std::tuple<at::Tensor,at::Tensor> kthvalue_dimname_generated_plumbing(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
6889 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6890 auto maybe_layer = maybeCurrentDynamicLayer();
6891 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6892 int64_t cur_level = maybe_layer->layerId();
6893 if (!isBatchedAtLevel(self, cur_level)) {
6894 return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
6895 }
6896 Tensor self_value;
6897 optional<int64_t> self_bdim;
6898 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6899 auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
6900 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
6901}
6902template <typename batch_rule_t, batch_rule_t batch_rule>
6903at::Tensor layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
6904 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6905 auto maybe_layer = maybeCurrentDynamicLayer();
6906 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6907 int64_t cur_level = maybe_layer->layerId();
6908 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6909 return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
6910 }
6911 Tensor input_value;
6912 optional<int64_t> input_bdim;
6913 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6914 optional<Tensor> weight_value;
6915 optional<int64_t> weight_bdim;
6916 if (weight) {
6917 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
6918 }
6919 optional<Tensor> bias_value;
6920 optional<int64_t> bias_bdim;
6921 if (bias) {
6922 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6923 }
6924 auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enable);
6925 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
6926}
6927template <typename batch_rule_t, batch_rule_t batch_rule>
6928::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
6929 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6930 auto maybe_layer = maybeCurrentDynamicLayer();
6931 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6932 int64_t cur_level = maybe_layer->layerId();
6933 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6934 return at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps);
6935 }
6936 Tensor input_value;
6937 optional<int64_t> input_bdim;
6938 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6939 optional<Tensor> weight_value;
6940 optional<int64_t> weight_bdim;
6941 if (weight) {
6942 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
6943 }
6944 optional<Tensor> bias_value;
6945 optional<int64_t> bias_bdim;
6946 if (bias) {
6947 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6948 }
6949 auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps);
6950 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
6951}
6952template <typename batch_rule_t, batch_rule_t batch_rule>
6953::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
6954 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6955 auto maybe_layer = maybeCurrentDynamicLayer();
6956 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6957 int64_t cur_level = maybe_layer->layerId();
6958 if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
6959 return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
6960 }
6961 Tensor grad_out_value;
6962 optional<int64_t> grad_out_bdim;
6963 std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
6964 Tensor input_value;
6965 optional<int64_t> input_bdim;
6966 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
6967 Tensor mean_value;
6968 optional<int64_t> mean_bdim;
6969 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
6970 Tensor rstd_value;
6971 optional<int64_t> rstd_bdim;
6972 std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level);
6973 optional<Tensor> weight_value;
6974 optional<int64_t> weight_bdim;
6975 if (weight) {
6976 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
6977 }
6978 optional<Tensor> bias_value;
6979 optional<int64_t> bias_bdim;
6980 if (bias) {
6981 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
6982 }
6983 auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, normalized_shape, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, bias_value, bias_bdim, output_mask);
6984 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
6985}
6986template <typename batch_rule_t, batch_rule_t batch_rule>
6987at::Tensor nan_to_num_generated_plumbing(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
6988 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
6989 auto maybe_layer = maybeCurrentDynamicLayer();
6990 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
6991 int64_t cur_level = maybe_layer->layerId();
6992 if (!isBatchedAtLevel(self, cur_level)) {
6993 return at::_ops::nan_to_num::call(self, nan, posinf, neginf);
6994 }
6995 Tensor self_value;
6996 optional<int64_t> self_bdim;
6997 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
6998 auto results = batch_rule(self_value, self_bdim, nan, posinf, neginf);
6999 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7000}
7001template <typename batch_rule_t, batch_rule_t batch_rule>
7002at::Tensor & nan_to_num__generated_plumbing(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
7003 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7004 auto maybe_layer = maybeCurrentDynamicLayer();
7005 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7006 int64_t cur_level = maybe_layer->layerId();
7007 if (!isBatchedAtLevel(self, cur_level)) {
7008 return at::_ops::nan_to_num_::call(self, nan, posinf, neginf);
7009 }
7010 Tensor self_value;
7011 optional<int64_t> self_bdim;
7012 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7013 batch_rule(self_value, self_bdim, nan, posinf, neginf);
7014 return self;
7015}
7016template <typename batch_rule_t, batch_rule_t batch_rule>
7017at::Tensor linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
7018 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7019 auto maybe_layer = maybeCurrentDynamicLayer();
7020 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7021 int64_t cur_level = maybe_layer->layerId();
7022 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7023 return at::_ops::linear::call(input, weight, bias);
7024 }
7025 Tensor input_value;
7026 optional<int64_t> input_bdim;
7027 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7028 Tensor weight_value;
7029 optional<int64_t> weight_bdim;
7030 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
7031 optional<Tensor> bias_value;
7032 optional<int64_t> bias_bdim;
7033 if (bias) {
7034 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
7035 }
7036 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
7037 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7038}
7039template <typename batch_rule_t, batch_rule_t batch_rule>
7040::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
7041 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7042 auto maybe_layer = maybeCurrentDynamicLayer();
7043 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7044 int64_t cur_level = maybe_layer->layerId();
7045 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
7046 return at::_ops::linear_backward::call(self, grad_output, weight, output_mask);
7047 }
7048 Tensor self_value;
7049 optional<int64_t> self_bdim;
7050 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7051 Tensor grad_output_value;
7052 optional<int64_t> grad_output_bdim;
7053 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
7054 Tensor weight_value;
7055 optional<int64_t> weight_bdim;
7056 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
7057 auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
7058 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
7059}
7060template <typename batch_rule_t, batch_rule_t batch_rule>
7061at::Tensor mkldnn_linear_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
7062 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7063 auto maybe_layer = maybeCurrentDynamicLayer();
7064 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7065 int64_t cur_level = maybe_layer->layerId();
7066 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7067 return at::_ops::mkldnn_linear::call(self, weight, bias);
7068 }
7069 Tensor self_value;
7070 optional<int64_t> self_bdim;
7071 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7072 Tensor weight_value;
7073 optional<int64_t> weight_bdim;
7074 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
7075 optional<Tensor> bias_value;
7076 optional<int64_t> bias_bdim;
7077 if (bias) {
7078 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
7079 }
7080 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
7081 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7082}
7083template <typename batch_rule_t, batch_rule_t batch_rule>
7084at::Tensor mkldnn_linear_backward_input_generated_plumbing(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
7085 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7086 auto maybe_layer = maybeCurrentDynamicLayer();
7087 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7088 int64_t cur_level = maybe_layer->layerId();
7089 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
7090 return at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight);
7091 }
7092 Tensor grad_output_value;
7093 optional<int64_t> grad_output_bdim;
7094 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
7095 Tensor weight_value;
7096 optional<int64_t> weight_bdim;
7097 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
7098 auto results = batch_rule(input_size, grad_output_value, grad_output_bdim, weight_value, weight_bdim);
7099 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7100}
7101template <typename batch_rule_t, batch_rule_t batch_rule>
7102::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
7103 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7104 auto maybe_layer = maybeCurrentDynamicLayer();
7105 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7106 int64_t cur_level = maybe_layer->layerId();
7107 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
7108 return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined);
7109 }
7110 Tensor grad_output_value;
7111 optional<int64_t> grad_output_bdim;
7112 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
7113 Tensor input_value;
7114 optional<int64_t> input_bdim;
7115 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7116 Tensor weight_value;
7117 optional<int64_t> weight_bdim;
7118 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
7119 auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_defined);
7120 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7121}
7122template <typename batch_rule_t, batch_rule_t batch_rule>
7123::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
7124 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7125 auto maybe_layer = maybeCurrentDynamicLayer();
7126 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7127 int64_t cur_level = maybe_layer->layerId();
7128 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
7129 return at::_ops::mkldnn_linear_backward::call(self, grad_output, weight, output_mask);
7130 }
7131 Tensor self_value;
7132 optional<int64_t> self_bdim;
7133 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7134 Tensor grad_output_value;
7135 optional<int64_t> grad_output_bdim;
7136 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
7137 Tensor weight_value;
7138 optional<int64_t> weight_bdim;
7139 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
7140 auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
7141 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
7142}
7143template <typename batch_rule_t, batch_rule_t batch_rule>
7144at::Tensor fbgemm_linear_int8_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
7145 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7146 auto maybe_layer = maybeCurrentDynamicLayer();
7147 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7148 int64_t cur_level = maybe_layer->layerId();
7149 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7150 return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
7151 }
7152 Tensor input_value;
7153 optional<int64_t> input_bdim;
7154 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7155 Tensor weight_value;
7156 optional<int64_t> weight_bdim;
7157 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
7158 Tensor packed_value;
7159 optional<int64_t> packed_bdim;
7160 std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level);
7161 Tensor col_offsets_value;
7162 optional<int64_t> col_offsets_bdim;
7163 std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level);
7164 Tensor bias_value;
7165 optional<int64_t> bias_bdim;
7166 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
7167 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
7168 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7169}
7170template <typename batch_rule_t, batch_rule_t batch_rule>
7171at::Tensor fbgemm_linear_int8_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
7172 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7173 auto maybe_layer = maybeCurrentDynamicLayer();
7174 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7175 int64_t cur_level = maybe_layer->layerId();
7176 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7177 return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
7178 }
7179 Tensor input_value;
7180 optional<int64_t> input_bdim;
7181 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7182 Tensor weight_value;
7183 optional<int64_t> weight_bdim;
7184 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
7185 Tensor packed_value;
7186 optional<int64_t> packed_bdim;
7187 std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level);
7188 Tensor col_offsets_value;
7189 optional<int64_t> col_offsets_bdim;
7190 std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level);
7191 Tensor bias_value;
7192 optional<int64_t> bias_bdim;
7193 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
7194 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
7195 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7196}
7197template <typename batch_rule_t, batch_rule_t batch_rule>
7198at::Tensor fbgemm_pack_gemm_matrix_fp16_generated_plumbing(const at::Tensor & input) {
7199 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7200 auto maybe_layer = maybeCurrentDynamicLayer();
7201 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7202 int64_t cur_level = maybe_layer->layerId();
7203 if (!isBatchedAtLevel(input, cur_level)) {
7204 return at::_ops::fbgemm_pack_gemm_matrix_fp16::call(input);
7205 }
7206 Tensor input_value;
7207 optional<int64_t> input_bdim;
7208 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7209 auto results = batch_rule(input_value, input_bdim);
7210 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7211}
7212template <typename batch_rule_t, batch_rule_t batch_rule>
7213at::Tensor fbgemm_linear_fp16_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
7214 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7215 auto maybe_layer = maybeCurrentDynamicLayer();
7216 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7217 int64_t cur_level = maybe_layer->layerId();
7218 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7219 return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::call(input, packed_weight, bias);
7220 }
7221 Tensor input_value;
7222 optional<int64_t> input_bdim;
7223 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7224 Tensor packed_weight_value;
7225 optional<int64_t> packed_weight_bdim;
7226 std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level);
7227 Tensor bias_value;
7228 optional<int64_t> bias_bdim;
7229 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
7230 auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
7231 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7232}
7233template <typename batch_rule_t, batch_rule_t batch_rule>
7234at::Tensor fbgemm_linear_fp16_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
7235 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7236 auto maybe_layer = maybeCurrentDynamicLayer();
7237 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7238 int64_t cur_level = maybe_layer->layerId();
7239 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
7240 return at::_ops::fbgemm_linear_fp16_weight::call(input, packed_weight, bias);
7241 }
7242 Tensor input_value;
7243 optional<int64_t> input_bdim;
7244 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7245 Tensor packed_weight_value;
7246 optional<int64_t> packed_weight_bdim;
7247 std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level);
7248 Tensor bias_value;
7249 optional<int64_t> bias_bdim;
7250 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
7251 auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
7252 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7253}
7254template <typename batch_rule_t, batch_rule_t batch_rule>
7255at::Tensor fbgemm_pack_quantized_matrix_generated_plumbing(const at::Tensor & input) {
7256 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7257 auto maybe_layer = maybeCurrentDynamicLayer();
7258 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7259 int64_t cur_level = maybe_layer->layerId();
7260 if (!isBatchedAtLevel(input, cur_level)) {
7261 return at::_ops::fbgemm_pack_quantized_matrix::call(input);
7262 }
7263 Tensor input_value;
7264 optional<int64_t> input_bdim;
7265 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7266 auto results = batch_rule(input_value, input_bdim);
7267 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7268}
7269template <typename batch_rule_t, batch_rule_t batch_rule>
7270at::Tensor fbgemm_pack_quantized_matrix_KN_generated_plumbing(const at::Tensor & input, int64_t K, int64_t N) {
7271 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7272 auto maybe_layer = maybeCurrentDynamicLayer();
7273 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7274 int64_t cur_level = maybe_layer->layerId();
7275 if (!isBatchedAtLevel(input, cur_level)) {
7276 return at::_ops::fbgemm_pack_quantized_matrix_KN::call(input, K, N);
7277 }
7278 Tensor input_value;
7279 optional<int64_t> input_bdim;
7280 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7281 auto results = batch_rule(input_value, input_bdim, K, N);
7282 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7283}
7284template <typename batch_rule_t, batch_rule_t batch_rule>
7285at::Tensor ldexp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
7286 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7287 auto maybe_layer = maybeCurrentDynamicLayer();
7288 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7289 int64_t cur_level = maybe_layer->layerId();
7290 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7291 return at::_ops::ldexp_Tensor::call(self, other);
7292 }
7293 Tensor self_value;
7294 optional<int64_t> self_bdim;
7295 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7296 Tensor other_value;
7297 optional<int64_t> other_bdim;
7298 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
7299 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
7300 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7301}
7302template <typename batch_rule_t, batch_rule_t batch_rule>
7303at::Tensor & ldexp__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
7304 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7305 auto maybe_layer = maybeCurrentDynamicLayer();
7306 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7307 int64_t cur_level = maybe_layer->layerId();
7308 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7309 return at::_ops::ldexp_::call(self, other);
7310 }
7311 Tensor self_value;
7312 optional<int64_t> self_bdim;
7313 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7314 Tensor other_value;
7315 optional<int64_t> other_bdim;
7316 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
7317 batch_rule(self_value, self_bdim, other_value, other_bdim);
7318 return self;
7319}
7320template <typename batch_rule_t, batch_rule_t batch_rule>
7321at::Tensor log_generated_plumbing(const at::Tensor & self) {
7322 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7323 auto maybe_layer = maybeCurrentDynamicLayer();
7324 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7325 int64_t cur_level = maybe_layer->layerId();
7326 if (!isBatchedAtLevel(self, cur_level)) {
7327 return at::_ops::log::call(self);
7328 }
7329 Tensor self_value;
7330 optional<int64_t> self_bdim;
7331 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7332 auto results = batch_rule(self_value, self_bdim);
7333 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7334}
7335template <typename batch_rule_t, batch_rule_t batch_rule>
7336at::Tensor & log__generated_plumbing(at::Tensor & self) {
7337 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7338 auto maybe_layer = maybeCurrentDynamicLayer();
7339 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7340 int64_t cur_level = maybe_layer->layerId();
7341 if (!isBatchedAtLevel(self, cur_level)) {
7342 return at::_ops::log_::call(self);
7343 }
7344 Tensor self_value;
7345 optional<int64_t> self_bdim;
7346 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7347 batch_rule(self_value, self_bdim);
7348 return self;
7349}
7350template <typename batch_rule_t, batch_rule_t batch_rule>
7351at::Tensor log10_generated_plumbing(const at::Tensor & self) {
7352 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7353 auto maybe_layer = maybeCurrentDynamicLayer();
7354 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7355 int64_t cur_level = maybe_layer->layerId();
7356 if (!isBatchedAtLevel(self, cur_level)) {
7357 return at::_ops::log10::call(self);
7358 }
7359 Tensor self_value;
7360 optional<int64_t> self_bdim;
7361 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7362 auto results = batch_rule(self_value, self_bdim);
7363 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7364}
7365template <typename batch_rule_t, batch_rule_t batch_rule>
7366at::Tensor & log10__generated_plumbing(at::Tensor & self) {
7367 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7368 auto maybe_layer = maybeCurrentDynamicLayer();
7369 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7370 int64_t cur_level = maybe_layer->layerId();
7371 if (!isBatchedAtLevel(self, cur_level)) {
7372 return at::_ops::log10_::call(self);
7373 }
7374 Tensor self_value;
7375 optional<int64_t> self_bdim;
7376 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7377 batch_rule(self_value, self_bdim);
7378 return self;
7379}
7380template <typename batch_rule_t, batch_rule_t batch_rule>
7381at::Tensor log1p_generated_plumbing(const at::Tensor & self) {
7382 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7383 auto maybe_layer = maybeCurrentDynamicLayer();
7384 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7385 int64_t cur_level = maybe_layer->layerId();
7386 if (!isBatchedAtLevel(self, cur_level)) {
7387 return at::_ops::log1p::call(self);
7388 }
7389 Tensor self_value;
7390 optional<int64_t> self_bdim;
7391 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7392 auto results = batch_rule(self_value, self_bdim);
7393 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7394}
7395template <typename batch_rule_t, batch_rule_t batch_rule>
7396at::Tensor & log1p__generated_plumbing(at::Tensor & self) {
7397 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7398 auto maybe_layer = maybeCurrentDynamicLayer();
7399 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7400 int64_t cur_level = maybe_layer->layerId();
7401 if (!isBatchedAtLevel(self, cur_level)) {
7402 return at::_ops::log1p_::call(self);
7403 }
7404 Tensor self_value;
7405 optional<int64_t> self_bdim;
7406 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7407 batch_rule(self_value, self_bdim);
7408 return self;
7409}
7410template <typename batch_rule_t, batch_rule_t batch_rule>
7411at::Tensor log2_generated_plumbing(const at::Tensor & self) {
7412 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7413 auto maybe_layer = maybeCurrentDynamicLayer();
7414 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7415 int64_t cur_level = maybe_layer->layerId();
7416 if (!isBatchedAtLevel(self, cur_level)) {
7417 return at::_ops::log2::call(self);
7418 }
7419 Tensor self_value;
7420 optional<int64_t> self_bdim;
7421 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7422 auto results = batch_rule(self_value, self_bdim);
7423 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7424}
7425template <typename batch_rule_t, batch_rule_t batch_rule>
7426at::Tensor & log2__generated_plumbing(at::Tensor & self) {
7427 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7428 auto maybe_layer = maybeCurrentDynamicLayer();
7429 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7430 int64_t cur_level = maybe_layer->layerId();
7431 if (!isBatchedAtLevel(self, cur_level)) {
7432 return at::_ops::log2_::call(self);
7433 }
7434 Tensor self_value;
7435 optional<int64_t> self_bdim;
7436 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7437 batch_rule(self_value, self_bdim);
7438 return self;
7439}
7440template <typename batch_rule_t, batch_rule_t batch_rule>
7441at::Tensor logaddexp_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
7442 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7443 auto maybe_layer = maybeCurrentDynamicLayer();
7444 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7445 int64_t cur_level = maybe_layer->layerId();
7446 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7447 return at::_ops::logaddexp::call(self, other);
7448 }
7449 Tensor self_value;
7450 optional<int64_t> self_bdim;
7451 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7452 Tensor other_value;
7453 optional<int64_t> other_bdim;
7454 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
7455 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
7456 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7457}
7458template <typename batch_rule_t, batch_rule_t batch_rule>
7459at::Tensor logaddexp2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
7460 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7461 auto maybe_layer = maybeCurrentDynamicLayer();
7462 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7463 int64_t cur_level = maybe_layer->layerId();
7464 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7465 return at::_ops::logaddexp2::call(self, other);
7466 }
7467 Tensor self_value;
7468 optional<int64_t> self_bdim;
7469 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7470 Tensor other_value;
7471 optional<int64_t> other_bdim;
7472 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
7473 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
7474 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7475}
7476template <typename batch_rule_t, batch_rule_t batch_rule>
7477at::Tensor xlogy_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
7478 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7479 auto maybe_layer = maybeCurrentDynamicLayer();
7480 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7481 int64_t cur_level = maybe_layer->layerId();
7482 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7483 return at::_ops::xlogy_Tensor::call(self, other);
7484 }
7485 Tensor self_value;
7486 optional<int64_t> self_bdim;
7487 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7488 Tensor other_value;
7489 optional<int64_t> other_bdim;
7490 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
7491 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
7492 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7493}
7494template <typename batch_rule_t, batch_rule_t batch_rule>
7495at::Tensor xlogy_Scalar_Self_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
7496 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7497 auto maybe_layer = maybeCurrentDynamicLayer();
7498 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7499 int64_t cur_level = maybe_layer->layerId();
7500 if (!isBatchedAtLevel(other, cur_level)) {
7501 return at::_ops::xlogy_Scalar_Self::call(self, other);
7502 }
7503 Tensor other_value;
7504 optional<int64_t> other_bdim;
7505 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
7506 auto results = batch_rule(self, other_value, other_bdim);
7507 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7508}
7509template <typename batch_rule_t, batch_rule_t batch_rule>
7510at::Tensor xlogy_Scalar_Other_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
7511 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7512 auto maybe_layer = maybeCurrentDynamicLayer();
7513 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7514 int64_t cur_level = maybe_layer->layerId();
7515 if (!isBatchedAtLevel(self, cur_level)) {
7516 return at::_ops::xlogy_Scalar_Other::call(self, other);
7517 }
7518 Tensor self_value;
7519 optional<int64_t> self_bdim;
7520 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7521 auto results = batch_rule(self_value, self_bdim, other);
7522 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7523}
7524template <typename batch_rule_t, batch_rule_t batch_rule>
7525at::Tensor & xlogy__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
7526 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7527 auto maybe_layer = maybeCurrentDynamicLayer();
7528 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7529 int64_t cur_level = maybe_layer->layerId();
7530 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7531 return at::_ops::xlogy__Tensor::call(self, other);
7532 }
7533 Tensor self_value;
7534 optional<int64_t> self_bdim;
7535 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7536 Tensor other_value;
7537 optional<int64_t> other_bdim;
7538 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
7539 batch_rule(self_value, self_bdim, other_value, other_bdim);
7540 return self;
7541}
7542template <typename batch_rule_t, batch_rule_t batch_rule>
7543at::Tensor & xlogy__Scalar_Other_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
7544 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7545 auto maybe_layer = maybeCurrentDynamicLayer();
7546 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
7547 int64_t cur_level = maybe_layer->layerId();
7548 if (!isBatchedAtLevel(self, cur_level)) {
7549 return at::_ops::xlogy__Scalar_Other::call(self, other);
7550 }
7551 Tensor self_value;
7552 optional<int64_t> self_bdim;
7553 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7554 batch_rule(self_value, self_bdim, other);
7555 return self;
7556}
7557template <typename batch_rule_t, batch_rule_t batch_rule>
7558at::Tensor log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
7559 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7560 auto maybe_layer = maybeCurrentDynamicLayer();
7561 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7562 int64_t cur_level = maybe_layer->layerId();
7563 if (!isBatchedAtLevel(self, cur_level)) {
7564 return at::_ops::log_softmax_int::call(self, dim, dtype);
7565 }
7566 Tensor self_value;
7567 optional<int64_t> self_bdim;
7568 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7569 auto results = batch_rule(self_value, self_bdim, dim, dtype);
7570 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7571}
7572template <typename batch_rule_t, batch_rule_t batch_rule>
7573at::Tensor log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
7574 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7575 auto maybe_layer = maybeCurrentDynamicLayer();
7576 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7577 int64_t cur_level = maybe_layer->layerId();
7578 if (!isBatchedAtLevel(self, cur_level)) {
7579 return at::_ops::log_softmax_Dimname::call(self, dim, dtype);
7580 }
7581 Tensor self_value;
7582 optional<int64_t> self_bdim;
7583 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7584 auto results = batch_rule(self_value, self_bdim, dim, dtype);
7585 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7586}
7587template <typename batch_rule_t, batch_rule_t batch_rule>
7588at::Tensor _log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
7589 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7590 auto maybe_layer = maybeCurrentDynamicLayer();
7591 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7592 int64_t cur_level = maybe_layer->layerId();
7593 if (!isBatchedAtLevel(self, cur_level)) {
7594 return at::_ops::_log_softmax::call(self, dim, half_to_float);
7595 }
7596 Tensor self_value;
7597 optional<int64_t> self_bdim;
7598 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7599 auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
7600 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7601}
7602template <typename batch_rule_t, batch_rule_t batch_rule>
7603at::Tensor _log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
7604 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7605 auto maybe_layer = maybeCurrentDynamicLayer();
7606 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7607 int64_t cur_level = maybe_layer->layerId();
7608 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
7609 return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype);
7610 }
7611 Tensor grad_output_value;
7612 optional<int64_t> grad_output_bdim;
7613 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
7614 Tensor output_value;
7615 optional<int64_t> output_bdim;
7616 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
7617 auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
7618 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7619}
7620template <typename batch_rule_t, batch_rule_t batch_rule>
7621at::Tensor _logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
7622 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7623 auto maybe_layer = maybeCurrentDynamicLayer();
7624 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7625 int64_t cur_level = maybe_layer->layerId();
7626 if (!isBatchedAtLevel(self, cur_level)) {
7627 return at::_ops::_logcumsumexp::call(self, dim);
7628 }
7629 Tensor self_value;
7630 optional<int64_t> self_bdim;
7631 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7632 auto results = batch_rule(self_value, self_bdim, dim);
7633 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7634}
7635template <typename batch_rule_t, batch_rule_t batch_rule>
7636at::Tensor logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
7637 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7638 auto maybe_layer = maybeCurrentDynamicLayer();
7639 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7640 int64_t cur_level = maybe_layer->layerId();
7641 if (!isBatchedAtLevel(self, cur_level)) {
7642 return at::_ops::logcumsumexp::call(self, dim);
7643 }
7644 Tensor self_value;
7645 optional<int64_t> self_bdim;
7646 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7647 auto results = batch_rule(self_value, self_bdim, dim);
7648 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7649}
7650template <typename batch_rule_t, batch_rule_t batch_rule>
7651at::Tensor logcumsumexp_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
7652 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7653 auto maybe_layer = maybeCurrentDynamicLayer();
7654 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7655 int64_t cur_level = maybe_layer->layerId();
7656 if (!isBatchedAtLevel(self, cur_level)) {
7657 return at::_ops::logcumsumexp_dimname::call(self, dim);
7658 }
7659 Tensor self_value;
7660 optional<int64_t> self_bdim;
7661 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7662 auto results = batch_rule(self_value, self_bdim, dim);
7663 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7664}
7665template <typename batch_rule_t, batch_rule_t batch_rule>
7666at::Tensor logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
7667 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7668 auto maybe_layer = maybeCurrentDynamicLayer();
7669 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7670 int64_t cur_level = maybe_layer->layerId();
7671 if (!isBatchedAtLevel(self, cur_level)) {
7672 return at::_ops::logsumexp::call(self, dim, keepdim);
7673 }
7674 Tensor self_value;
7675 optional<int64_t> self_bdim;
7676 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7677 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7678 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7679}
7680template <typename batch_rule_t, batch_rule_t batch_rule>
7681at::Tensor logsumexp_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
7682 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7683 auto maybe_layer = maybeCurrentDynamicLayer();
7684 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7685 int64_t cur_level = maybe_layer->layerId();
7686 if (!isBatchedAtLevel(self, cur_level)) {
7687 return at::_ops::logsumexp_names::call(self, dim, keepdim);
7688 }
7689 Tensor self_value;
7690 optional<int64_t> self_bdim;
7691 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7692 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7693 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7694}
7695template <typename batch_rule_t, batch_rule_t batch_rule>
7696at::Tensor margin_ranking_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
7697 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7698 auto maybe_layer = maybeCurrentDynamicLayer();
7699 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7700 int64_t cur_level = maybe_layer->layerId();
7701 if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
7702 return at::_ops::margin_ranking_loss::call(input1, input2, target, margin, reduction);
7703 }
7704 Tensor input1_value;
7705 optional<int64_t> input1_bdim;
7706 std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
7707 Tensor input2_value;
7708 optional<int64_t> input2_bdim;
7709 std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
7710 Tensor target_value;
7711 optional<int64_t> target_bdim;
7712 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
7713 auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
7714 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7715}
7716template <typename batch_rule_t, batch_rule_t batch_rule>
7717at::Tensor matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
7718 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7719 auto maybe_layer = maybeCurrentDynamicLayer();
7720 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7721 int64_t cur_level = maybe_layer->layerId();
7722 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7723 return at::_ops::matmul::call(self, other);
7724 }
7725 Tensor self_value;
7726 optional<int64_t> self_bdim;
7727 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7728 Tensor other_value;
7729 optional<int64_t> other_bdim;
7730 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
7731 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
7732 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7733}
7734template <typename batch_rule_t, batch_rule_t batch_rule>
7735::std::tuple<at::Tensor,at::Tensor> matmul_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
7736 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7737 auto maybe_layer = maybeCurrentDynamicLayer();
7738 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7739 int64_t cur_level = maybe_layer->layerId();
7740 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
7741 return at::_ops::matmul_backward::call(grad, self, other, mask);
7742 }
7743 Tensor grad_value;
7744 optional<int64_t> grad_bdim;
7745 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
7746 Tensor self_value;
7747 optional<int64_t> self_bdim;
7748 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7749 Tensor other_value;
7750 optional<int64_t> other_bdim;
7751 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
7752 auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, other_value, other_bdim, mask);
7753 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7754}
7755template <typename batch_rule_t, batch_rule_t batch_rule>
7756at::Tensor matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
7757 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7758 auto maybe_layer = maybeCurrentDynamicLayer();
7759 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7760 int64_t cur_level = maybe_layer->layerId();
7761 if (!isBatchedAtLevel(self, cur_level)) {
7762 return at::_ops::matrix_power::call(self, n);
7763 }
7764 Tensor self_value;
7765 optional<int64_t> self_bdim;
7766 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7767 auto results = batch_rule(self_value, self_bdim, n);
7768 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7769}
7770template <typename batch_rule_t, batch_rule_t batch_rule>
7771at::Tensor matrix_exp_generated_plumbing(const at::Tensor & self) {
7772 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7773 auto maybe_layer = maybeCurrentDynamicLayer();
7774 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7775 int64_t cur_level = maybe_layer->layerId();
7776 if (!isBatchedAtLevel(self, cur_level)) {
7777 return at::_ops::matrix_exp::call(self);
7778 }
7779 Tensor self_value;
7780 optional<int64_t> self_bdim;
7781 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7782 auto results = batch_rule(self_value, self_bdim);
7783 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7784}
7785template <typename batch_rule_t, batch_rule_t batch_rule>
7786at::Tensor matrix_exp_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad) {
7787 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7788 auto maybe_layer = maybeCurrentDynamicLayer();
7789 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7790 int64_t cur_level = maybe_layer->layerId();
7791 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
7792 return at::_ops::matrix_exp_backward::call(self, grad);
7793 }
7794 Tensor self_value;
7795 optional<int64_t> self_bdim;
7796 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7797 Tensor grad_value;
7798 optional<int64_t> grad_bdim;
7799 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
7800 auto results = batch_rule(self_value, self_bdim, grad_value, grad_bdim);
7801 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7802}
7803template <typename batch_rule_t, batch_rule_t batch_rule>
7804::std::tuple<at::Tensor,at::Tensor> _aminmax_generated_plumbing(const at::Tensor & self) {
7805 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7806 auto maybe_layer = maybeCurrentDynamicLayer();
7807 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7808 int64_t cur_level = maybe_layer->layerId();
7809 if (!isBatchedAtLevel(self, cur_level)) {
7810 return at::_ops::_aminmax::call(self);
7811 }
7812 Tensor self_value;
7813 optional<int64_t> self_bdim;
7814 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7815 auto results = batch_rule(self_value, self_bdim);
7816 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7817}
7818template <typename batch_rule_t, batch_rule_t batch_rule>
7819::std::tuple<at::Tensor,at::Tensor> _aminmax_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
7820 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7821 auto maybe_layer = maybeCurrentDynamicLayer();
7822 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7823 int64_t cur_level = maybe_layer->layerId();
7824 if (!isBatchedAtLevel(self, cur_level)) {
7825 return at::_ops::_aminmax_dim::call(self, dim, keepdim);
7826 }
7827 Tensor self_value;
7828 optional<int64_t> self_bdim;
7829 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7830 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7831 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7832}
7833template <typename batch_rule_t, batch_rule_t batch_rule>
7834::std::tuple<at::Tensor,at::Tensor> aminmax_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
7835 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7836 auto maybe_layer = maybeCurrentDynamicLayer();
7837 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7838 int64_t cur_level = maybe_layer->layerId();
7839 if (!isBatchedAtLevel(self, cur_level)) {
7840 return at::_ops::aminmax::call(self, dim, keepdim);
7841 }
7842 Tensor self_value;
7843 optional<int64_t> self_bdim;
7844 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7845 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7846 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7847}
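// Illustrative note: for ops whose schema returns two tensors (e.g. aminmax above), the plumbing
// expects the batch rule to return a flat 4-tuple of (tensor0, bdim0, tensor1, bdim1); each
// (tensor, bdim) pair is re-wrapped with makeBatched and repacked into the std::tuple declared by
// the ATen schema. A sketch of the assumed signature (rule name hypothetical):
//
//   std::tuple<at::Tensor, c10::optional<int64_t>, at::Tensor, c10::optional<int64_t>>
//   aminmax_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim,
//                      c10::optional<int64_t> dim, bool keepdim);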
7848template <typename batch_rule_t, batch_rule_t batch_rule>
7849at::Tensor _compute_linear_combination_generated_plumbing(const at::Tensor & input, const at::Tensor & coefficients) {
7850 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7851 auto maybe_layer = maybeCurrentDynamicLayer();
7852 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7853 int64_t cur_level = maybe_layer->layerId();
7854 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(coefficients, cur_level)) {
7855 return at::_ops::_compute_linear_combination::call(input, coefficients);
7856 }
7857 Tensor input_value;
7858 optional<int64_t> input_bdim;
7859 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
7860 Tensor coefficients_value;
7861 optional<int64_t> coefficients_bdim;
7862 std::tie(coefficients_value, coefficients_bdim) = unwrapTensorAtLevel(coefficients, cur_level);
7863 auto results = batch_rule(input_value, input_bdim, coefficients_value, coefficients_bdim);
7864 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7865}
7866template <typename batch_rule_t, batch_rule_t batch_rule>
7867::std::tuple<at::Tensor,at::Tensor> max_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
7868 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7869 auto maybe_layer = maybeCurrentDynamicLayer();
7870 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7871 int64_t cur_level = maybe_layer->layerId();
7872 if (!isBatchedAtLevel(self, cur_level)) {
7873 return at::_ops::max_dim::call(self, dim, keepdim);
7874 }
7875 Tensor self_value;
7876 optional<int64_t> self_bdim;
7877 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7878 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7879 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7880}
7881template <typename batch_rule_t, batch_rule_t batch_rule>
7882::std::tuple<at::Tensor,at::Tensor> max_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
7883 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7884 auto maybe_layer = maybeCurrentDynamicLayer();
7885 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7886 int64_t cur_level = maybe_layer->layerId();
7887 if (!isBatchedAtLevel(self, cur_level)) {
7888 return at::_ops::max_names_dim::call(self, dim, keepdim);
7889 }
7890 Tensor self_value;
7891 optional<int64_t> self_bdim;
7892 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7893 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7894 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7895}
7896template <typename batch_rule_t, batch_rule_t batch_rule>
7897at::Tensor value_selecting_reduction_backward_generated_plumbing(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
7898 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7899 auto maybe_layer = maybeCurrentDynamicLayer();
7900 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7901 int64_t cur_level = maybe_layer->layerId();
7902 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
7903 return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
7904 }
7905 Tensor grad_value;
7906 optional<int64_t> grad_bdim;
7907 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
7908 Tensor indices_value;
7909 optional<int64_t> indices_bdim;
7910 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
7911 auto results = batch_rule(grad_value, grad_bdim, dim, indices_value, indices_bdim, sizes, keepdim);
7912 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7913}
7914template <typename batch_rule_t, batch_rule_t batch_rule>
7915at::Tensor amax_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
7916 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7917 auto maybe_layer = maybeCurrentDynamicLayer();
7918 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7919 int64_t cur_level = maybe_layer->layerId();
7920 if (!isBatchedAtLevel(self, cur_level)) {
7921 return at::_ops::amax::call(self, dim, keepdim);
7922 }
7923 Tensor self_value;
7924 optional<int64_t> self_bdim;
7925 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7926 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
7927 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7928}
7929template <typename batch_rule_t, batch_rule_t batch_rule>
7930::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7931 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7932 auto maybe_layer = maybeCurrentDynamicLayer();
7933 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7934 int64_t cur_level = maybe_layer->layerId();
7935 if (!isBatchedAtLevel(self, cur_level)) {
7936 return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
7937 }
7938 Tensor self_value;
7939 optional<int64_t> self_bdim;
7940 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7941 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7942 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
7943}
7944template <typename batch_rule_t, batch_rule_t batch_rule>
7945at::Tensor max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7946 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7947 auto maybe_layer = maybeCurrentDynamicLayer();
7948 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7949 int64_t cur_level = maybe_layer->layerId();
7950 if (!isBatchedAtLevel(self, cur_level)) {
7951 return at::_ops::max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
7952 }
7953 Tensor self_value;
7954 optional<int64_t> self_bdim;
7955 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7956 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7957 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7958}
7959template <typename batch_rule_t, batch_rule_t batch_rule>
7960at::Tensor max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7961 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7962 auto maybe_layer = maybeCurrentDynamicLayer();
7963 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7964 int64_t cur_level = maybe_layer->layerId();
7965 if (!isBatchedAtLevel(self, cur_level)) {
7966 return at::_ops::max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
7967 }
7968 Tensor self_value;
7969 optional<int64_t> self_bdim;
7970 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7971 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7972 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7973}
7974template <typename batch_rule_t, batch_rule_t batch_rule>
7975at::Tensor _mps_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7976 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7977 auto maybe_layer = maybeCurrentDynamicLayer();
7978 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7979 int64_t cur_level = maybe_layer->layerId();
7980 if (!isBatchedAtLevel(self, cur_level)) {
7981 return at::_ops::_mps_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
7982 }
7983 Tensor self_value;
7984 optional<int64_t> self_bdim;
7985 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
7986 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
7987 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
7988}
7989template <typename batch_rule_t, batch_rule_t batch_rule>
7990at::Tensor mps_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
7991 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
7992 auto maybe_layer = maybeCurrentDynamicLayer();
7993 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
7994 int64_t cur_level = maybe_layer->layerId();
7995 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
7996 return at::_ops::mps_max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode);
7997 }
7998 Tensor grad_output_value;
7999 optional<int64_t> grad_output_bdim;
8000 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
8001 Tensor self_value;
8002 optional<int64_t> self_bdim;
8003 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8004 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
8005 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8006}
8007template <typename batch_rule_t, batch_rule_t batch_rule>
8008at::Tensor mkldnn_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
8009 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8010 auto maybe_layer = maybeCurrentDynamicLayer();
8011 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8012 int64_t cur_level = maybe_layer->layerId();
8013 if (!isBatchedAtLevel(self, cur_level)) {
8014 return at::_ops::mkldnn_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
8015 }
8016 Tensor self_value;
8017 optional<int64_t> self_bdim;
8018 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8019 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
8020 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8021}
8022template <typename batch_rule_t, batch_rule_t batch_rule>
8023at::Tensor mkldnn_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
8024 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8025 auto maybe_layer = maybeCurrentDynamicLayer();
8026 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8027 int64_t cur_level = maybe_layer->layerId();
8028 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
8029 return at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
8030 }
8031 Tensor grad_output_value;
8032 optional<int64_t> grad_output_bdim;
8033 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
8034 Tensor output_value;
8035 optional<int64_t> output_bdim;
8036 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
8037 Tensor input_value;
8038 optional<int64_t> input_bdim;
8039 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
8040 auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
8041 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8042}
8043template <typename batch_rule_t, batch_rule_t batch_rule>
8044at::Tensor mkldnn_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
8045 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8046 auto maybe_layer = maybeCurrentDynamicLayer();
8047 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8048 int64_t cur_level = maybe_layer->layerId();
8049 if (!isBatchedAtLevel(self, cur_level)) {
8050 return at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
8051 }
8052 Tensor self_value;
8053 optional<int64_t> self_bdim;
8054 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8055 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
8056 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8057}
8058template <typename batch_rule_t, batch_rule_t batch_rule>
8059at::Tensor mkldnn_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
8060 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8061 auto maybe_layer = maybeCurrentDynamicLayer();
8062 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8063 int64_t cur_level = maybe_layer->layerId();
8064 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
8065 return at::_ops::mkldnn_max_pool3d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
8066 }
8067 Tensor grad_output_value;
8068 optional<int64_t> grad_output_bdim;
8069 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
8070 Tensor output_value;
8071 optional<int64_t> output_bdim;
8072 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
8073 Tensor input_value;
8074 optional<int64_t> input_bdim;
8075 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
8076 auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
8077 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8078}
8079template <typename batch_rule_t, batch_rule_t batch_rule>
8080at::Tensor quantized_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
8081 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8082 auto maybe_layer = maybeCurrentDynamicLayer();
8083 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8084 int64_t cur_level = maybe_layer->layerId();
8085 if (!isBatchedAtLevel(self, cur_level)) {
8086 return at::_ops::quantized_max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
8087 }
8088 Tensor self_value;
8089 optional<int64_t> self_bdim;
8090 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8091 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
8092 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8093}
8094template <typename batch_rule_t, batch_rule_t batch_rule>
8095at::Tensor quantized_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
8096 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8097 auto maybe_layer = maybeCurrentDynamicLayer();
8098 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8099 int64_t cur_level = maybe_layer->layerId();
8100 if (!isBatchedAtLevel(self, cur_level)) {
8101 return at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
8102 }
8103 Tensor self_value;
8104 optional<int64_t> self_bdim;
8105 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8106 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
8107 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8108}
8109template <typename batch_rule_t, batch_rule_t batch_rule>
8110at::Tensor max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
8111 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8112 auto maybe_layer = maybeCurrentDynamicLayer();
8113 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8114 int64_t cur_level = maybe_layer->layerId();
8115 if (!isBatchedAtLevel(self, cur_level)) {
8116 return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
8117 }
8118 Tensor self_value;
8119 optional<int64_t> self_bdim;
8120 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8121 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
8122 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8123}
8124template <typename batch_rule_t, batch_rule_t batch_rule>
8125at::Tensor mean_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
8126 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8127 auto maybe_layer = maybeCurrentDynamicLayer();
8128 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8129 int64_t cur_level = maybe_layer->layerId();
8130 if (!isBatchedAtLevel(self, cur_level)) {
8131 return at::_ops::mean::call(self, dtype);
8132 }
8133 Tensor self_value;
8134 optional<int64_t> self_bdim;
8135 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8136 auto results = batch_rule(self_value, self_bdim, dtype);
8137 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8138}
8139template <typename batch_rule_t, batch_rule_t batch_rule>
8140at::Tensor mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
8141 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8142 auto maybe_layer = maybeCurrentDynamicLayer();
8143 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8144 int64_t cur_level = maybe_layer->layerId();
8145 if (!isBatchedAtLevel(self, cur_level)) {
8146 return at::_ops::mean_dim::call(self, dim, keepdim, dtype);
8147 }
8148 Tensor self_value;
8149 optional<int64_t> self_bdim;
8150 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8151 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
8152 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8153}
8154template <typename batch_rule_t, batch_rule_t batch_rule>
8155at::Tensor mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
8156 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8157 auto maybe_layer = maybeCurrentDynamicLayer();
8158 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8159 int64_t cur_level = maybe_layer->layerId();
8160 if (!isBatchedAtLevel(self, cur_level)) {
8161 return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype);
8162 }
8163 Tensor self_value;
8164 optional<int64_t> self_bdim;
8165 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8166 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
8167 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8168}
8169template <typename batch_rule_t, batch_rule_t batch_rule>
8170at::Tensor nanmean_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
8171 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8172 auto maybe_layer = maybeCurrentDynamicLayer();
8173 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8174 int64_t cur_level = maybe_layer->layerId();
8175 if (!isBatchedAtLevel(self, cur_level)) {
8176 return at::_ops::nanmean::call(self, dim, keepdim, dtype);
8177 }
8178 Tensor self_value;
8179 optional<int64_t> self_bdim;
8180 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8181 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
8182 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8183}
8184template <typename batch_rule_t, batch_rule_t batch_rule>
8185at::Tensor median_generated_plumbing(const at::Tensor & self) {
8186 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8187 auto maybe_layer = maybeCurrentDynamicLayer();
8188 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8189 int64_t cur_level = maybe_layer->layerId();
8190 if (!isBatchedAtLevel(self, cur_level)) {
8191 return at::_ops::median::call(self);
8192 }
8193 Tensor self_value;
8194 optional<int64_t> self_bdim;
8195 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8196 auto results = batch_rule(self_value, self_bdim);
8197 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8198}
8199template <typename batch_rule_t, batch_rule_t batch_rule>
8200::std::tuple<at::Tensor,at::Tensor> median_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
8201 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8202 auto maybe_layer = maybeCurrentDynamicLayer();
8203 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8204 int64_t cur_level = maybe_layer->layerId();
8205 if (!isBatchedAtLevel(self, cur_level)) {
8206 return at::_ops::median_dim::call(self, dim, keepdim);
8207 }
8208 Tensor self_value;
8209 optional<int64_t> self_bdim;
8210 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8211 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
8212 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
8213}
8214template <typename batch_rule_t, batch_rule_t batch_rule>
8215::std::tuple<at::Tensor,at::Tensor> median_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
8216 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8217 auto maybe_layer = maybeCurrentDynamicLayer();
8218 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8219 int64_t cur_level = maybe_layer->layerId();
8220 if (!isBatchedAtLevel(self, cur_level)) {
8221 return at::_ops::median_names_dim::call(self, dim, keepdim);
8222 }
8223 Tensor self_value;
8224 optional<int64_t> self_bdim;
8225 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8226 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
8227 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
8228}
8229template <typename batch_rule_t, batch_rule_t batch_rule>
8230at::Tensor nanmedian_generated_plumbing(const at::Tensor & self) {
8231 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8232 auto maybe_layer = maybeCurrentDynamicLayer();
8233 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8234 int64_t cur_level = maybe_layer->layerId();
8235 if (!isBatchedAtLevel(self, cur_level)) {
8236 return at::_ops::nanmedian::call(self);
8237 }
8238 Tensor self_value;
8239 optional<int64_t> self_bdim;
8240 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8241 auto results = batch_rule(self_value, self_bdim);
8242 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8243}
8244template <typename batch_rule_t, batch_rule_t batch_rule>
8245::std::tuple<at::Tensor,at::Tensor> nanmedian_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
8246 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8247 auto maybe_layer = maybeCurrentDynamicLayer();
8248 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8249 int64_t cur_level = maybe_layer->layerId();
8250 if (!isBatchedAtLevel(self, cur_level)) {
8251 return at::_ops::nanmedian_dim::call(self, dim, keepdim);
8252 }
8253 Tensor self_value;
8254 optional<int64_t> self_bdim;
8255 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8256 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
8257 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
8258}
8259template <typename batch_rule_t, batch_rule_t batch_rule>
8260::std::tuple<at::Tensor,at::Tensor> nanmedian_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
8261 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8262 auto maybe_layer = maybeCurrentDynamicLayer();
8263 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8264 int64_t cur_level = maybe_layer->layerId();
8265 if (!isBatchedAtLevel(self, cur_level)) {
8266 return at::_ops::nanmedian_names_dim::call(self, dim, keepdim);
8267 }
8268 Tensor self_value;
8269 optional<int64_t> self_bdim;
8270 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8271 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
8272 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
8273}
8274template <typename batch_rule_t, batch_rule_t batch_rule>
8275::std::tuple<at::Tensor,at::Tensor> min_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
8276 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8277 auto maybe_layer = maybeCurrentDynamicLayer();
8278 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8279 int64_t cur_level = maybe_layer->layerId();
8280 if (!isBatchedAtLevel(self, cur_level)) {
8281 return at::_ops::min_dim::call(self, dim, keepdim);
8282 }
8283 Tensor self_value;
8284 optional<int64_t> self_bdim;
8285 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8286 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
8287 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
8288}
8289template <typename batch_rule_t, batch_rule_t batch_rule>
8290::std::tuple<at::Tensor,at::Tensor> min_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
8291 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8292 auto maybe_layer = maybeCurrentDynamicLayer();
8293 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8294 int64_t cur_level = maybe_layer->layerId();
8295 if (!isBatchedAtLevel(self, cur_level)) {
8296 return at::_ops::min_names_dim::call(self, dim, keepdim);
8297 }
8298 Tensor self_value;
8299 optional<int64_t> self_bdim;
8300 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8301 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
8302 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
8303}
8304template <typename batch_rule_t, batch_rule_t batch_rule>
8305at::Tensor amin_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
8306 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8307 auto maybe_layer = maybeCurrentDynamicLayer();
8308 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8309 int64_t cur_level = maybe_layer->layerId();
8310 if (!isBatchedAtLevel(self, cur_level)) {
8311 return at::_ops::amin::call(self, dim, keepdim);
8312 }
8313 Tensor self_value;
8314 optional<int64_t> self_bdim;
8315 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8316 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
8317 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8318}
8319template <typename batch_rule_t, batch_rule_t batch_rule>
8320at::Tensor _mps_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
8321 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8322 auto maybe_layer = maybeCurrentDynamicLayer();
8323 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8324 int64_t cur_level = maybe_layer->layerId();
8325 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
8326 return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups);
8327 }
8328 Tensor self_value;
8329 optional<int64_t> self_bdim;
8330 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8331 Tensor weight_value;
8332 optional<int64_t> weight_bdim;
8333 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8334 optional<Tensor> bias_value;
8335 optional<int64_t> bias_bdim;
8336 if (bias) {
8337 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
8338 }
8339 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
8340 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8341}
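// Illustrative note: optional tensor arguments (such as `bias` above) are only unwrapped when
// present. If the optional is empty, the batch rule receives an empty optional<Tensor> together
// with an empty optional<int64_t> batch dimension, and is itself responsible for handling the
// "no bias" case.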
8342template <typename batch_rule_t, batch_rule_t batch_rule>
8343::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask) {
8344 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8345 auto maybe_layer = maybeCurrentDynamicLayer();
8346 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8347 int64_t cur_level = maybe_layer->layerId();
8348 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
8349 return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask);
8350 }
8351 Tensor self_value;
8352 optional<int64_t> self_bdim;
8353 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8354 Tensor grad_output_value;
8355 optional<int64_t> grad_output_bdim;
8356 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
8357 Tensor weight_value;
8358 optional<int64_t> weight_bdim;
8359 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8360 auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, output_mask);
8361 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
8362}
8363template <typename batch_rule_t, batch_rule_t batch_rule>
8364at::Tensor mkldnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
8365 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8366 auto maybe_layer = maybeCurrentDynamicLayer();
8367 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8368 int64_t cur_level = maybe_layer->layerId();
8369 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
8370 return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups);
8371 }
8372 Tensor self_value;
8373 optional<int64_t> self_bdim;
8374 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8375 Tensor weight_value;
8376 optional<int64_t> weight_bdim;
8377 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8378 optional<Tensor> bias_value;
8379 optional<int64_t> bias_bdim;
8380 if (bias) {
8381 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
8382 }
8383 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
8384 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8385}
8386template <typename batch_rule_t, batch_rule_t batch_rule>
8387::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_generated_plumbing(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
8388 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8389 auto maybe_layer = maybeCurrentDynamicLayer();
8390 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8391 int64_t cur_level = maybe_layer->layerId();
8392 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight0, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_, cur_level)) {
8393 return at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
8394 }
8395 Tensor input_value;
8396 optional<int64_t> input_bdim;
8397 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
8398 Tensor weight0_value;
8399 optional<int64_t> weight0_bdim;
8400 std::tie(weight0_value, weight0_bdim) = unwrapTensorAtLevel(weight0, cur_level);
8401 Tensor weight1_value;
8402 optional<int64_t> weight1_bdim;
8403 std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level);
8404 Tensor weight2_value;
8405 optional<int64_t> weight2_bdim;
8406 std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level);
8407 Tensor weight3_value;
8408 optional<int64_t> weight3_bdim;
8409 std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level);
8410 Tensor hx__value;
8411 optional<int64_t> hx__bdim;
8412 std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level);
8413 Tensor cx__value;
8414 optional<int64_t> cx__bdim;
8415 std::tie(cx__value, cx__bdim) = unwrapTensorAtLevel(cx_, cur_level);
8416 auto results = batch_rule(input_value, input_bdim, weight0_value, weight0_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, hx__value, hx__bdim, cx__value, cx__bdim, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
8417 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
8418}
8419template <typename batch_rule_t, batch_rule_t batch_rule>
8420::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
8421 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8422 auto maybe_layer = maybeCurrentDynamicLayer();
8423 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8424 int64_t cur_level = maybe_layer->layerId();
8425 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(weight4, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_tmp, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(hy_, cur_level) && !isBatchedAtLevel(cy_, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
8426 return at::_ops::mkldnn_rnn_layer_backward::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
8427 }
8428 Tensor input_value;
8429 optional<int64_t> input_bdim;
8430 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
8431 Tensor weight1_value;
8432 optional<int64_t> weight1_bdim;
8433 std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level);
8434 Tensor weight2_value;
8435 optional<int64_t> weight2_bdim;
8436 std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level);
8437 Tensor weight3_value;
8438 optional<int64_t> weight3_bdim;
8439 std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level);
8440 Tensor weight4_value;
8441 optional<int64_t> weight4_bdim;
8442 std::tie(weight4_value, weight4_bdim) = unwrapTensorAtLevel(weight4, cur_level);
8443 Tensor hx__value;
8444 optional<int64_t> hx__bdim;
8445 std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level);
8446 Tensor cx_tmp_value;
8447 optional<int64_t> cx_tmp_bdim;
8448 std::tie(cx_tmp_value, cx_tmp_bdim) = unwrapTensorAtLevel(cx_tmp, cur_level);
8449 Tensor output_value;
8450 optional<int64_t> output_bdim;
8451 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
8452 Tensor hy__value;
8453 optional<int64_t> hy__bdim;
8454 std::tie(hy__value, hy__bdim) = unwrapTensorAtLevel(hy_, cur_level);
8455 Tensor cy__value;
8456 optional<int64_t> cy__bdim;
8457 std::tie(cy__value, cy__bdim) = unwrapTensorAtLevel(cy_, cur_level);
8458 Tensor workspace_value;
8459 optional<int64_t> workspace_bdim;
8460 std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
8461 optional<Tensor> grad_output_value;
8462 optional<int64_t> grad_output_bdim;
8463 if (grad_output) {
8464 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
8465 }
8466 optional<Tensor> grad_hy_value;
8467 optional<int64_t> grad_hy_bdim;
8468 if (grad_hy) {
8469 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
8470 }
8471 optional<Tensor> grad_cy_value;
8472 optional<int64_t> grad_cy_bdim;
8473 if (grad_cy) {
8474 std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
8475 }
8476 auto results = batch_rule(input_value, input_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, weight4_value, weight4_bdim, hx__value, hx__bdim, cx_tmp_value, cx_tmp_bdim, output_value, output_bdim, hy__value, hy__bdim, cy__value, cy__bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_value, workspace_bdim);
8477 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level));
8478}
8479template <typename batch_rule_t, batch_rule_t batch_rule>
8480::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
8481 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8482 auto maybe_layer = maybeCurrentDynamicLayer();
8483 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8484 int64_t cur_level = maybe_layer->layerId();
8485 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
8486 return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
8487 }
8488 Tensor input_value;
8489 optional<int64_t> input_bdim;
8490 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
8491 Tensor weight_value;
8492 optional<int64_t> weight_bdim;
8493 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8494 optional<Tensor> bias_value;
8495 optional<int64_t> bias_bdim;
8496 if (bias) {
8497 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
8498 }
8499 optional<Tensor> running_mean_value;
8500 optional<int64_t> running_mean_bdim;
8501 if (running_mean) {
8502 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
8503 }
8504 optional<Tensor> running_var_value;
8505 optional<int64_t> running_var_bdim;
8506 if (running_var) {
8507 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
8508 }
8509 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
8510 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
8511}
8512template <typename batch_rule_t, batch_rule_t batch_rule>
8513::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {
8514 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8515 auto maybe_layer = maybeCurrentDynamicLayer();
8516 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8517 int64_t cur_level = maybe_layer->layerId();
8518 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level)) {
8519 return at::_ops::miopen_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
8520 }
8521 Tensor input_value;
8522 optional<int64_t> input_bdim;
8523 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
8524 Tensor grad_output_value;
8525 optional<int64_t> grad_output_bdim;
8526 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
8527 Tensor weight_value;
8528 optional<int64_t> weight_bdim;
8529 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8530 optional<Tensor> running_mean_value;
8531 optional<int64_t> running_mean_bdim;
8532 if (running_mean) {
8533 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
8534 }
8535 optional<Tensor> running_var_value;
8536 optional<int64_t> running_var_bdim;
8537 if (running_var) {
8538 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
8539 }
8540 optional<Tensor> save_mean_value;
8541 optional<int64_t> save_mean_bdim;
8542 if (save_mean) {
8543 std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
8544 }
8545 optional<Tensor> save_var_value;
8546 optional<int64_t> save_var_bdim;
8547 if (save_var) {
8548 std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
8549 }
8550 auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon);
8551 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
8552}
8553template <typename batch_rule_t, batch_rule_t batch_rule>
8554at::Tensor miopen_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
8555 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8556 auto maybe_layer = maybeCurrentDynamicLayer();
8557 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8558 int64_t cur_level = maybe_layer->layerId();
8559 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
8560 return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
8561 }
8562 Tensor self_value;
8563 optional<int64_t> self_bdim;
8564 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8565 Tensor weight_value;
8566 optional<int64_t> weight_bdim;
8567 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8568 optional<Tensor> bias_value;
8569 optional<int64_t> bias_bdim;
8570 if (bias) {
8571 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
8572 }
8573 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
8574 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8575}
8576template <typename batch_rule_t, batch_rule_t batch_rule>
8577at::Tensor miopen_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
8578 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8579 auto maybe_layer = maybeCurrentDynamicLayer();
8580 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8581 int64_t cur_level = maybe_layer->layerId();
8582 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
8583 return at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
8584 }
8585 Tensor self_value;
8586 optional<int64_t> self_bdim;
8587 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8588 Tensor weight_value;
8589 optional<int64_t> weight_bdim;
8590 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8591 optional<Tensor> bias_value;
8592 optional<int64_t> bias_bdim;
8593 if (bias) {
8594 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
8595 }
8596 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
8597 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8598}
8599template <typename batch_rule_t, batch_rule_t batch_rule>
8600at::Tensor miopen_depthwise_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
8601 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8602 auto maybe_layer = maybeCurrentDynamicLayer();
8603 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8604 int64_t cur_level = maybe_layer->layerId();
8605 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
8606 return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
8607 }
8608 Tensor self_value;
8609 optional<int64_t> self_bdim;
8610 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8611 Tensor weight_value;
8612 optional<int64_t> weight_bdim;
8613 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8614 optional<Tensor> bias_value;
8615 optional<int64_t> bias_bdim;
8616 if (bias) {
8617 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
8618 }
8619 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
8620 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8621}
8622template <typename batch_rule_t, batch_rule_t batch_rule>
8623at::Tensor miopen_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
8624 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8625 auto maybe_layer = maybeCurrentDynamicLayer();
8626 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8627 int64_t cur_level = maybe_layer->layerId();
8628 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
8629 return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
8630 }
8631 Tensor self_value;
8632 optional<int64_t> self_bdim;
8633 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8634 Tensor weight_value;
8635 optional<int64_t> weight_bdim;
8636 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8637 optional<Tensor> bias_value;
8638 optional<int64_t> bias_bdim;
8639 if (bias) {
8640 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
8641 }
8642 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
8643 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8644}
8645template <typename batch_rule_t, batch_rule_t batch_rule>
8646at::Tensor miopen_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
8647 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8648 auto maybe_layer = maybeCurrentDynamicLayer();
8649 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8650 int64_t cur_level = maybe_layer->layerId();
8651 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
8652 return at::_ops::miopen_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
8653 }
8654 Tensor self_value;
8655 optional<int64_t> self_bdim;
8656 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8657 Tensor weight_value;
8658 optional<int64_t> weight_bdim;
8659 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
8660 Tensor z_value;
8661 optional<int64_t> z_bdim;
8662 std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level);
8663 optional<Tensor> bias_value;
8664 optional<int64_t> bias_bdim;
8665 if (bias) {
8666 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
8667 }
8668 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
8669 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8670}
8671template <typename batch_rule_t, batch_rule_t batch_rule>
8672::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
8673 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8674 auto maybe_layer = maybeCurrentDynamicLayer();
8675 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8676 int64_t cur_level = maybe_layer->layerId();
8677 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
8678 return at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
8679 }
8680 Tensor input_value;
8681 optional<int64_t> input_bdim;
8682 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
8683 Tensor hx_value;
8684 optional<int64_t> hx_bdim;
8685 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
8686 optional<Tensor> cx_value;
8687 optional<int64_t> cx_bdim;
8688 if (cx) {
8689 std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
8690 }
8691 optional<Tensor> dropout_state_value;
8692 optional<int64_t> dropout_state_bdim;
8693 if (dropout_state) {
8694 std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
8695 }
8696 auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
8697 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
8698}
8699template <typename batch_rule_t, batch_rule_t batch_rule>
8700::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
8701 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8702 auto maybe_layer = maybeCurrentDynamicLayer();
8703 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8704 int64_t cur_level = maybe_layer->layerId();
8705 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
8706 return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
8707 }
8708 Tensor input_value;
8709 optional<int64_t> input_bdim;
8710 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
8711 Tensor weight_buf_value;
8712 optional<int64_t> weight_buf_bdim;
8713 std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
8714 Tensor hx_value;
8715 optional<int64_t> hx_bdim;
8716 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
8717 Tensor output_value;
8718 optional<int64_t> output_bdim;
8719 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
8720 Tensor reserve_value;
8721 optional<int64_t> reserve_bdim;
8722 std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
8723 optional<Tensor> cx_value;
8724 optional<int64_t> cx_bdim;
8725 if (cx) {
8726 std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
8727 }
8728 optional<Tensor> grad_output_value;
8729 optional<int64_t> grad_output_bdim;
8730 if (grad_output) {
8731 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
8732 }
8733 optional<Tensor> grad_hy_value;
8734 optional<int64_t> grad_hy_bdim;
8735 if (grad_hy) {
8736 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
8737 }
8738 optional<Tensor> grad_cy_value;
8739 optional<int64_t> grad_cy_bdim;
8740 if (grad_cy) {
8741 std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
8742 }
8743 optional<Tensor> dropout_state_value;
8744 optional<int64_t> dropout_state_bdim;
8745 if (dropout_state) {
8746 std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
8747 }
8748 auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
8749 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
8750}
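// Note: the functions below follow the same generated plumbing pattern visible throughout this
// header: exclude the FuncTorchBatched key, look up the current dynamic layer, fall through to the
// plain at::_ops call when no argument is batched at that level, otherwise unwrap each tensor into
// a (value, bdim) pair, invoke the batch rule, and re-wrap the outputs with makeBatched (or
// makeBatchedVector for tensor-list outputs, as in miopen_rnn_backward above).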
8751template <typename batch_rule_t, batch_rule_t batch_rule>
8752at::Tensor mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
8753 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8754 auto maybe_layer = maybeCurrentDynamicLayer();
8755 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8756 int64_t cur_level = maybe_layer->layerId();
8757 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
8758 return at::_ops::mm::call(self, mat2);
8759 }
8760 Tensor self_value;
8761 optional<int64_t> self_bdim;
8762 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8763 Tensor mat2_value;
8764 optional<int64_t> mat2_bdim;
8765 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
8766 auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
8767 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8768}
8769template <typename batch_rule_t, batch_rule_t batch_rule>
8770at::Tensor _sparse_mm_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense) {
8771 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8772 auto maybe_layer = maybeCurrentDynamicLayer();
8773 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8774 int64_t cur_level = maybe_layer->layerId();
8775 if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
8776 return at::_ops::_sparse_mm::call(sparse, dense);
8777 }
8778 Tensor sparse_value;
8779 optional<int64_t> sparse_bdim;
8780 std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
8781 Tensor dense_value;
8782 optional<int64_t> dense_bdim;
8783 std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
8784 auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim);
8785 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8786}
8787template <typename batch_rule_t, batch_rule_t batch_rule>
8788at::Tensor _sparse_mm_reduce_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
8789 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8790 auto maybe_layer = maybeCurrentDynamicLayer();
8791 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8792 int64_t cur_level = maybe_layer->layerId();
8793 if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
8794 return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce);
8795 }
8796 Tensor sparse_value;
8797 optional<int64_t> sparse_bdim;
8798 std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
8799 Tensor dense_value;
8800 optional<int64_t> dense_bdim;
8801 std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
8802 auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim, reduce);
8803 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8804}
8805template <typename batch_rule_t, batch_rule_t batch_rule>
8806at::Tensor _sparse_sparse_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
8807 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8808 auto maybe_layer = maybeCurrentDynamicLayer();
8809 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8810 int64_t cur_level = maybe_layer->layerId();
8811 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
8812 return at::_ops::_sparse_sparse_matmul::call(self, other);
8813 }
8814 Tensor self_value;
8815 optional<int64_t> self_bdim;
8816 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8817 Tensor other_value;
8818 optional<int64_t> other_bdim;
8819 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
8820 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
8821 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8822}
8823template <typename batch_rule_t, batch_rule_t batch_rule>
8824::std::tuple<at::Tensor,at::Tensor> mode_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
8825 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8826 auto maybe_layer = maybeCurrentDynamicLayer();
8827 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8828 int64_t cur_level = maybe_layer->layerId();
8829 if (!isBatchedAtLevel(self, cur_level)) {
8830 return at::_ops::mode::call(self, dim, keepdim);
8831 }
8832 Tensor self_value;
8833 optional<int64_t> self_bdim;
8834 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8835 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
8836 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
8837}
8838template <typename batch_rule_t, batch_rule_t batch_rule>
8839::std::tuple<at::Tensor,at::Tensor> mode_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
8840 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8841 auto maybe_layer = maybeCurrentDynamicLayer();
8842 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8843 int64_t cur_level = maybe_layer->layerId();
8844 if (!isBatchedAtLevel(self, cur_level)) {
8845 return at::_ops::mode_dimname::call(self, dim, keepdim);
8846 }
8847 Tensor self_value;
8848 optional<int64_t> self_bdim;
8849 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8850 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
8851 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
8852}
8853template <typename batch_rule_t, batch_rule_t batch_rule>
8854at::Tensor mul_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
8855 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8856 auto maybe_layer = maybeCurrentDynamicLayer();
8857 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8858 int64_t cur_level = maybe_layer->layerId();
8859 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
8860 return at::_ops::mul_Tensor::call(self, other);
8861 }
8862 Tensor self_value;
8863 optional<int64_t> self_bdim;
8864 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8865 Tensor other_value;
8866 optional<int64_t> other_bdim;
8867 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
8868 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
8869 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8870}
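// Note: in-place variants (generated via "gen_vmap_inplace_plumbing") call the batch rule for its
// side effect on the unwrapped self_value and return the original batched `self` instead of
// re-wrapping a result, as in mul__Tensor below.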
8871template <typename batch_rule_t, batch_rule_t batch_rule>
8872at::Tensor & mul__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
8873 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8874 auto maybe_layer = maybeCurrentDynamicLayer();
8875 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
8876 int64_t cur_level = maybe_layer->layerId();
8877 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
8878 return at::_ops::mul__Tensor::call(self, other);
8879 }
8880 Tensor self_value;
8881 optional<int64_t> self_bdim;
8882 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8883 Tensor other_value;
8884 optional<int64_t> other_bdim;
8885 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
8886 batch_rule(self_value, self_bdim, other_value, other_bdim);
8887 return self;
8888}
8889template <typename batch_rule_t, batch_rule_t batch_rule>
8890at::Tensor mul_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
8891 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8892 auto maybe_layer = maybeCurrentDynamicLayer();
8893 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8894 int64_t cur_level = maybe_layer->layerId();
8895 if (!isBatchedAtLevel(self, cur_level)) {
8896 return at::_ops::mul_Scalar::call(self, other);
8897 }
8898 Tensor self_value;
8899 optional<int64_t> self_bdim;
8900 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8901 auto results = batch_rule(self_value, self_bdim, other);
8902 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8903}
8904template <typename batch_rule_t, batch_rule_t batch_rule>
8905at::Tensor & mul__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
8906 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8907 auto maybe_layer = maybeCurrentDynamicLayer();
8908 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
8909 int64_t cur_level = maybe_layer->layerId();
8910 if (!isBatchedAtLevel(self, cur_level)) {
8911 return at::_ops::mul__Scalar::call(self, other);
8912 }
8913 Tensor self_value;
8914 optional<int64_t> self_bdim;
8915 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8916 batch_rule(self_value, self_bdim, other);
8917 return self;
8918}
8919template <typename batch_rule_t, batch_rule_t batch_rule>
8920at::Tensor multiply_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
8921 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8922 auto maybe_layer = maybeCurrentDynamicLayer();
8923 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8924 int64_t cur_level = maybe_layer->layerId();
8925 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
8926 return at::_ops::multiply_Tensor::call(self, other);
8927 }
8928 Tensor self_value;
8929 optional<int64_t> self_bdim;
8930 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8931 Tensor other_value;
8932 optional<int64_t> other_bdim;
8933 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
8934 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
8935 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8936}
8937template <typename batch_rule_t, batch_rule_t batch_rule>
8938at::Tensor & multiply__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
8939 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8940 auto maybe_layer = maybeCurrentDynamicLayer();
8941 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
8942 int64_t cur_level = maybe_layer->layerId();
8943 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
8944 return at::_ops::multiply__Tensor::call(self, other);
8945 }
8946 Tensor self_value;
8947 optional<int64_t> self_bdim;
8948 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8949 Tensor other_value;
8950 optional<int64_t> other_bdim;
8951 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
8952 batch_rule(self_value, self_bdim, other_value, other_bdim);
8953 return self;
8954}
8955template <typename batch_rule_t, batch_rule_t batch_rule>
8956at::Tensor multiply_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
8957 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8958 auto maybe_layer = maybeCurrentDynamicLayer();
8959 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8960 int64_t cur_level = maybe_layer->layerId();
8961 if (!isBatchedAtLevel(self, cur_level)) {
8962 return at::_ops::multiply_Scalar::call(self, other);
8963 }
8964 Tensor self_value;
8965 optional<int64_t> self_bdim;
8966 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8967 auto results = batch_rule(self_value, self_bdim, other);
8968 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
8969}
8970template <typename batch_rule_t, batch_rule_t batch_rule>
8971at::Tensor & multiply__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
8972 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8973 auto maybe_layer = maybeCurrentDynamicLayer();
8974 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
8975 int64_t cur_level = maybe_layer->layerId();
8976 if (!isBatchedAtLevel(self, cur_level)) {
8977 return at::_ops::multiply__Scalar::call(self, other);
8978 }
8979 Tensor self_value;
8980 optional<int64_t> self_bdim;
8981 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8982 batch_rule(self_value, self_bdim, other);
8983 return self;
8984}
8985template <typename batch_rule_t, batch_rule_t batch_rule>
8986at::Tensor mv_generated_plumbing(const at::Tensor & self, const at::Tensor & vec) {
8987 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
8988 auto maybe_layer = maybeCurrentDynamicLayer();
8989 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
8990 int64_t cur_level = maybe_layer->layerId();
8991 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
8992 return at::_ops::mv::call(self, vec);
8993 }
8994 Tensor self_value;
8995 optional<int64_t> self_bdim;
8996 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
8997 Tensor vec_value;
8998 optional<int64_t> vec_bdim;
8999 std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
9000 auto results = batch_rule(self_value, self_bdim, vec_value, vec_bdim);
9001 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9002}
9003template <typename batch_rule_t, batch_rule_t batch_rule>
9004at::Tensor mvlgamma_generated_plumbing(const at::Tensor & self, int64_t p) {
9005 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9006 auto maybe_layer = maybeCurrentDynamicLayer();
9007 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9008 int64_t cur_level = maybe_layer->layerId();
9009 if (!isBatchedAtLevel(self, cur_level)) {
9010 return at::_ops::mvlgamma::call(self, p);
9011 }
9012 Tensor self_value;
9013 optional<int64_t> self_bdim;
9014 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9015 auto results = batch_rule(self_value, self_bdim, p);
9016 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9017}
9018template <typename batch_rule_t, batch_rule_t batch_rule>
9019at::Tensor & mvlgamma__generated_plumbing(at::Tensor & self, int64_t p) {
9020 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9021 auto maybe_layer = maybeCurrentDynamicLayer();
9022 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
9023 int64_t cur_level = maybe_layer->layerId();
9024 if (!isBatchedAtLevel(self, cur_level)) {
9025 return at::_ops::mvlgamma_::call(self, p);
9026 }
9027 Tensor self_value;
9028 optional<int64_t> self_bdim;
9029 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9030 batch_rule(self_value, self_bdim, p);
9031 return self;
9032}
9033template <typename batch_rule_t, batch_rule_t batch_rule>
9034at::Tensor narrow_copy_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
9035 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9036 auto maybe_layer = maybeCurrentDynamicLayer();
9037 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9038 int64_t cur_level = maybe_layer->layerId();
9039 if (!isBatchedAtLevel(self, cur_level)) {
9040 return at::_ops::narrow_copy::call(self, dim, start, length);
9041 }
9042 Tensor self_value;
9043 optional<int64_t> self_bdim;
9044 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9045 auto results = batch_rule(self_value, self_bdim, dim, start, length);
9046 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9047}
9048template <typename batch_rule_t, batch_rule_t batch_rule>
9049at::Tensor narrow_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
9050 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9051 auto maybe_layer = maybeCurrentDynamicLayer();
9052 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9053 int64_t cur_level = maybe_layer->layerId();
9054 if (!isBatchedAtLevel(self, cur_level)) {
9055 return at::_ops::narrow::call(self, dim, start, length);
9056 }
9057 Tensor self_value;
9058 optional<int64_t> self_bdim;
9059 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9060 auto results = batch_rule(self_value, self_bdim, dim, start, length);
9061 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9062}
9063template <typename batch_rule_t, batch_rule_t batch_rule>
9064at::Tensor narrow_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
9065 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9066 auto maybe_layer = maybeCurrentDynamicLayer();
9067 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9068 int64_t cur_level = maybe_layer->layerId();
9069 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(start, cur_level)) {
9070 return at::_ops::narrow_Tensor::call(self, dim, start, length);
9071 }
9072 Tensor self_value;
9073 optional<int64_t> self_bdim;
9074 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9075 Tensor start_value;
9076 optional<int64_t> start_bdim;
9077 std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level);
9078 auto results = batch_rule(self_value, self_bdim, dim, start_value, start_bdim, length);
9079 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9080}
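// Note: for ops with optional tensor arguments (weight, bias, running_mean, running_var, etc.),
// the wrapper only unwraps an argument when it is present; otherwise the corresponding value and
// bdim are passed to the batch rule as empty optionals.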
9081template <typename batch_rule_t, batch_rule_t batch_rule>
9082::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
9083 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9084 auto maybe_layer = maybeCurrentDynamicLayer();
9085 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9086 int64_t cur_level = maybe_layer->layerId();
9087 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
9088 return at::_ops::native_batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
9089 }
9090 Tensor input_value;
9091 optional<int64_t> input_bdim;
9092 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9093 optional<Tensor> weight_value;
9094 optional<int64_t> weight_bdim;
9095 if (weight) {
9096 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
9097 }
9098 optional<Tensor> bias_value;
9099 optional<int64_t> bias_bdim;
9100 if (bias) {
9101 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
9102 }
9103 optional<Tensor> running_mean_value;
9104 optional<int64_t> running_mean_bdim;
9105 if (running_mean) {
9106 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
9107 }
9108 optional<Tensor> running_var_value;
9109 optional<int64_t> running_var_bdim;
9110 if (running_var) {
9111 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
9112 }
9113 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
9114 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
9115}
9116template <typename batch_rule_t, batch_rule_t batch_rule>
9117::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
9118 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9119 auto maybe_layer = maybeCurrentDynamicLayer();
9120 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9121 int64_t cur_level = maybe_layer->layerId();
9122 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
9123 return at::_ops::_native_batch_norm_legit_no_stats::call(input, weight, bias, training, momentum, eps);
9124 }
9125 Tensor input_value;
9126 optional<int64_t> input_bdim;
9127 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9128 optional<Tensor> weight_value;
9129 optional<int64_t> weight_bdim;
9130 if (weight) {
9131 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
9132 }
9133 optional<Tensor> bias_value;
9134 optional<int64_t> bias_bdim;
9135 if (bias) {
9136 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
9137 }
9138 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, training, momentum, eps);
9139 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
9140}
9141template <typename batch_rule_t, batch_rule_t batch_rule>
9142::std::tuple<at::Tensor,at::Tensor> batch_norm_stats_generated_plumbing(const at::Tensor & input, double eps) {
9143 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9144 auto maybe_layer = maybeCurrentDynamicLayer();
9145 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9146 int64_t cur_level = maybe_layer->layerId();
9147 if (!isBatchedAtLevel(input, cur_level)) {
9148 return at::_ops::batch_norm_stats::call(input, eps);
9149 }
9150 Tensor input_value;
9151 optional<int64_t> input_bdim;
9152 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9153 auto results = batch_rule(input_value, input_bdim, eps);
9154 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
9155}
9156template <typename batch_rule_t, batch_rule_t batch_rule>
9157at::Tensor batch_norm_elemt_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
9158 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9159 auto maybe_layer = maybeCurrentDynamicLayer();
9160 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9161 int64_t cur_level = maybe_layer->layerId();
9162 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level)) {
9163 return at::_ops::batch_norm_elemt::call(input, weight, bias, mean, invstd, eps);
9164 }
9165 Tensor input_value;
9166 optional<int64_t> input_bdim;
9167 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9168 Tensor mean_value;
9169 optional<int64_t> mean_bdim;
9170 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
9171 Tensor invstd_value;
9172 optional<int64_t> invstd_bdim;
9173 std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
9174 optional<Tensor> weight_value;
9175 optional<int64_t> weight_bdim;
9176 if (weight) {
9177 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
9178 }
9179 optional<Tensor> bias_value;
9180 optional<int64_t> bias_bdim;
9181 if (bias) {
9182 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
9183 }
9184 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, eps);
9185 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9186}
9187template <typename batch_rule_t, batch_rule_t batch_rule>
9188::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
9189 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9190 auto maybe_layer = maybeCurrentDynamicLayer();
9191 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9192 int64_t cur_level = maybe_layer->layerId();
9193 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
9194 return at::_ops::batch_norm_gather_stats::call(input, mean, invstd, running_mean, running_var, momentum, eps, count);
9195 }
9196 Tensor input_value;
9197 optional<int64_t> input_bdim;
9198 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9199 Tensor mean_value;
9200 optional<int64_t> mean_bdim;
9201 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
9202 Tensor invstd_value;
9203 optional<int64_t> invstd_bdim;
9204 std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
9205 optional<Tensor> running_mean_value;
9206 optional<int64_t> running_mean_bdim;
9207 if (running_mean) {
9208 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
9209 }
9210 optional<Tensor> running_var_value;
9211 optional<int64_t> running_var_bdim;
9212 if (running_var) {
9213 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
9214 }
9215 auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, count);
9216 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
9217}
9218template <typename batch_rule_t, batch_rule_t batch_rule>
9219::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
9220 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9221 auto maybe_layer = maybeCurrentDynamicLayer();
9222 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9223 int64_t cur_level = maybe_layer->layerId();
9224 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(counts, cur_level)) {
9225 return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
9226 }
9227 Tensor input_value;
9228 optional<int64_t> input_bdim;
9229 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9230 Tensor mean_value;
9231 optional<int64_t> mean_bdim;
9232 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
9233 Tensor invstd_value;
9234 optional<int64_t> invstd_bdim;
9235 std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
9236 Tensor counts_value;
9237 optional<int64_t> counts_bdim;
9238 std::tie(counts_value, counts_bdim) = unwrapTensorAtLevel(counts, cur_level);
9239 optional<Tensor> running_mean_value;
9240 optional<int64_t> running_mean_bdim;
9241 if (running_mean) {
9242 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
9243 }
9244 optional<Tensor> running_var_value;
9245 optional<int64_t> running_var_bdim;
9246 if (running_var) {
9247 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
9248 }
9249 auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, counts_value, counts_bdim);
9250 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
9251}
9252template <typename batch_rule_t, batch_rule_t batch_rule>
9253::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
9254 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9255 auto maybe_layer = maybeCurrentDynamicLayer();
9256 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9257 int64_t cur_level = maybe_layer->layerId();
9258 if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_invstd, cur_level)) {
9259 return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
9260 }
9261 Tensor grad_out_value;
9262 optional<int64_t> grad_out_bdim;
9263 std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
9264 Tensor input_value;
9265 optional<int64_t> input_bdim;
9266 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9267 optional<Tensor> weight_value;
9268 optional<int64_t> weight_bdim;
9269 if (weight) {
9270 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
9271 }
9272 optional<Tensor> running_mean_value;
9273 optional<int64_t> running_mean_bdim;
9274 if (running_mean) {
9275 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
9276 }
9277 optional<Tensor> running_var_value;
9278 optional<int64_t> running_var_bdim;
9279 if (running_var) {
9280 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
9281 }
9282 optional<Tensor> save_mean_value;
9283 optional<int64_t> save_mean_bdim;
9284 if (save_mean) {
9285 std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
9286 }
9287 optional<Tensor> save_invstd_value;
9288 optional<int64_t> save_invstd_bdim;
9289 if (save_invstd) {
9290 std::tie(save_invstd_value, save_invstd_bdim) = unwrapTensorAtLevel(save_invstd.value(), cur_level);
9291 }
9292 auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_invstd_value, save_invstd_bdim, train, eps, output_mask);
9293 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
9294}
9295template <typename batch_rule_t, batch_rule_t batch_rule>
9296::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
9297 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9298 auto maybe_layer = maybeCurrentDynamicLayer();
9299 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9300 int64_t cur_level = maybe_layer->layerId();
9301 if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
9302 return at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
9303 }
9304 Tensor grad_out_value;
9305 optional<int64_t> grad_out_bdim;
9306 std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
9307 Tensor input_value;
9308 optional<int64_t> input_bdim;
9309 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9310 Tensor mean_value;
9311 optional<int64_t> mean_bdim;
9312 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
9313 Tensor invstd_value;
9314 optional<int64_t> invstd_bdim;
9315 std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
9316 optional<Tensor> weight_value;
9317 optional<int64_t> weight_bdim;
9318 if (weight) {
9319 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
9320 }
9321 auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, input_g, weight_g, bias_g);
9322 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
9323}
9324template <typename batch_rule_t, batch_rule_t batch_rule>
9325at::Tensor batch_norm_backward_elemt_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {
9326 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9327 auto maybe_layer = maybeCurrentDynamicLayer();
9328 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9329 int64_t cur_level = maybe_layer->layerId();
9330 if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mean_dy, cur_level) && !isBatchedAtLevel(mean_dy_xmu, cur_level) && !isBatchedAtLevel(count, cur_level)) {
9331 return at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count);
9332 }
9333 Tensor grad_out_value;
9334 optional<int64_t> grad_out_bdim;
9335 std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
9336 Tensor input_value;
9337 optional<int64_t> input_bdim;
9338 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9339 Tensor mean_value;
9340 optional<int64_t> mean_bdim;
9341 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
9342 Tensor invstd_value;
9343 optional<int64_t> invstd_bdim;
9344 std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
9345 Tensor mean_dy_value;
9346 optional<int64_t> mean_dy_bdim;
9347 std::tie(mean_dy_value, mean_dy_bdim) = unwrapTensorAtLevel(mean_dy, cur_level);
9348 Tensor mean_dy_xmu_value;
9349 optional<int64_t> mean_dy_xmu_bdim;
9350 std::tie(mean_dy_xmu_value, mean_dy_xmu_bdim) = unwrapTensorAtLevel(mean_dy_xmu, cur_level);
9351 Tensor count_value;
9352 optional<int64_t> count_bdim;
9353 std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level);
9354 optional<Tensor> weight_value;
9355 optional<int64_t> weight_bdim;
9356 if (weight) {
9357 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
9358 }
9359 auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, mean_dy_value, mean_dy_bdim, mean_dy_xmu_value, mean_dy_xmu_bdim, count_value, count_bdim);
9360 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9361}
9362template <typename batch_rule_t, batch_rule_t batch_rule>
9363::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
9364 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9365 auto maybe_layer = maybeCurrentDynamicLayer();
9366 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9367 int64_t cur_level = maybe_layer->layerId();
9368 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
9369 return at::_ops::batch_norm_update_stats::call(input, running_mean, running_var, momentum);
9370 }
9371 Tensor input_value;
9372 optional<int64_t> input_bdim;
9373 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9374 optional<Tensor> running_mean_value;
9375 optional<int64_t> running_mean_bdim;
9376 if (running_mean) {
9377 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
9378 }
9379 optional<Tensor> running_var_value;
9380 optional<int64_t> running_var_bdim;
9381 if (running_var) {
9382 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
9383 }
9384 auto results = batch_rule(input_value, input_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum);
9385 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
9386}
9387template <typename batch_rule_t, batch_rule_t batch_rule>
9388at::Tensor _nnpack_spatial_convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride) {
9389 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9390 auto maybe_layer = maybeCurrentDynamicLayer();
9391 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9392 int64_t cur_level = maybe_layer->layerId();
9393 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
9394 return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
9395 }
9396 Tensor input_value;
9397 optional<int64_t> input_bdim;
9398 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9399 Tensor weight_value;
9400 optional<int64_t> weight_bdim;
9401 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
9402 optional<Tensor> bias_value;
9403 optional<int64_t> bias_bdim;
9404 if (bias) {
9405 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
9406 }
9407 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride);
9408 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9409}
9410template <typename batch_rule_t, batch_rule_t batch_rule>
9411at::Tensor ones_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
9412 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9413 auto maybe_layer = maybeCurrentDynamicLayer();
9414 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9415 int64_t cur_level = maybe_layer->layerId();
9416 if (!isBatchedAtLevel(self, cur_level)) {
9417 return at::_ops::ones_like::call(self, dtype, layout, device, pin_memory, memory_format);
9418 }
9419 Tensor self_value;
9420 optional<int64_t> self_bdim;
9421 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9422 auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
9423 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9424}
9425template <typename batch_rule_t, batch_rule_t batch_rule>
9426at::Tensor pairwise_distance_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
9427 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9428 auto maybe_layer = maybeCurrentDynamicLayer();
9429 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9430 int64_t cur_level = maybe_layer->layerId();
9431 if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
9432 return at::_ops::pairwise_distance::call(x1, x2, p, eps, keepdim);
9433 }
9434 Tensor x1_value;
9435 optional<int64_t> x1_bdim;
9436 std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
9437 Tensor x2_value;
9438 optional<int64_t> x2_bdim;
9439 std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
9440 auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, eps, keepdim);
9441 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9442}
9443template <typename batch_rule_t, batch_rule_t batch_rule>
9444at::Tensor cdist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
9445 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9446 auto maybe_layer = maybeCurrentDynamicLayer();
9447 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9448 int64_t cur_level = maybe_layer->layerId();
9449 if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
9450 return at::_ops::cdist::call(x1, x2, p, compute_mode);
9451 }
9452 Tensor x1_value;
9453 optional<int64_t> x1_bdim;
9454 std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
9455 Tensor x2_value;
9456 optional<int64_t> x2_bdim;
9457 std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
9458 auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
9459 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9460}
9461template <typename batch_rule_t, batch_rule_t batch_rule>
9462at::Tensor _euclidean_dist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2) {
9463 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9464 auto maybe_layer = maybeCurrentDynamicLayer();
9465 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9466 int64_t cur_level = maybe_layer->layerId();
9467 if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
9468 return at::_ops::_euclidean_dist::call(x1, x2);
9469 }
9470 Tensor x1_value;
9471 optional<int64_t> x1_bdim;
9472 std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
9473 Tensor x2_value;
9474 optional<int64_t> x2_bdim;
9475 std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
9476 auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim);
9477 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9478}
9479template <typename batch_rule_t, batch_rule_t batch_rule>
9480at::Tensor _cdist_forward_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
9481 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9482 auto maybe_layer = maybeCurrentDynamicLayer();
9483 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9484 int64_t cur_level = maybe_layer->layerId();
9485 if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
9486 return at::_ops::_cdist_forward::call(x1, x2, p, compute_mode);
9487 }
9488 Tensor x1_value;
9489 optional<int64_t> x1_bdim;
9490 std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
9491 Tensor x2_value;
9492 optional<int64_t> x2_bdim;
9493 std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
9494 auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
9495 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9496}
9497template <typename batch_rule_t, batch_rule_t batch_rule>
9498at::Tensor _cdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
9499 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9500 auto maybe_layer = maybeCurrentDynamicLayer();
9501 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9502 int64_t cur_level = maybe_layer->layerId();
9503 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level) && !isBatchedAtLevel(cdist, cur_level)) {
9504 return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist);
9505 }
9506 Tensor grad_value;
9507 optional<int64_t> grad_bdim;
9508 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
9509 Tensor x1_value;
9510 optional<int64_t> x1_bdim;
9511 std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
9512 Tensor x2_value;
9513 optional<int64_t> x2_bdim;
9514 std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
9515 Tensor cdist_value;
9516 optional<int64_t> cdist_bdim;
9517 std::tie(cdist_value, cdist_bdim) = unwrapTensorAtLevel(cdist, cur_level);
9518 auto results = batch_rule(grad_value, grad_bdim, x1_value, x1_bdim, x2_value, x2_bdim, p, cdist_value, cdist_bdim);
9519 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9520}
9521template <typename batch_rule_t, batch_rule_t batch_rule>
9522at::Tensor pdist_generated_plumbing(const at::Tensor & self, double p) {
9523 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9524 auto maybe_layer = maybeCurrentDynamicLayer();
9525 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9526 int64_t cur_level = maybe_layer->layerId();
9527 if (!isBatchedAtLevel(self, cur_level)) {
9528 return at::_ops::pdist::call(self, p);
9529 }
9530 Tensor self_value;
9531 optional<int64_t> self_bdim;
9532 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9533 auto results = batch_rule(self_value, self_bdim, p);
9534 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9535}
9536template <typename batch_rule_t, batch_rule_t batch_rule>
9537at::Tensor _pdist_forward_generated_plumbing(const at::Tensor & self, double p) {
9538 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9539 auto maybe_layer = maybeCurrentDynamicLayer();
9540 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9541 int64_t cur_level = maybe_layer->layerId();
9542 if (!isBatchedAtLevel(self, cur_level)) {
9543 return at::_ops::_pdist_forward::call(self, p);
9544 }
9545 Tensor self_value;
9546 optional<int64_t> self_bdim;
9547 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9548 auto results = batch_rule(self_value, self_bdim, p);
9549 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9550}
9551template <typename batch_rule_t, batch_rule_t batch_rule>
9552at::Tensor _pdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
9553 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9554 auto maybe_layer = maybeCurrentDynamicLayer();
9555 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9556 int64_t cur_level = maybe_layer->layerId();
9557 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(pdist, cur_level)) {
9558 return at::_ops::_pdist_backward::call(grad, self, p, pdist);
9559 }
9560 Tensor grad_value;
9561 optional<int64_t> grad_bdim;
9562 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
9563 Tensor self_value;
9564 optional<int64_t> self_bdim;
9565 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9566 Tensor pdist_value;
9567 optional<int64_t> pdist_bdim;
9568 std::tie(pdist_value, pdist_bdim) = unwrapTensorAtLevel(pdist, cur_level);
9569 auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, p, pdist_value, pdist_bdim);
9570 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9571}
9572template <typename batch_rule_t, batch_rule_t batch_rule>
9573at::Tensor cosine_similarity_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
9574 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9575 auto maybe_layer = maybeCurrentDynamicLayer();
9576 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9577 int64_t cur_level = maybe_layer->layerId();
9578 if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
9579 return at::_ops::cosine_similarity::call(x1, x2, dim, eps);
9580 }
9581 Tensor x1_value;
9582 optional<int64_t> x1_bdim;
9583 std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
9584 Tensor x2_value;
9585 optional<int64_t> x2_bdim;
9586 std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
9587 auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, dim, eps);
9588 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9589}
9590template <typename batch_rule_t, batch_rule_t batch_rule>
9591at::Tensor permute_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
9592 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9593 auto maybe_layer = maybeCurrentDynamicLayer();
9594 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9595 int64_t cur_level = maybe_layer->layerId();
9596 if (!isBatchedAtLevel(self, cur_level)) {
9597 return at::_ops::permute::call(self, dims);
9598 }
9599 Tensor self_value;
9600 optional<int64_t> self_bdim;
9601 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9602 auto results = batch_rule(self_value, self_bdim, dims);
9603 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9604}
9605template <typename batch_rule_t, batch_rule_t batch_rule>
9606at::Tensor movedim_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
9607 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9608 auto maybe_layer = maybeCurrentDynamicLayer();
9609 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9610 int64_t cur_level = maybe_layer->layerId();
9611 if (!isBatchedAtLevel(self, cur_level)) {
9612 return at::_ops::movedim_intlist::call(self, source, destination);
9613 }
9614 Tensor self_value;
9615 optional<int64_t> self_bdim;
9616 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9617 auto results = batch_rule(self_value, self_bdim, source, destination);
9618 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9619}
9620template <typename batch_rule_t, batch_rule_t batch_rule>
9621at::Tensor movedim_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
9622 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9623 auto maybe_layer = maybeCurrentDynamicLayer();
9624 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9625 int64_t cur_level = maybe_layer->layerId();
9626 if (!isBatchedAtLevel(self, cur_level)) {
9627 return at::_ops::movedim_int::call(self, source, destination);
9628 }
9629 Tensor self_value;
9630 optional<int64_t> self_bdim;
9631 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9632 auto results = batch_rule(self_value, self_bdim, source, destination);
9633 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9634}
9635template <typename batch_rule_t, batch_rule_t batch_rule>
9636at::Tensor moveaxis_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
9637 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9638 auto maybe_layer = maybeCurrentDynamicLayer();
9639 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9640 int64_t cur_level = maybe_layer->layerId();
9641 if (!isBatchedAtLevel(self, cur_level)) {
9642 return at::_ops::moveaxis_intlist::call(self, source, destination);
9643 }
9644 Tensor self_value;
9645 optional<int64_t> self_bdim;
9646 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9647 auto results = batch_rule(self_value, self_bdim, source, destination);
9648 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9649}
9650template <typename batch_rule_t, batch_rule_t batch_rule>
9651at::Tensor moveaxis_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
9652 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9653 auto maybe_layer = maybeCurrentDynamicLayer();
9654 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9655 int64_t cur_level = maybe_layer->layerId();
9656 if (!isBatchedAtLevel(self, cur_level)) {
9657 return at::_ops::moveaxis_int::call(self, source, destination);
9658 }
9659 Tensor self_value;
9660 optional<int64_t> self_bdim;
9661 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9662 auto results = batch_rule(self_value, self_bdim, source, destination);
9663 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9664}
9665template <typename batch_rule_t, batch_rule_t batch_rule>
9666at::Tensor numpy_T_generated_plumbing(const at::Tensor & self) {
9667 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9668 auto maybe_layer = maybeCurrentDynamicLayer();
9669 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9670 int64_t cur_level = maybe_layer->layerId();
9671 if (!isBatchedAtLevel(self, cur_level)) {
9672 return at::_ops::numpy_T::call(self);
9673 }
9674 Tensor self_value;
9675 optional<int64_t> self_bdim;
9676 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9677 auto results = batch_rule(self_value, self_bdim);
9678 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9679}
9680template <typename batch_rule_t, batch_rule_t batch_rule>
9681at::Tensor matrix_H_generated_plumbing(const at::Tensor & self) {
9682 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9683 auto maybe_layer = maybeCurrentDynamicLayer();
9684 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9685 int64_t cur_level = maybe_layer->layerId();
9686 if (!isBatchedAtLevel(self, cur_level)) {
9687 return at::_ops::matrix_H::call(self);
9688 }
9689 Tensor self_value;
9690 optional<int64_t> self_bdim;
9691 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9692 auto results = batch_rule(self_value, self_bdim);
9693 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9694}
9695template <typename batch_rule_t, batch_rule_t batch_rule>
9696at::Tensor mT_generated_plumbing(const at::Tensor & self) {
9697 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9698 auto maybe_layer = maybeCurrentDynamicLayer();
9699 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9700 int64_t cur_level = maybe_layer->layerId();
9701 if (!isBatchedAtLevel(self, cur_level)) {
9702 return at::_ops::mT::call(self);
9703 }
9704 Tensor self_value;
9705 optional<int64_t> self_bdim;
9706 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9707 auto results = batch_rule(self_value, self_bdim);
9708 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9709}
9710template <typename batch_rule_t, batch_rule_t batch_rule>
9711at::Tensor mH_generated_plumbing(const at::Tensor & self) {
9712 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9713 auto maybe_layer = maybeCurrentDynamicLayer();
9714 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9715 int64_t cur_level = maybe_layer->layerId();
9716 if (!isBatchedAtLevel(self, cur_level)) {
9717 return at::_ops::mH::call(self);
9718 }
9719 Tensor self_value;
9720 optional<int64_t> self_bdim;
9721 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9722 auto results = batch_rule(self_value, self_bdim);
9723 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9724}
9725template <typename batch_rule_t, batch_rule_t batch_rule>
9726at::Tensor adjoint_generated_plumbing(const at::Tensor & self) {
9727 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9728 auto maybe_layer = maybeCurrentDynamicLayer();
9729 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9730 int64_t cur_level = maybe_layer->layerId();
9731 if (!isBatchedAtLevel(self, cur_level)) {
9732 return at::_ops::adjoint::call(self);
9733 }
9734 Tensor self_value;
9735 optional<int64_t> self_bdim;
9736 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9737 auto results = batch_rule(self_value, self_bdim);
9738 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9739}
9740template <typename batch_rule_t, batch_rule_t batch_rule>
9741at::Tensor pixel_shuffle_generated_plumbing(const at::Tensor & self, int64_t upscale_factor) {
9742 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9743 auto maybe_layer = maybeCurrentDynamicLayer();
9744 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9745 int64_t cur_level = maybe_layer->layerId();
9746 if (!isBatchedAtLevel(self, cur_level)) {
9747 return at::_ops::pixel_shuffle::call(self, upscale_factor);
9748 }
9749 Tensor self_value;
9750 optional<int64_t> self_bdim;
9751 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9752 auto results = batch_rule(self_value, self_bdim, upscale_factor);
9753 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9754}
9755template <typename batch_rule_t, batch_rule_t batch_rule>
9756at::Tensor pixel_unshuffle_generated_plumbing(const at::Tensor & self, int64_t downscale_factor) {
9757 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9758 auto maybe_layer = maybeCurrentDynamicLayer();
9759 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9760 int64_t cur_level = maybe_layer->layerId();
9761 if (!isBatchedAtLevel(self, cur_level)) {
9762 return at::_ops::pixel_unshuffle::call(self, downscale_factor);
9763 }
9764 Tensor self_value;
9765 optional<int64_t> self_bdim;
9766 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9767 auto results = batch_rule(self_value, self_bdim, downscale_factor);
9768 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9769}
9770template <typename batch_rule_t, batch_rule_t batch_rule>
9771at::Tensor channel_shuffle_generated_plumbing(const at::Tensor & self, int64_t groups) {
9772 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9773 auto maybe_layer = maybeCurrentDynamicLayer();
9774 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9775 int64_t cur_level = maybe_layer->layerId();
9776 if (!isBatchedAtLevel(self, cur_level)) {
9777 return at::_ops::channel_shuffle::call(self, groups);
9778 }
9779 Tensor self_value;
9780 optional<int64_t> self_bdim;
9781 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9782 auto results = batch_rule(self_value, self_bdim, groups);
9783 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9784}
9785template <typename batch_rule_t, batch_rule_t batch_rule>
9786at::Tensor native_channel_shuffle_generated_plumbing(const at::Tensor & self, int64_t groups) {
9787 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9788 auto maybe_layer = maybeCurrentDynamicLayer();
9789 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9790 int64_t cur_level = maybe_layer->layerId();
9791 if (!isBatchedAtLevel(self, cur_level)) {
9792 return at::_ops::native_channel_shuffle::call(self, groups);
9793 }
9794 Tensor self_value;
9795 optional<int64_t> self_bdim;
9796 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9797 auto results = batch_rule(self_value, self_bdim, groups);
9798 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9799}
9800template <typename batch_rule_t, batch_rule_t batch_rule>
9801at::Tensor pin_memory_generated_plumbing(const at::Tensor & self, c10::optional<at::Device> device) {
9802 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9803 auto maybe_layer = maybeCurrentDynamicLayer();
9804 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9805 int64_t cur_level = maybe_layer->layerId();
9806 if (!isBatchedAtLevel(self, cur_level)) {
9807 return at::_ops::pin_memory::call(self, device);
9808 }
9809 Tensor self_value;
9810 optional<int64_t> self_bdim;
9811 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9812 auto results = batch_rule(self_value, self_bdim, device);
9813 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9814}
9815template <typename batch_rule_t, batch_rule_t batch_rule>
9816at::Tensor _pin_memory_generated_plumbing(const at::Tensor & self, c10::optional<at::Device> device) {
9817 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9818 auto maybe_layer = maybeCurrentDynamicLayer();
9819 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9820 int64_t cur_level = maybe_layer->layerId();
9821 if (!isBatchedAtLevel(self, cur_level)) {
9822 return at::_ops::_pin_memory::call(self, device);
9823 }
9824 Tensor self_value;
9825 optional<int64_t> self_bdim;
9826 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9827 auto results = batch_rule(self_value, self_bdim, device);
9828 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9829}
9830template <typename batch_rule_t, batch_rule_t batch_rule>
9831at::Tensor pinverse_generated_plumbing(const at::Tensor & self, double rcond) {
9832 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9833 auto maybe_layer = maybeCurrentDynamicLayer();
9834 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9835 int64_t cur_level = maybe_layer->layerId();
9836 if (!isBatchedAtLevel(self, cur_level)) {
9837 return at::_ops::pinverse::call(self, rcond);
9838 }
9839 Tensor self_value;
9840 optional<int64_t> self_bdim;
9841 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9842 auto results = batch_rule(self_value, self_bdim, rcond);
9843 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9844}
9845template <typename batch_rule_t, batch_rule_t batch_rule>
9846at::Tensor poisson_nll_loss_generated_plumbing(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
9847 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9848 auto maybe_layer = maybeCurrentDynamicLayer();
9849 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9850 int64_t cur_level = maybe_layer->layerId();
9851 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(target, cur_level)) {
9852 return at::_ops::poisson_nll_loss::call(input, target, log_input, full, eps, reduction);
9853 }
9854 Tensor input_value;
9855 optional<int64_t> input_bdim;
9856 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
9857 Tensor target_value;
9858 optional<int64_t> target_bdim;
9859 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
9860 auto results = batch_rule(input_value, input_bdim, target_value, target_bdim, log_input, full, eps, reduction);
9861 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9862}
9863template <typename batch_rule_t, batch_rule_t batch_rule>
9864at::Tensor rad2deg_generated_plumbing(const at::Tensor & self) {
9865 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9866 auto maybe_layer = maybeCurrentDynamicLayer();
9867 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9868 int64_t cur_level = maybe_layer->layerId();
9869 if (!isBatchedAtLevel(self, cur_level)) {
9870 return at::_ops::rad2deg::call(self);
9871 }
9872 Tensor self_value;
9873 optional<int64_t> self_bdim;
9874 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9875 auto results = batch_rule(self_value, self_bdim);
9876 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9877}
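// Note: in-place overloads (the *_ variants, e.g. rad2deg_ below) use the
// in-place plumbing pattern: the batch rule is invoked for its side effects
// on self_value (its return value is ignored), so nothing is re-wrapped with
// makeBatched and the original, still-batched self is returned by reference.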
9878template <typename batch_rule_t, batch_rule_t batch_rule>
9879at::Tensor & rad2deg__generated_plumbing(at::Tensor & self) {
9880 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9881 auto maybe_layer = maybeCurrentDynamicLayer();
9882 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
9883 int64_t cur_level = maybe_layer->layerId();
9884 if (!isBatchedAtLevel(self, cur_level)) {
9885 return at::_ops::rad2deg_::call(self);
9886 }
9887 Tensor self_value;
9888 optional<int64_t> self_bdim;
9889 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9890 batch_rule(self_value, self_bdim);
9891 return self;
9892}
9893template <typename batch_rule_t, batch_rule_t batch_rule>
9894at::Tensor deg2rad_generated_plumbing(const at::Tensor & self) {
9895 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9896 auto maybe_layer = maybeCurrentDynamicLayer();
9897 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9898 int64_t cur_level = maybe_layer->layerId();
9899 if (!isBatchedAtLevel(self, cur_level)) {
9900 return at::_ops::deg2rad::call(self);
9901 }
9902 Tensor self_value;
9903 optional<int64_t> self_bdim;
9904 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9905 auto results = batch_rule(self_value, self_bdim);
9906 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9907}
9908template <typename batch_rule_t, batch_rule_t batch_rule>
9909at::Tensor & deg2rad__generated_plumbing(at::Tensor & self) {
9910 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9911 auto maybe_layer = maybeCurrentDynamicLayer();
9912 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
9913 int64_t cur_level = maybe_layer->layerId();
9914 if (!isBatchedAtLevel(self, cur_level)) {
9915 return at::_ops::deg2rad_::call(self);
9916 }
9917 Tensor self_value;
9918 optional<int64_t> self_bdim;
9919 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9920 batch_rule(self_value, self_bdim);
9921 return self;
9922}
9923template <typename batch_rule_t, batch_rule_t batch_rule>
9924at::Tensor rand_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
9925 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9926 auto maybe_layer = maybeCurrentDynamicLayer();
9927 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9928 int64_t cur_level = maybe_layer->layerId();
9929 if (!isBatchedAtLevel(self, cur_level)) {
9930 return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format);
9931 }
9932 Tensor self_value;
9933 optional<int64_t> self_bdim;
9934 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9935 auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
9936 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9937}
9938template <typename batch_rule_t, batch_rule_t batch_rule>
9939at::Tensor randint_like_generated_plumbing(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
9940 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9941 auto maybe_layer = maybeCurrentDynamicLayer();
9942 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9943 int64_t cur_level = maybe_layer->layerId();
9944 if (!isBatchedAtLevel(self, cur_level)) {
9945 return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
9946 }
9947 Tensor self_value;
9948 optional<int64_t> self_bdim;
9949 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9950 auto results = batch_rule(self_value, self_bdim, high, dtype, layout, device, pin_memory, memory_format);
9951 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9952}
9953template <typename batch_rule_t, batch_rule_t batch_rule>
9954at::Tensor randint_like_low_dtype_generated_plumbing(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
9955 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9956 auto maybe_layer = maybeCurrentDynamicLayer();
9957 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9958 int64_t cur_level = maybe_layer->layerId();
9959 if (!isBatchedAtLevel(self, cur_level)) {
9960 return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
9961 }
9962 Tensor self_value;
9963 optional<int64_t> self_bdim;
9964 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9965 auto results = batch_rule(self_value, self_bdim, low, high, dtype, layout, device, pin_memory, memory_format);
9966 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9967}
9968template <typename batch_rule_t, batch_rule_t batch_rule>
9969at::Tensor randn_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
9970 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9971 auto maybe_layer = maybeCurrentDynamicLayer();
9972 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9973 int64_t cur_level = maybe_layer->layerId();
9974 if (!isBatchedAtLevel(self, cur_level)) {
9975 return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format);
9976 }
9977 Tensor self_value;
9978 optional<int64_t> self_bdim;
9979 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9980 auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
9981 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9982}
9983template <typename batch_rule_t, batch_rule_t batch_rule>
9984at::Tensor ravel_generated_plumbing(const at::Tensor & self) {
9985 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
9986 auto maybe_layer = maybeCurrentDynamicLayer();
9987 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
9988 int64_t cur_level = maybe_layer->layerId();
9989 if (!isBatchedAtLevel(self, cur_level)) {
9990 return at::_ops::ravel::call(self);
9991 }
9992 Tensor self_value;
9993 optional<int64_t> self_bdim;
9994 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
9995 auto results = batch_rule(self_value, self_bdim);
9996 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
9997}
9998template <typename batch_rule_t, batch_rule_t batch_rule>
9999at::Tensor reciprocal_generated_plumbing(const at::Tensor & self) {
10000 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10001 auto maybe_layer = maybeCurrentDynamicLayer();
10002 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10003 int64_t cur_level = maybe_layer->layerId();
10004 if (!isBatchedAtLevel(self, cur_level)) {
10005 return at::_ops::reciprocal::call(self);
10006 }
10007 Tensor self_value;
10008 optional<int64_t> self_bdim;
10009 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10010 auto results = batch_rule(self_value, self_bdim);
10011 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10012}
10013template <typename batch_rule_t, batch_rule_t batch_rule>
10014at::Tensor & reciprocal__generated_plumbing(at::Tensor & self) {
10015 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10016 auto maybe_layer = maybeCurrentDynamicLayer();
10017 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10018 int64_t cur_level = maybe_layer->layerId();
10019 if (!isBatchedAtLevel(self, cur_level)) {
10020 return at::_ops::reciprocal_::call(self);
10021 }
10022 Tensor self_value;
10023 optional<int64_t> self_bdim;
10024 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10025 batch_rule(self_value, self_bdim);
10026 return self;
10027}
10028template <typename batch_rule_t, batch_rule_t batch_rule>
10029at::Tensor neg_generated_plumbing(const at::Tensor & self) {
10030 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10031 auto maybe_layer = maybeCurrentDynamicLayer();
10032 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10033 int64_t cur_level = maybe_layer->layerId();
10034 if (!isBatchedAtLevel(self, cur_level)) {
10035 return at::_ops::neg::call(self);
10036 }
10037 Tensor self_value;
10038 optional<int64_t> self_bdim;
10039 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10040 auto results = batch_rule(self_value, self_bdim);
10041 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10042}
10043template <typename batch_rule_t, batch_rule_t batch_rule>
10044at::Tensor & neg__generated_plumbing(at::Tensor & self) {
10045 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10046 auto maybe_layer = maybeCurrentDynamicLayer();
10047 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10048 int64_t cur_level = maybe_layer->layerId();
10049 if (!isBatchedAtLevel(self, cur_level)) {
10050 return at::_ops::neg_::call(self);
10051 }
10052 Tensor self_value;
10053 optional<int64_t> self_bdim;
10054 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10055 batch_rule(self_value, self_bdim);
10056 return self;
10057}
10058template <typename batch_rule_t, batch_rule_t batch_rule>
10059at::Tensor negative_generated_plumbing(const at::Tensor & self) {
10060 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10061 auto maybe_layer = maybeCurrentDynamicLayer();
10062 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10063 int64_t cur_level = maybe_layer->layerId();
10064 if (!isBatchedAtLevel(self, cur_level)) {
10065 return at::_ops::negative::call(self);
10066 }
10067 Tensor self_value;
10068 optional<int64_t> self_bdim;
10069 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10070 auto results = batch_rule(self_value, self_bdim);
10071 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10072}
10073template <typename batch_rule_t, batch_rule_t batch_rule>
10074at::Tensor & negative__generated_plumbing(at::Tensor & self) {
10075 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10076 auto maybe_layer = maybeCurrentDynamicLayer();
10077 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10078 int64_t cur_level = maybe_layer->layerId();
10079 if (!isBatchedAtLevel(self, cur_level)) {
10080 return at::_ops::negative_::call(self);
10081 }
10082 Tensor self_value;
10083 optional<int64_t> self_bdim;
10084 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10085 batch_rule(self_value, self_bdim);
10086 return self;
10087}
10088template <typename batch_rule_t, batch_rule_t batch_rule>
10089at::Tensor repeat_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef repeats) {
10090 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10091 auto maybe_layer = maybeCurrentDynamicLayer();
10092 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10093 int64_t cur_level = maybe_layer->layerId();
10094 if (!isBatchedAtLevel(self, cur_level)) {
10095 return at::_ops::repeat::call(self, repeats);
10096 }
10097 Tensor self_value;
10098 optional<int64_t> self_bdim;
10099 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10100 auto results = batch_rule(self_value, self_bdim, repeats);
10101 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10102}
10103template <typename batch_rule_t, batch_rule_t batch_rule>
10104at::Tensor repeat_interleave_Tensor_generated_plumbing(const at::Tensor & repeats, c10::optional<int64_t> output_size) {
10105 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10106 auto maybe_layer = maybeCurrentDynamicLayer();
10107 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10108 int64_t cur_level = maybe_layer->layerId();
10109 if (!isBatchedAtLevel(repeats, cur_level)) {
10110 return at::_ops::repeat_interleave_Tensor::call(repeats, output_size);
10111 }
10112 Tensor repeats_value;
10113 optional<int64_t> repeats_bdim;
10114 std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level);
10115 auto results = batch_rule(repeats_value, repeats_bdim, output_size);
10116 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10117}
10118template <typename batch_rule_t, batch_rule_t batch_rule>
10119at::Tensor repeat_interleave_self_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
10120 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10121 auto maybe_layer = maybeCurrentDynamicLayer();
10122 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10123 int64_t cur_level = maybe_layer->layerId();
10124 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(repeats, cur_level)) {
10125 return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size);
10126 }
10127 Tensor self_value;
10128 optional<int64_t> self_bdim;
10129 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10130 Tensor repeats_value;
10131 optional<int64_t> repeats_bdim;
10132 std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level);
10133 auto results = batch_rule(self_value, self_bdim, repeats_value, repeats_bdim, dim, output_size);
10134 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10135}
10136template <typename batch_rule_t, batch_rule_t batch_rule>
10137at::Tensor repeat_interleave_self_int_generated_plumbing(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) {
10138 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10139 auto maybe_layer = maybeCurrentDynamicLayer();
10140 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10141 int64_t cur_level = maybe_layer->layerId();
10142 if (!isBatchedAtLevel(self, cur_level)) {
10143 return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size);
10144 }
10145 Tensor self_value;
10146 optional<int64_t> self_bdim;
10147 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10148 auto results = batch_rule(self_value, self_bdim, repeats, dim, output_size);
10149 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10150}
10151template <typename batch_rule_t, batch_rule_t batch_rule>
10152at::Tensor reshape_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shape) {
10153 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10154 auto maybe_layer = maybeCurrentDynamicLayer();
10155 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10156 int64_t cur_level = maybe_layer->layerId();
10157 if (!isBatchedAtLevel(self, cur_level)) {
10158 return at::_ops::reshape::call(self, shape);
10159 }
10160 Tensor self_value;
10161 optional<int64_t> self_bdim;
10162 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10163 auto results = batch_rule(self_value, self_bdim, shape);
10164 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10165}
10166template <typename batch_rule_t, batch_rule_t batch_rule>
10167at::Tensor _reshape_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
10168 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10169 auto maybe_layer = maybeCurrentDynamicLayer();
10170 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10171 int64_t cur_level = maybe_layer->layerId();
10172 if (!isBatchedAtLevel(self, cur_level)) {
10173 return at::_ops::_reshape_copy::call(self, size);
10174 }
10175 Tensor self_value;
10176 optional<int64_t> self_bdim;
10177 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10178 auto results = batch_rule(self_value, self_bdim, size);
10179 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10180}
10181template <typename batch_rule_t, batch_rule_t batch_rule>
10182at::Tensor _reshape_alias_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
10183 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10184 auto maybe_layer = maybeCurrentDynamicLayer();
10185 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10186 int64_t cur_level = maybe_layer->layerId();
10187 if (!isBatchedAtLevel(self, cur_level)) {
10188 return at::_ops::_reshape_alias::call(self, size, stride);
10189 }
10190 Tensor self_value;
10191 optional<int64_t> self_bdim;
10192 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10193 auto results = batch_rule(self_value, self_bdim, size, stride);
10194 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10195}
10196template <typename batch_rule_t, batch_rule_t batch_rule>
10197at::Tensor _mkldnn_reshape_generated_plumbing(const at::Tensor & self, at::IntArrayRef shape) {
10198 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10199 auto maybe_layer = maybeCurrentDynamicLayer();
10200 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10201 int64_t cur_level = maybe_layer->layerId();
10202 if (!isBatchedAtLevel(self, cur_level)) {
10203 return at::_ops::_mkldnn_reshape::call(self, shape);
10204 }
10205 Tensor self_value;
10206 optional<int64_t> self_bdim;
10207 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10208 auto results = batch_rule(self_value, self_bdim, shape);
10209 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10210}
10211template <typename batch_rule_t, batch_rule_t batch_rule>
10212at::Tensor reshape_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
10213 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10214 auto maybe_layer = maybeCurrentDynamicLayer();
10215 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10216 int64_t cur_level = maybe_layer->layerId();
10217 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
10218 return at::_ops::reshape_as::call(self, other);
10219 }
10220 Tensor self_value;
10221 optional<int64_t> self_bdim;
10222 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10223 Tensor other_value;
10224 optional<int64_t> other_bdim;
10225 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
10226 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
10227 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10228}
10229template <typename batch_rule_t, batch_rule_t batch_rule>
10230at::Tensor round_generated_plumbing(const at::Tensor & self) {
10231 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10232 auto maybe_layer = maybeCurrentDynamicLayer();
10233 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10234 int64_t cur_level = maybe_layer->layerId();
10235 if (!isBatchedAtLevel(self, cur_level)) {
10236 return at::_ops::round::call(self);
10237 }
10238 Tensor self_value;
10239 optional<int64_t> self_bdim;
10240 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10241 auto results = batch_rule(self_value, self_bdim);
10242 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10243}
10244template <typename batch_rule_t, batch_rule_t batch_rule>
10245at::Tensor & round__generated_plumbing(at::Tensor & self) {
10246 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10247 auto maybe_layer = maybeCurrentDynamicLayer();
10248 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10249 int64_t cur_level = maybe_layer->layerId();
10250 if (!isBatchedAtLevel(self, cur_level)) {
10251 return at::_ops::round_::call(self);
10252 }
10253 Tensor self_value;
10254 optional<int64_t> self_bdim;
10255 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10256 batch_rule(self_value, self_bdim);
10257 return self;
10258}
10259template <typename batch_rule_t, batch_rule_t batch_rule>
10260at::Tensor round_decimals_generated_plumbing(const at::Tensor & self, int64_t decimals) {
10261 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10262 auto maybe_layer = maybeCurrentDynamicLayer();
10263 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10264 int64_t cur_level = maybe_layer->layerId();
10265 if (!isBatchedAtLevel(self, cur_level)) {
10266 return at::_ops::round_decimals::call(self, decimals);
10267 }
10268 Tensor self_value;
10269 optional<int64_t> self_bdim;
10270 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10271 auto results = batch_rule(self_value, self_bdim, decimals);
10272 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10273}
10274template <typename batch_rule_t, batch_rule_t batch_rule>
10275at::Tensor & round__decimals_generated_plumbing(at::Tensor & self, int64_t decimals) {
10276 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10277 auto maybe_layer = maybeCurrentDynamicLayer();
10278 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10279 int64_t cur_level = maybe_layer->layerId();
10280 if (!isBatchedAtLevel(self, cur_level)) {
10281 return at::_ops::round__decimals::call(self, decimals);
10282 }
10283 Tensor self_value;
10284 optional<int64_t> self_bdim;
10285 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10286 batch_rule(self_value, self_bdim, decimals);
10287 return self;
10288}
10289template <typename batch_rule_t, batch_rule_t batch_rule>
10290at::Tensor rrelu_generated_plumbing(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
10291 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10292 auto maybe_layer = maybeCurrentDynamicLayer();
10293 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10294 int64_t cur_level = maybe_layer->layerId();
10295 if (!isBatchedAtLevel(self, cur_level)) {
10296 return at::_ops::rrelu::call(self, lower, upper, training, generator);
10297 }
10298 Tensor self_value;
10299 optional<int64_t> self_bdim;
10300 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10301 auto results = batch_rule(self_value, self_bdim, lower, upper, training, generator);
10302 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10303}
10304template <typename batch_rule_t, batch_rule_t batch_rule>
10305at::Tensor & rrelu__generated_plumbing(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
10306 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10307 auto maybe_layer = maybeCurrentDynamicLayer();
10308 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10309 int64_t cur_level = maybe_layer->layerId();
10310 if (!isBatchedAtLevel(self, cur_level)) {
10311 return at::_ops::rrelu_::call(self, lower, upper, training, generator);
10312 }
10313 Tensor self_value;
10314 optional<int64_t> self_bdim;
10315 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10316 batch_rule(self_value, self_bdim, lower, upper, training, generator);
10317 return self;
10318}
10319template <typename batch_rule_t, batch_rule_t batch_rule>
10320at::Tensor relu_generated_plumbing(const at::Tensor & self) {
10321 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10322 auto maybe_layer = maybeCurrentDynamicLayer();
10323 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10324 int64_t cur_level = maybe_layer->layerId();
10325 if (!isBatchedAtLevel(self, cur_level)) {
10326 return at::_ops::relu::call(self);
10327 }
10328 Tensor self_value;
10329 optional<int64_t> self_bdim;
10330 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10331 auto results = batch_rule(self_value, self_bdim);
10332 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10333}
10334template <typename batch_rule_t, batch_rule_t batch_rule>
10335at::Tensor & relu__generated_plumbing(at::Tensor & self) {
10336 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10337 auto maybe_layer = maybeCurrentDynamicLayer();
10338 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10339 int64_t cur_level = maybe_layer->layerId();
10340 if (!isBatchedAtLevel(self, cur_level)) {
10341 return at::_ops::relu_::call(self);
10342 }
10343 Tensor self_value;
10344 optional<int64_t> self_bdim;
10345 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10346 batch_rule(self_value, self_bdim);
10347 return self;
10348}
10349template <typename batch_rule_t, batch_rule_t batch_rule>
10350at::Tensor relu6_generated_plumbing(const at::Tensor & self) {
10351 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10352 auto maybe_layer = maybeCurrentDynamicLayer();
10353 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10354 int64_t cur_level = maybe_layer->layerId();
10355 if (!isBatchedAtLevel(self, cur_level)) {
10356 return at::_ops::relu6::call(self);
10357 }
10358 Tensor self_value;
10359 optional<int64_t> self_bdim;
10360 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10361 auto results = batch_rule(self_value, self_bdim);
10362 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10363}
10364template <typename batch_rule_t, batch_rule_t batch_rule>
10365at::Tensor & relu6__generated_plumbing(at::Tensor & self) {
10366 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10367 auto maybe_layer = maybeCurrentDynamicLayer();
10368 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10369 int64_t cur_level = maybe_layer->layerId();
10370 if (!isBatchedAtLevel(self, cur_level)) {
10371 return at::_ops::relu6_::call(self);
10372 }
10373 Tensor self_value;
10374 optional<int64_t> self_bdim;
10375 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10376 batch_rule(self_value, self_bdim);
10377 return self;
10378}
10379template <typename batch_rule_t, batch_rule_t batch_rule>
10380at::Tensor prelu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
10381 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10382 auto maybe_layer = maybeCurrentDynamicLayer();
10383 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10384 int64_t cur_level = maybe_layer->layerId();
10385 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
10386 return at::_ops::prelu::call(self, weight);
10387 }
10388 Tensor self_value;
10389 optional<int64_t> self_bdim;
10390 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10391 Tensor weight_value;
10392 optional<int64_t> weight_bdim;
10393 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
10394 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
10395 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10396}
10397template <typename batch_rule_t, batch_rule_t batch_rule>
10398at::Tensor _prelu_kernel_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
10399 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10400 auto maybe_layer = maybeCurrentDynamicLayer();
10401 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10402 int64_t cur_level = maybe_layer->layerId();
10403 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
10404 return at::_ops::_prelu_kernel::call(self, weight);
10405 }
10406 Tensor self_value;
10407 optional<int64_t> self_bdim;
10408 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10409 Tensor weight_value;
10410 optional<int64_t> weight_bdim;
10411 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
10412 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
10413 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10414}
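// Note: multi-output plumbing. Ops returning a ::std::tuple re-wrap each
// (value, bdim) pair of the batch rule's result separately, as in
// _prelu_kernel_backward below, which builds two batched Tensors from
// std::get<0>/<1> and std::get<2>/<3> of the batch rule's output.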
10415template <typename batch_rule_t, batch_rule_t batch_rule>
10416::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
10417 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10418 auto maybe_layer = maybeCurrentDynamicLayer();
10419 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10420 int64_t cur_level = maybe_layer->layerId();
10421 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
10422 return at::_ops::_prelu_kernel_backward::call(grad_output, self, weight);
10423 }
10424 Tensor grad_output_value;
10425 optional<int64_t> grad_output_bdim;
10426 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
10427 Tensor self_value;
10428 optional<int64_t> self_bdim;
10429 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10430 Tensor weight_value;
10431 optional<int64_t> weight_bdim;
10432 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
10433 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim);
10434 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
10435}
10436template <typename batch_rule_t, batch_rule_t batch_rule>
10437at::Tensor & gelu__generated_plumbing(at::Tensor & self, c10::string_view approximate) {
10438 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10439 auto maybe_layer = maybeCurrentDynamicLayer();
10440 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10441 int64_t cur_level = maybe_layer->layerId();
10442 if (!isBatchedAtLevel(self, cur_level)) {
10443 return at::_ops::gelu_::call(self, approximate);
10444 }
10445 Tensor self_value;
10446 optional<int64_t> self_bdim;
10447 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10448 batch_rule(self_value, self_bdim, approximate);
10449 return self;
10450}
10451template <typename batch_rule_t, batch_rule_t batch_rule>
10452at::Tensor gelu_generated_plumbing(const at::Tensor & self, c10::string_view approximate) {
10453 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10454 auto maybe_layer = maybeCurrentDynamicLayer();
10455 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10456 int64_t cur_level = maybe_layer->layerId();
10457 if (!isBatchedAtLevel(self, cur_level)) {
10458 return at::_ops::gelu::call(self, approximate);
10459 }
10460 Tensor self_value;
10461 optional<int64_t> self_bdim;
10462 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10463 auto results = batch_rule(self_value, self_bdim, approximate);
10464 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10465}
10466template <typename batch_rule_t, batch_rule_t batch_rule>
10467at::Tensor gelu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
10468 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10469 auto maybe_layer = maybeCurrentDynamicLayer();
10470 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10471 int64_t cur_level = maybe_layer->layerId();
10472 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
10473 return at::_ops::gelu_backward::call(grad_output, self, approximate);
10474 }
10475 Tensor grad_output_value;
10476 optional<int64_t> grad_output_bdim;
10477 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
10478 Tensor self_value;
10479 optional<int64_t> self_bdim;
10480 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10481 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, approximate);
10482 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10483}
10484template <typename batch_rule_t, batch_rule_t batch_rule>
10485at::Tensor infinitely_differentiable_gelu_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self) {
10486 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10487 auto maybe_layer = maybeCurrentDynamicLayer();
10488 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10489 int64_t cur_level = maybe_layer->layerId();
10490 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
10491 return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self);
10492 }
10493 Tensor grad_value;
10494 optional<int64_t> grad_bdim;
10495 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
10496 Tensor self_value;
10497 optional<int64_t> self_bdim;
10498 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10499 auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim);
10500 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10501}
10502template <typename batch_rule_t, batch_rule_t batch_rule>
10503at::Tensor hardshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
10504 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10505 auto maybe_layer = maybeCurrentDynamicLayer();
10506 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10507 int64_t cur_level = maybe_layer->layerId();
10508 if (!isBatchedAtLevel(self, cur_level)) {
10509 return at::_ops::hardshrink::call(self, lambd);
10510 }
10511 Tensor self_value;
10512 optional<int64_t> self_bdim;
10513 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10514 auto results = batch_rule(self_value, self_bdim, lambd);
10515 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10516}
10517template <typename batch_rule_t, batch_rule_t batch_rule>
10518at::Tensor hardshrink_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
10519 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10520 auto maybe_layer = maybeCurrentDynamicLayer();
10521 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10522 int64_t cur_level = maybe_layer->layerId();
10523 if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(self, cur_level)) {
10524 return at::_ops::hardshrink_backward::call(grad_out, self, lambd);
10525 }
10526 Tensor grad_out_value;
10527 optional<int64_t> grad_out_bdim;
10528 std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
10529 Tensor self_value;
10530 optional<int64_t> self_bdim;
10531 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10532 auto results = batch_rule(grad_out_value, grad_out_bdim, self_value, self_bdim, lambd);
10533 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10534}
10535template <typename batch_rule_t, batch_rule_t batch_rule>
10536at::Tensor rsqrt_generated_plumbing(const at::Tensor & self) {
10537 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10538 auto maybe_layer = maybeCurrentDynamicLayer();
10539 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10540 int64_t cur_level = maybe_layer->layerId();
10541 if (!isBatchedAtLevel(self, cur_level)) {
10542 return at::_ops::rsqrt::call(self);
10543 }
10544 Tensor self_value;
10545 optional<int64_t> self_bdim;
10546 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10547 auto results = batch_rule(self_value, self_bdim);
10548 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10549}
10550template <typename batch_rule_t, batch_rule_t batch_rule>
10551at::Tensor & rsqrt__generated_plumbing(at::Tensor & self) {
10552 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10553 auto maybe_layer = maybeCurrentDynamicLayer();
10554 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10555 int64_t cur_level = maybe_layer->layerId();
10556 if (!isBatchedAtLevel(self, cur_level)) {
10557 return at::_ops::rsqrt_::call(self);
10558 }
10559 Tensor self_value;
10560 optional<int64_t> self_bdim;
10561 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10562 batch_rule(self_value, self_bdim);
10563 return self;
10564}
10565template <typename batch_rule_t, batch_rule_t batch_rule>
10566at::Tensor select_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, int64_t index) {
10567 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10568 auto maybe_layer = maybeCurrentDynamicLayer();
10569 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10570 int64_t cur_level = maybe_layer->layerId();
10571 if (!isBatchedAtLevel(self, cur_level)) {
10572 return at::_ops::select_Dimname::call(self, dim, index);
10573 }
10574 Tensor self_value;
10575 optional<int64_t> self_bdim;
10576 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10577 auto results = batch_rule(self_value, self_bdim, dim, index);
10578 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10579}
10580template <typename batch_rule_t, batch_rule_t batch_rule>
10581at::Tensor select_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
10582 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10583 auto maybe_layer = maybeCurrentDynamicLayer();
10584 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10585 int64_t cur_level = maybe_layer->layerId();
10586 if (!isBatchedAtLevel(self, cur_level)) {
10587 return at::_ops::select_int::call(self, dim, index);
10588 }
10589 Tensor self_value;
10590 optional<int64_t> self_bdim;
10591 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10592 auto results = batch_rule(self_value, self_bdim, dim, index);
10593 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10594}
10595template <typename batch_rule_t, batch_rule_t batch_rule>
10596at::Tensor select_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
10597 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10598 auto maybe_layer = maybeCurrentDynamicLayer();
10599 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10600 int64_t cur_level = maybe_layer->layerId();
10601 if (!isBatchedAtLevel(grad_output, cur_level)) {
10602 return at::_ops::select_backward::call(grad_output, input_sizes, dim, index);
10603 }
10604 Tensor grad_output_value;
10605 optional<int64_t> grad_output_bdim;
10606 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
10607 auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, index);
10608 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10609}
10610template <typename batch_rule_t, batch_rule_t batch_rule>
10611at::Tensor _nested_select_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
10612 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10613 auto maybe_layer = maybeCurrentDynamicLayer();
10614 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10615 int64_t cur_level = maybe_layer->layerId();
10616 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
10617 return at::_ops::_nested_select_backward::call(grad_output, self, dim, index);
10618 }
10619 Tensor grad_output_value;
10620 optional<int64_t> grad_output_bdim;
10621 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
10622 Tensor self_value;
10623 optional<int64_t> self_bdim;
10624 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10625 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim, index);
10626 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10627}
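// Note: when more than one argument may carry a batch dimension (e.g. grad_output and
// self above), the unbatched fallback is taken only if none of them is batched at the
// current level; otherwise every tensor argument is unwrapped independently before the
// batch rule is called.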
10628template <typename batch_rule_t, batch_rule_t batch_rule>
10629at::Tensor selu_generated_plumbing(const at::Tensor & self) {
10630 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10631 auto maybe_layer = maybeCurrentDynamicLayer();
10632 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10633 int64_t cur_level = maybe_layer->layerId();
10634 if (!isBatchedAtLevel(self, cur_level)) {
10635 return at::_ops::selu::call(self);
10636 }
10637 Tensor self_value;
10638 optional<int64_t> self_bdim;
10639 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10640 auto results = batch_rule(self_value, self_bdim);
10641 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10642}
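// Note: in-place variants (selu_, celu_, silu_, ... below) run the batch rule for its
// side effect on the unwrapped self_value and return `self` directly, since `self` is
// already the batched tensor at this level; no makeBatched re-wrap is performed.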
10643template <typename batch_rule_t, batch_rule_t batch_rule>
10644at::Tensor & selu__generated_plumbing(at::Tensor & self) {
10645 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10646 auto maybe_layer = maybeCurrentDynamicLayer();
10647 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10648 int64_t cur_level = maybe_layer->layerId();
10649 if (!isBatchedAtLevel(self, cur_level)) {
10650 return at::_ops::selu_::call(self);
10651 }
10652 Tensor self_value;
10653 optional<int64_t> self_bdim;
10654 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10655 batch_rule(self_value, self_bdim);
10656 return self;
10657}
10658template <typename batch_rule_t, batch_rule_t batch_rule>
10659at::Tensor celu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha) {
10660 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10661 auto maybe_layer = maybeCurrentDynamicLayer();
10662 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10663 int64_t cur_level = maybe_layer->layerId();
10664 if (!isBatchedAtLevel(self, cur_level)) {
10665 return at::_ops::celu::call(self, alpha);
10666 }
10667 Tensor self_value;
10668 optional<int64_t> self_bdim;
10669 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10670 auto results = batch_rule(self_value, self_bdim, alpha);
10671 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10672}
10673template <typename batch_rule_t, batch_rule_t batch_rule>
10674at::Tensor & celu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha) {
10675 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10676 auto maybe_layer = maybeCurrentDynamicLayer();
10677 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10678 int64_t cur_level = maybe_layer->layerId();
10679 if (!isBatchedAtLevel(self, cur_level)) {
10680 return at::_ops::celu_::call(self, alpha);
10681 }
10682 Tensor self_value;
10683 optional<int64_t> self_bdim;
10684 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10685 batch_rule(self_value, self_bdim, alpha);
10686 return self;
10687}
10688template <typename batch_rule_t, batch_rule_t batch_rule>
10689at::Tensor silu_generated_plumbing(const at::Tensor & self) {
10690 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10691 auto maybe_layer = maybeCurrentDynamicLayer();
10692 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10693 int64_t cur_level = maybe_layer->layerId();
10694 if (!isBatchedAtLevel(self, cur_level)) {
10695 return at::_ops::silu::call(self);
10696 }
10697 Tensor self_value;
10698 optional<int64_t> self_bdim;
10699 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10700 auto results = batch_rule(self_value, self_bdim);
10701 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10702}
10703template <typename batch_rule_t, batch_rule_t batch_rule>
10704at::Tensor & silu__generated_plumbing(at::Tensor & self) {
10705 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10706 auto maybe_layer = maybeCurrentDynamicLayer();
10707 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10708 int64_t cur_level = maybe_layer->layerId();
10709 if (!isBatchedAtLevel(self, cur_level)) {
10710 return at::_ops::silu_::call(self);
10711 }
10712 Tensor self_value;
10713 optional<int64_t> self_bdim;
10714 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10715 batch_rule(self_value, self_bdim);
10716 return self;
10717}
10718template <typename batch_rule_t, batch_rule_t batch_rule>
10719at::Tensor silu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
10720 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10721 auto maybe_layer = maybeCurrentDynamicLayer();
10722 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10723 int64_t cur_level = maybe_layer->layerId();
10724 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
10725 return at::_ops::silu_backward::call(grad_output, self);
10726 }
10727 Tensor grad_output_value;
10728 optional<int64_t> grad_output_bdim;
10729 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
10730 Tensor self_value;
10731 optional<int64_t> self_bdim;
10732 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10733 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
10734 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10735}
10736template <typename batch_rule_t, batch_rule_t batch_rule>
10737at::Tensor mish_generated_plumbing(const at::Tensor & self) {
10738 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10739 auto maybe_layer = maybeCurrentDynamicLayer();
10740 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10741 int64_t cur_level = maybe_layer->layerId();
10742 if (!isBatchedAtLevel(self, cur_level)) {
10743 return at::_ops::mish::call(self);
10744 }
10745 Tensor self_value;
10746 optional<int64_t> self_bdim;
10747 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10748 auto results = batch_rule(self_value, self_bdim);
10749 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10750}
10751template <typename batch_rule_t, batch_rule_t batch_rule>
10752at::Tensor & mish__generated_plumbing(at::Tensor & self) {
10753 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10754 auto maybe_layer = maybeCurrentDynamicLayer();
10755 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10756 int64_t cur_level = maybe_layer->layerId();
10757 if (!isBatchedAtLevel(self, cur_level)) {
10758 return at::_ops::mish_::call(self);
10759 }
10760 Tensor self_value;
10761 optional<int64_t> self_bdim;
10762 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10763 batch_rule(self_value, self_bdim);
10764 return self;
10765}
10766template <typename batch_rule_t, batch_rule_t batch_rule>
10767at::Tensor mish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
10768 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10769 auto maybe_layer = maybeCurrentDynamicLayer();
10770 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10771 int64_t cur_level = maybe_layer->layerId();
10772 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
10773 return at::_ops::mish_backward::call(grad_output, self);
10774 }
10775 Tensor grad_output_value;
10776 optional<int64_t> grad_output_bdim;
10777 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
10778 Tensor self_value;
10779 optional<int64_t> self_bdim;
10780 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10781 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
10782 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10783}
10784template <typename batch_rule_t, batch_rule_t batch_rule>
10785at::Tensor sigmoid_generated_plumbing(const at::Tensor & self) {
10786 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10787 auto maybe_layer = maybeCurrentDynamicLayer();
10788 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10789 int64_t cur_level = maybe_layer->layerId();
10790 if (!isBatchedAtLevel(self, cur_level)) {
10791 return at::_ops::sigmoid::call(self);
10792 }
10793 Tensor self_value;
10794 optional<int64_t> self_bdim;
10795 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10796 auto results = batch_rule(self_value, self_bdim);
10797 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10798}
10799template <typename batch_rule_t, batch_rule_t batch_rule>
10800at::Tensor & sigmoid__generated_plumbing(at::Tensor & self) {
10801 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10802 auto maybe_layer = maybeCurrentDynamicLayer();
10803 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10804 int64_t cur_level = maybe_layer->layerId();
10805 if (!isBatchedAtLevel(self, cur_level)) {
10806 return at::_ops::sigmoid_::call(self);
10807 }
10808 Tensor self_value;
10809 optional<int64_t> self_bdim;
10810 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10811 batch_rule(self_value, self_bdim);
10812 return self;
10813}
10814template <typename batch_rule_t, batch_rule_t batch_rule>
10815at::Tensor logit_generated_plumbing(const at::Tensor & self, c10::optional<double> eps) {
10816 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10817 auto maybe_layer = maybeCurrentDynamicLayer();
10818 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10819 int64_t cur_level = maybe_layer->layerId();
10820 if (!isBatchedAtLevel(self, cur_level)) {
10821 return at::_ops::logit::call(self, eps);
10822 }
10823 Tensor self_value;
10824 optional<int64_t> self_bdim;
10825 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10826 auto results = batch_rule(self_value, self_bdim, eps);
10827 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10828}
10829template <typename batch_rule_t, batch_rule_t batch_rule>
10830at::Tensor & logit__generated_plumbing(at::Tensor & self, c10::optional<double> eps) {
10831 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10832 auto maybe_layer = maybeCurrentDynamicLayer();
10833 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10834 int64_t cur_level = maybe_layer->layerId();
10835 if (!isBatchedAtLevel(self, cur_level)) {
10836 return at::_ops::logit_::call(self, eps);
10837 }
10838 Tensor self_value;
10839 optional<int64_t> self_bdim;
10840 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10841 batch_rule(self_value, self_bdim, eps);
10842 return self;
10843}
10844template <typename batch_rule_t, batch_rule_t batch_rule>
10845at::Tensor sin_generated_plumbing(const at::Tensor & self) {
10846 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10847 auto maybe_layer = maybeCurrentDynamicLayer();
10848 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10849 int64_t cur_level = maybe_layer->layerId();
10850 if (!isBatchedAtLevel(self, cur_level)) {
10851 return at::_ops::sin::call(self);
10852 }
10853 Tensor self_value;
10854 optional<int64_t> self_bdim;
10855 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10856 auto results = batch_rule(self_value, self_bdim);
10857 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10858}
10859template <typename batch_rule_t, batch_rule_t batch_rule>
10860at::Tensor & sin__generated_plumbing(at::Tensor & self) {
10861 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10862 auto maybe_layer = maybeCurrentDynamicLayer();
10863 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10864 int64_t cur_level = maybe_layer->layerId();
10865 if (!isBatchedAtLevel(self, cur_level)) {
10866 return at::_ops::sin_::call(self);
10867 }
10868 Tensor self_value;
10869 optional<int64_t> self_bdim;
10870 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10871 batch_rule(self_value, self_bdim);
10872 return self;
10873}
10874template <typename batch_rule_t, batch_rule_t batch_rule>
10875at::Tensor sinc_generated_plumbing(const at::Tensor & self) {
10876 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10877 auto maybe_layer = maybeCurrentDynamicLayer();
10878 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10879 int64_t cur_level = maybe_layer->layerId();
10880 if (!isBatchedAtLevel(self, cur_level)) {
10881 return at::_ops::sinc::call(self);
10882 }
10883 Tensor self_value;
10884 optional<int64_t> self_bdim;
10885 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10886 auto results = batch_rule(self_value, self_bdim);
10887 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10888}
10889template <typename batch_rule_t, batch_rule_t batch_rule>
10890at::Tensor & sinc__generated_plumbing(at::Tensor & self) {
10891 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10892 auto maybe_layer = maybeCurrentDynamicLayer();
10893 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10894 int64_t cur_level = maybe_layer->layerId();
10895 if (!isBatchedAtLevel(self, cur_level)) {
10896 return at::_ops::sinc_::call(self);
10897 }
10898 Tensor self_value;
10899 optional<int64_t> self_bdim;
10900 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10901 batch_rule(self_value, self_bdim);
10902 return self;
10903}
10904template <typename batch_rule_t, batch_rule_t batch_rule>
10905at::Tensor sinh_generated_plumbing(const at::Tensor & self) {
10906 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10907 auto maybe_layer = maybeCurrentDynamicLayer();
10908 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10909 int64_t cur_level = maybe_layer->layerId();
10910 if (!isBatchedAtLevel(self, cur_level)) {
10911 return at::_ops::sinh::call(self);
10912 }
10913 Tensor self_value;
10914 optional<int64_t> self_bdim;
10915 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10916 auto results = batch_rule(self_value, self_bdim);
10917 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10918}
10919template <typename batch_rule_t, batch_rule_t batch_rule>
10920at::Tensor & sinh__generated_plumbing(at::Tensor & self) {
10921 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10922 auto maybe_layer = maybeCurrentDynamicLayer();
10923 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
10924 int64_t cur_level = maybe_layer->layerId();
10925 if (!isBatchedAtLevel(self, cur_level)) {
10926 return at::_ops::sinh_::call(self);
10927 }
10928 Tensor self_value;
10929 optional<int64_t> self_bdim;
10930 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10931 batch_rule(self_value, self_bdim);
10932 return self;
10933}
10934template <typename batch_rule_t, batch_rule_t batch_rule>
10935at::Tensor detach_generated_plumbing(const at::Tensor & self) {
10936 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10937 auto maybe_layer = maybeCurrentDynamicLayer();
10938 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10939 int64_t cur_level = maybe_layer->layerId();
10940 if (!isBatchedAtLevel(self, cur_level)) {
10941 return at::_ops::detach::call(self);
10942 }
10943 Tensor self_value;
10944 optional<int64_t> self_bdim;
10945 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10946 auto results = batch_rule(self_value, self_bdim);
10947 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10948}
10949template <typename batch_rule_t, batch_rule_t batch_rule>
10950at::Tensor slice_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
10951 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10952 auto maybe_layer = maybeCurrentDynamicLayer();
10953 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10954 int64_t cur_level = maybe_layer->layerId();
10955 if (!isBatchedAtLevel(self, cur_level)) {
10956 return at::_ops::slice_Tensor::call(self, dim, start, end, step);
10957 }
10958 Tensor self_value;
10959 optional<int64_t> self_bdim;
10960 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10961 auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
10962 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10963}
10964template <typename batch_rule_t, batch_rule_t batch_rule>
10965at::Tensor slice_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
10966 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10967 auto maybe_layer = maybeCurrentDynamicLayer();
10968 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10969 int64_t cur_level = maybe_layer->layerId();
10970 if (!isBatchedAtLevel(grad_output, cur_level)) {
10971 return at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step);
10972 }
10973 Tensor grad_output_value;
10974 optional<int64_t> grad_output_bdim;
10975 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
10976 auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, start, end, step);
10977 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10978}
10979template <typename batch_rule_t, batch_rule_t batch_rule>
10980at::Tensor slice_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
10981 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
10982 auto maybe_layer = maybeCurrentDynamicLayer();
10983 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
10984 int64_t cur_level = maybe_layer->layerId();
10985 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
10986 return at::_ops::slice_scatter::call(self, src, dim, start, end, step);
10987 }
10988 Tensor self_value;
10989 optional<int64_t> self_bdim;
10990 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
10991 Tensor src_value;
10992 optional<int64_t> src_bdim;
10993 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
10994 auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step);
10995 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
10996}
10997template <typename batch_rule_t, batch_rule_t batch_rule>
10998at::Tensor select_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
10999 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11000 auto maybe_layer = maybeCurrentDynamicLayer();
11001 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11002 int64_t cur_level = maybe_layer->layerId();
11003 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
11004 return at::_ops::select_scatter::call(self, src, dim, index);
11005 }
11006 Tensor self_value;
11007 optional<int64_t> self_bdim;
11008 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11009 Tensor src_value;
11010 optional<int64_t> src_bdim;
11011 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
11012 auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, index);
11013 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11014}
11015template <typename batch_rule_t, batch_rule_t batch_rule>
11016at::Tensor diagonal_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
11017 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11018 auto maybe_layer = maybeCurrentDynamicLayer();
11019 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11020 int64_t cur_level = maybe_layer->layerId();
11021 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
11022 return at::_ops::diagonal_scatter::call(self, src, offset, dim1, dim2);
11023 }
11024 Tensor self_value;
11025 optional<int64_t> self_bdim;
11026 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11027 Tensor src_value;
11028 optional<int64_t> src_bdim;
11029 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
11030 auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, offset, dim1, dim2);
11031 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11032}
11033template <typename batch_rule_t, batch_rule_t batch_rule>
11034at::Tensor as_strided_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
11035 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11036 auto maybe_layer = maybeCurrentDynamicLayer();
11037 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11038 int64_t cur_level = maybe_layer->layerId();
11039 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
11040 return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
11041 }
11042 Tensor self_value;
11043 optional<int64_t> self_bdim;
11044 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11045 Tensor src_value;
11046 optional<int64_t> src_bdim;
11047 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
11048 auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, size, stride, storage_offset);
11049 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11050}
11051template <typename batch_rule_t, batch_rule_t batch_rule>
11052at::Tensor smm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
11053 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11054 auto maybe_layer = maybeCurrentDynamicLayer();
11055 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11056 int64_t cur_level = maybe_layer->layerId();
11057 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
11058 return at::_ops::smm::call(self, mat2);
11059 }
11060 Tensor self_value;
11061 optional<int64_t> self_bdim;
11062 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11063 Tensor mat2_value;
11064 optional<int64_t> mat2_bdim;
11065 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
11066 auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
11067 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11068}
11069template <typename batch_rule_t, batch_rule_t batch_rule>
11070at::Tensor softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
11071 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11072 auto maybe_layer = maybeCurrentDynamicLayer();
11073 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11074 int64_t cur_level = maybe_layer->layerId();
11075 if (!isBatchedAtLevel(self, cur_level)) {
11076 return at::_ops::softmax_int::call(self, dim, dtype);
11077 }
11078 Tensor self_value;
11079 optional<int64_t> self_bdim;
11080 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11081 auto results = batch_rule(self_value, self_bdim, dim, dtype);
11082 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11083}
11084template <typename batch_rule_t, batch_rule_t batch_rule>
11085at::Tensor softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
11086 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11087 auto maybe_layer = maybeCurrentDynamicLayer();
11088 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11089 int64_t cur_level = maybe_layer->layerId();
11090 if (!isBatchedAtLevel(self, cur_level)) {
11091 return at::_ops::softmax_Dimname::call(self, dim, dtype);
11092 }
11093 Tensor self_value;
11094 optional<int64_t> self_bdim;
11095 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11096 auto results = batch_rule(self_value, self_bdim, dim, dtype);
11097 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11098}
11099template <typename batch_rule_t, batch_rule_t batch_rule>
11100at::Tensor _softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
11101 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11102 auto maybe_layer = maybeCurrentDynamicLayer();
11103 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11104 int64_t cur_level = maybe_layer->layerId();
11105 if (!isBatchedAtLevel(self, cur_level)) {
11106 return at::_ops::_softmax::call(self, dim, half_to_float);
11107 }
11108 Tensor self_value;
11109 optional<int64_t> self_bdim;
11110 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11111 auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
11112 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11113}
11114template <typename batch_rule_t, batch_rule_t batch_rule>
11115at::Tensor _softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
11116 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11117 auto maybe_layer = maybeCurrentDynamicLayer();
11118 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11119 int64_t cur_level = maybe_layer->layerId();
11120 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
11121 return at::_ops::_softmax_backward_data::call(grad_output, output, dim, input_dtype);
11122 }
11123 Tensor grad_output_value;
11124 optional<int64_t> grad_output_bdim;
11125 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
11126 Tensor output_value;
11127 optional<int64_t> output_bdim;
11128 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
11129 auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
11130 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11131}
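// Note: ops that return ::std::vector<at::Tensor> (the split / split_with_sizes /
// hsplit / vsplit / dsplit family below) re-wrap their outputs with makeBatchedVector
// instead of makeBatched.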
11132template <typename batch_rule_t, batch_rule_t batch_rule>
11133::std::vector<at::Tensor> unsafe_split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
11134 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11135 auto maybe_layer = maybeCurrentDynamicLayer();
11136 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11137 int64_t cur_level = maybe_layer->layerId();
11138 if (!isBatchedAtLevel(self, cur_level)) {
11139 return at::_ops::unsafe_split_Tensor::call(self, split_size, dim);
11140 }
11141 Tensor self_value;
11142 optional<int64_t> self_bdim;
11143 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11144 auto results = batch_rule(self_value, self_bdim, split_size, dim);
11145 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11146}
11147template <typename batch_rule_t, batch_rule_t batch_rule>
11148::std::vector<at::Tensor> split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
11149 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11150 auto maybe_layer = maybeCurrentDynamicLayer();
11151 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11152 int64_t cur_level = maybe_layer->layerId();
11153 if (!isBatchedAtLevel(self, cur_level)) {
11154 return at::_ops::split_Tensor::call(self, split_size, dim);
11155 }
11156 Tensor self_value;
11157 optional<int64_t> self_bdim;
11158 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11159 auto results = batch_rule(self_value, self_bdim, split_size, dim);
11160 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11161}
11162template <typename batch_rule_t, batch_rule_t batch_rule>
11163::std::vector<at::Tensor> split_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
11164 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11165 auto maybe_layer = maybeCurrentDynamicLayer();
11166 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11167 int64_t cur_level = maybe_layer->layerId();
11168 if (!isBatchedAtLevel(self, cur_level)) {
11169 return at::_ops::split_sizes::call(self, split_size, dim);
11170 }
11171 Tensor self_value;
11172 optional<int64_t> self_bdim;
11173 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11174 auto results = batch_rule(self_value, self_bdim, split_size, dim);
11175 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11176}
11177template <typename batch_rule_t, batch_rule_t batch_rule>
11178::std::vector<at::Tensor> unsafe_split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
11179 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11180 auto maybe_layer = maybeCurrentDynamicLayer();
11181 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11182 int64_t cur_level = maybe_layer->layerId();
11183 if (!isBatchedAtLevel(self, cur_level)) {
11184 return at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim);
11185 }
11186 Tensor self_value;
11187 optional<int64_t> self_bdim;
11188 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11189 auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
11190 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11191}
11192template <typename batch_rule_t, batch_rule_t batch_rule>
11193::std::vector<at::Tensor> split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
11194 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11195 auto maybe_layer = maybeCurrentDynamicLayer();
11196 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11197 int64_t cur_level = maybe_layer->layerId();
11198 if (!isBatchedAtLevel(self, cur_level)) {
11199 return at::_ops::split_with_sizes::call(self, split_sizes, dim);
11200 }
11201 Tensor self_value;
11202 optional<int64_t> self_bdim;
11203 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11204 auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
11205 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11206}
11207template <typename batch_rule_t, batch_rule_t batch_rule>
11208::std::vector<at::Tensor> hsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
11209 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11210 auto maybe_layer = maybeCurrentDynamicLayer();
11211 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11212 int64_t cur_level = maybe_layer->layerId();
11213 if (!isBatchedAtLevel(self, cur_level)) {
11214 return at::_ops::hsplit_int::call(self, sections);
11215 }
11216 Tensor self_value;
11217 optional<int64_t> self_bdim;
11218 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11219 auto results = batch_rule(self_value, self_bdim, sections);
11220 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11221}
11222template <typename batch_rule_t, batch_rule_t batch_rule>
11223::std::vector<at::Tensor> hsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
11224 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11225 auto maybe_layer = maybeCurrentDynamicLayer();
11226 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11227 int64_t cur_level = maybe_layer->layerId();
11228 if (!isBatchedAtLevel(self, cur_level)) {
11229 return at::_ops::hsplit_array::call(self, indices);
11230 }
11231 Tensor self_value;
11232 optional<int64_t> self_bdim;
11233 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11234 auto results = batch_rule(self_value, self_bdim, indices);
11235 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11236}
11237template <typename batch_rule_t, batch_rule_t batch_rule>
11238::std::vector<at::Tensor> vsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
11239 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11240 auto maybe_layer = maybeCurrentDynamicLayer();
11241 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11242 int64_t cur_level = maybe_layer->layerId();
11243 if (!isBatchedAtLevel(self, cur_level)) {
11244 return at::_ops::vsplit_int::call(self, sections);
11245 }
11246 Tensor self_value;
11247 optional<int64_t> self_bdim;
11248 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11249 auto results = batch_rule(self_value, self_bdim, sections);
11250 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11251}
11252template <typename batch_rule_t, batch_rule_t batch_rule>
11253::std::vector<at::Tensor> vsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
11254 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11255 auto maybe_layer = maybeCurrentDynamicLayer();
11256 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11257 int64_t cur_level = maybe_layer->layerId();
11258 if (!isBatchedAtLevel(self, cur_level)) {
11259 return at::_ops::vsplit_array::call(self, indices);
11260 }
11261 Tensor self_value;
11262 optional<int64_t> self_bdim;
11263 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11264 auto results = batch_rule(self_value, self_bdim, indices);
11265 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11266}
11267template <typename batch_rule_t, batch_rule_t batch_rule>
11268::std::vector<at::Tensor> dsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
11269 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11270 auto maybe_layer = maybeCurrentDynamicLayer();
11271 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11272 int64_t cur_level = maybe_layer->layerId();
11273 if (!isBatchedAtLevel(self, cur_level)) {
11274 return at::_ops::dsplit_int::call(self, sections);
11275 }
11276 Tensor self_value;
11277 optional<int64_t> self_bdim;
11278 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11279 auto results = batch_rule(self_value, self_bdim, sections);
11280 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11281}
11282template <typename batch_rule_t, batch_rule_t batch_rule>
11283::std::vector<at::Tensor> dsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
11284 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11285 auto maybe_layer = maybeCurrentDynamicLayer();
11286 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11287 int64_t cur_level = maybe_layer->layerId();
11288 if (!isBatchedAtLevel(self, cur_level)) {
11289 return at::_ops::dsplit_array::call(self, indices);
11290 }
11291 Tensor self_value;
11292 optional<int64_t> self_bdim;
11293 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11294 auto results = batch_rule(self_value, self_bdim, indices);
11295 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
11296}
11297template <typename batch_rule_t, batch_rule_t batch_rule>
11298at::Tensor squeeze_generated_plumbing(const at::Tensor & self) {
11299 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11300 auto maybe_layer = maybeCurrentDynamicLayer();
11301 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11302 int64_t cur_level = maybe_layer->layerId();
11303 if (!isBatchedAtLevel(self, cur_level)) {
11304 return at::_ops::squeeze::call(self);
11305 }
11306 Tensor self_value;
11307 optional<int64_t> self_bdim;
11308 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11309 auto results = batch_rule(self_value, self_bdim);
11310 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11311}
11312template <typename batch_rule_t, batch_rule_t batch_rule>
11313at::Tensor squeeze_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
11314 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11315 auto maybe_layer = maybeCurrentDynamicLayer();
11316 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11317 int64_t cur_level = maybe_layer->layerId();
11318 if (!isBatchedAtLevel(self, cur_level)) {
11319 return at::_ops::squeeze_dim::call(self, dim);
11320 }
11321 Tensor self_value;
11322 optional<int64_t> self_bdim;
11323 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11324 auto results = batch_rule(self_value, self_bdim, dim);
11325 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11326}
11327template <typename batch_rule_t, batch_rule_t batch_rule>
11328at::Tensor squeeze_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
11329 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11330 auto maybe_layer = maybeCurrentDynamicLayer();
11331 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11332 int64_t cur_level = maybe_layer->layerId();
11333 if (!isBatchedAtLevel(self, cur_level)) {
11334 return at::_ops::squeeze_dimname::call(self, dim);
11335 }
11336 Tensor self_value;
11337 optional<int64_t> self_bdim;
11338 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11339 auto results = batch_rule(self_value, self_bdim, dim);
11340 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11341}
11342template <typename batch_rule_t, batch_rule_t batch_rule>
11343at::Tensor squeeze_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
11344 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11345 auto maybe_layer = maybeCurrentDynamicLayer();
11346 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11347 int64_t cur_level = maybe_layer->layerId();
11348 if (!isBatchedAtLevel(self, cur_level)) {
11349 return at::_ops::squeeze_dims::call(self, dim);
11350 }
11351 Tensor self_value;
11352 optional<int64_t> self_bdim;
11353 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11354 auto results = batch_rule(self_value, self_bdim, dim);
11355 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11356}
11357template <typename batch_rule_t, batch_rule_t batch_rule>
11358at::Tensor sspaddmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
11359 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11360 auto maybe_layer = maybeCurrentDynamicLayer();
11361 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11362 int64_t cur_level = maybe_layer->layerId();
11363 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
11364 return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha);
11365 }
11366 Tensor self_value;
11367 optional<int64_t> self_bdim;
11368 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11369 Tensor mat1_value;
11370 optional<int64_t> mat1_bdim;
11371 std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
11372 Tensor mat2_value;
11373 optional<int64_t> mat2_bdim;
11374 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
11375 auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
11376 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11377}
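// Note: TensorList arguments (stack, _stack, hstack, vstack, dstack below) are forwarded
// to the batch rule unmodified; any per-tensor unwrapping is left to the batch rule
// itself, so the plumbing only performs the isBatchedAtLevel check and the final re-wrap.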
11378template <typename batch_rule_t, batch_rule_t batch_rule>
11379at::Tensor stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
11380 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11381 auto maybe_layer = maybeCurrentDynamicLayer();
11382 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11383 int64_t cur_level = maybe_layer->layerId();
11384 if (!isBatchedAtLevel(tensors, cur_level)) {
11385 return at::_ops::stack::call(tensors, dim);
11386 }
11387
11388 auto results = batch_rule(tensors, dim);
11389 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11390}
11391template <typename batch_rule_t, batch_rule_t batch_rule>
11392at::Tensor _stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
11393 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11394 auto maybe_layer = maybeCurrentDynamicLayer();
11395 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11396 int64_t cur_level = maybe_layer->layerId();
11397 if (!isBatchedAtLevel(tensors, cur_level)) {
11398 return at::_ops::_stack::call(tensors, dim);
11399 }
11400
11401 auto results = batch_rule(tensors, dim);
11402 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11403}
11404template <typename batch_rule_t, batch_rule_t batch_rule>
11405at::Tensor hstack_generated_plumbing(at::TensorList tensors) {
11406 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11407 auto maybe_layer = maybeCurrentDynamicLayer();
11408 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11409 int64_t cur_level = maybe_layer->layerId();
11410 if (!isBatchedAtLevel(tensors, cur_level)) {
11411 return at::_ops::hstack::call(tensors);
11412 }
11413
11414 auto results = batch_rule(tensors);
11415 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11416}
11417template <typename batch_rule_t, batch_rule_t batch_rule>
11418at::Tensor vstack_generated_plumbing(at::TensorList tensors) {
11419 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11420 auto maybe_layer = maybeCurrentDynamicLayer();
11421 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11422 int64_t cur_level = maybe_layer->layerId();
11423 if (!isBatchedAtLevel(tensors, cur_level)) {
11424 return at::_ops::vstack::call(tensors);
11425 }
11426
11427 auto results = batch_rule(tensors);
11428 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11429}
11430template <typename batch_rule_t, batch_rule_t batch_rule>
11431at::Tensor dstack_generated_plumbing(at::TensorList tensors) {
11432 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11433 auto maybe_layer = maybeCurrentDynamicLayer();
11434 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11435 int64_t cur_level = maybe_layer->layerId();
11436 if (!isBatchedAtLevel(tensors, cur_level)) {
11437 return at::_ops::dstack::call(tensors);
11438 }
11439
11440 auto results = batch_rule(tensors);
11441 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11442}
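// Note: optional tensor arguments (the `window` parameter of stft/stft_center/istft
// below) are unwrapped only when they hold a value; otherwise the empty optional value
// and bdim are passed through to the batch rule unchanged.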
11443template <typename batch_rule_t, batch_rule_t batch_rule>
11444at::Tensor stft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
11445 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11446 auto maybe_layer = maybeCurrentDynamicLayer();
11447 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11448 int64_t cur_level = maybe_layer->layerId();
11449 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
11450 return at::_ops::stft::call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
11451 }
11452 Tensor self_value;
11453 optional<int64_t> self_bdim;
11454 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11455 optional<Tensor> window_value;
11456 optional<int64_t> window_bdim;
11457 if (window) {
11458 std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
11459 }
11460 auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, normalized, onesided, return_complex);
11461 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11462}
11463template <typename batch_rule_t, batch_rule_t batch_rule>
11464at::Tensor stft_center_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
11465 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11466 auto maybe_layer = maybeCurrentDynamicLayer();
11467 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11468 int64_t cur_level = maybe_layer->layerId();
11469 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
11470 return at::_ops::stft_center::call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
11471 }
11472 Tensor self_value;
11473 optional<int64_t> self_bdim;
11474 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11475 optional<Tensor> window_value;
11476 optional<int64_t> window_bdim;
11477 if (window) {
11478 std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
11479 }
11480 auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, pad_mode, normalized, onesided, return_complex);
11481 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11482}
11483template <typename batch_rule_t, batch_rule_t batch_rule>
11484at::Tensor istft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) {
11485 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11486 auto maybe_layer = maybeCurrentDynamicLayer();
11487 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11488 int64_t cur_level = maybe_layer->layerId();
11489 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
11490 return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
11491 }
11492 Tensor self_value;
11493 optional<int64_t> self_bdim;
11494 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11495 optional<Tensor> window_value;
11496 optional<int64_t> window_bdim;
11497 if (window) {
11498 std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
11499 }
11500 auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, normalized, onesided, length, return_complex);
11501 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11502}
11503template <typename batch_rule_t, batch_rule_t batch_rule>
11504at::Tensor sum_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
11505 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11506 auto maybe_layer = maybeCurrentDynamicLayer();
11507 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11508 int64_t cur_level = maybe_layer->layerId();
11509 if (!isBatchedAtLevel(self, cur_level)) {
11510 return at::_ops::sum::call(self, dtype);
11511 }
11512 Tensor self_value;
11513 optional<int64_t> self_bdim;
11514 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11515 auto results = batch_rule(self_value, self_bdim, dtype);
11516 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11517}
11518template <typename batch_rule_t, batch_rule_t batch_rule>
11519at::Tensor sum_dim_IntList_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
11520 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11521 auto maybe_layer = maybeCurrentDynamicLayer();
11522 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11523 int64_t cur_level = maybe_layer->layerId();
11524 if (!isBatchedAtLevel(self, cur_level)) {
11525 return at::_ops::sum_dim_IntList::call(self, dim, keepdim, dtype);
11526 }
11527 Tensor self_value;
11528 optional<int64_t> self_bdim;
11529 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11530 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
11531 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11532}
11533template <typename batch_rule_t, batch_rule_t batch_rule>
11534at::Tensor sum_dim_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
11535 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11536 auto maybe_layer = maybeCurrentDynamicLayer();
11537 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11538 int64_t cur_level = maybe_layer->layerId();
11539 if (!isBatchedAtLevel(self, cur_level)) {
11540 return at::_ops::sum_dim_DimnameList::call(self, dim, keepdim, dtype);
11541 }
11542 Tensor self_value;
11543 optional<int64_t> self_bdim;
11544 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11545 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
11546 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11547}
11548template <typename batch_rule_t, batch_rule_t batch_rule>
11549at::Tensor _nested_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
11550 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11551 auto maybe_layer = maybeCurrentDynamicLayer();
11552 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11553 int64_t cur_level = maybe_layer->layerId();
11554 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
11555 return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim);
11556 }
11557 Tensor grad_value;
11558 optional<int64_t> grad_bdim;
11559 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
11560 Tensor self_value;
11561 optional<int64_t> self_bdim;
11562 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11563 auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, keepdim);
11564 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11565}
11566template <typename batch_rule_t, batch_rule_t batch_rule>
11567at::Tensor nansum_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
11568 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11569 auto maybe_layer = maybeCurrentDynamicLayer();
11570 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11571 int64_t cur_level = maybe_layer->layerId();
11572 if (!isBatchedAtLevel(self, cur_level)) {
11573 return at::_ops::nansum::call(self, dim, keepdim, dtype);
11574 }
11575 Tensor self_value;
11576 optional<int64_t> self_bdim;
11577 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11578 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
11579 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11580}
11581template <typename batch_rule_t, batch_rule_t batch_rule>
11582at::Tensor sum_to_size_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
11583 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11584 auto maybe_layer = maybeCurrentDynamicLayer();
11585 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11586 int64_t cur_level = maybe_layer->layerId();
11587 if (!isBatchedAtLevel(self, cur_level)) {
11588 return at::_ops::sum_to_size::call(self, size);
11589 }
11590 Tensor self_value;
11591 optional<int64_t> self_bdim;
11592 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11593 auto results = batch_rule(self_value, self_bdim, size);
11594 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11595}
11596template <typename batch_rule_t, batch_rule_t batch_rule>
11597at::Tensor sqrt_generated_plumbing(const at::Tensor & self) {
11598 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11599 auto maybe_layer = maybeCurrentDynamicLayer();
11600 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11601 int64_t cur_level = maybe_layer->layerId();
11602 if (!isBatchedAtLevel(self, cur_level)) {
11603 return at::_ops::sqrt::call(self);
11604 }
11605 Tensor self_value;
11606 optional<int64_t> self_bdim;
11607 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11608 auto results = batch_rule(self_value, self_bdim);
11609 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11610}
11611template <typename batch_rule_t, batch_rule_t batch_rule>
11612at::Tensor & sqrt__generated_plumbing(at::Tensor & self) {
11613 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11614 auto maybe_layer = maybeCurrentDynamicLayer();
11615 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
11616 int64_t cur_level = maybe_layer->layerId();
11617 if (!isBatchedAtLevel(self, cur_level)) {
11618 return at::_ops::sqrt_::call(self);
11619 }
11620 Tensor self_value;
11621 optional<int64_t> self_bdim;
11622 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11623 batch_rule(self_value, self_bdim);
11624 return self;
11625}
11626template <typename batch_rule_t, batch_rule_t batch_rule>
11627at::Tensor square_generated_plumbing(const at::Tensor & self) {
11628 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11629 auto maybe_layer = maybeCurrentDynamicLayer();
11630 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11631 int64_t cur_level = maybe_layer->layerId();
11632 if (!isBatchedAtLevel(self, cur_level)) {
11633 return at::_ops::square::call(self);
11634 }
11635 Tensor self_value;
11636 optional<int64_t> self_bdim;
11637 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11638 auto results = batch_rule(self_value, self_bdim);
11639 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11640}
11641template <typename batch_rule_t, batch_rule_t batch_rule>
11642at::Tensor & square__generated_plumbing(at::Tensor & self) {
11643 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11644 auto maybe_layer = maybeCurrentDynamicLayer();
11645 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
11646 int64_t cur_level = maybe_layer->layerId();
11647 if (!isBatchedAtLevel(self, cur_level)) {
11648 return at::_ops::square_::call(self);
11649 }
11650 Tensor self_value;
11651 optional<int64_t> self_bdim;
11652 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11653 batch_rule(self_value, self_bdim);
11654 return self;
11655}
11656template <typename batch_rule_t, batch_rule_t batch_rule>
11657at::Tensor std_generated_plumbing(const at::Tensor & self, bool unbiased) {
11658 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11659 auto maybe_layer = maybeCurrentDynamicLayer();
11660 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11661 int64_t cur_level = maybe_layer->layerId();
11662 if (!isBatchedAtLevel(self, cur_level)) {
11663 return at::_ops::std::call(self, unbiased);
11664 }
11665 Tensor self_value;
11666 optional<int64_t> self_bdim;
11667 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11668 auto results = batch_rule(self_value, self_bdim, unbiased);
11669 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11670}
11671template <typename batch_rule_t, batch_rule_t batch_rule>
11672at::Tensor std_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
11673 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11674 auto maybe_layer = maybeCurrentDynamicLayer();
11675 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11676 int64_t cur_level = maybe_layer->layerId();
11677 if (!isBatchedAtLevel(self, cur_level)) {
11678 return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
11679 }
11680 Tensor self_value;
11681 optional<int64_t> self_bdim;
11682 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11683 auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
11684 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11685}
11686template <typename batch_rule_t, batch_rule_t batch_rule>
11687at::Tensor std_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
11688 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11689 auto maybe_layer = maybeCurrentDynamicLayer();
11690 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11691 int64_t cur_level = maybe_layer->layerId();
11692 if (!isBatchedAtLevel(self, cur_level)) {
11693 return at::_ops::std_correction::call(self, dim, correction, keepdim);
11694 }
11695 Tensor self_value;
11696 optional<int64_t> self_bdim;
11697 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11698 auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
11699 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11700}
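// Ops with two tensor outputs (e.g. std_mean below) expect the batch rule to
// return an interleaved tuple (out0, out0_bdim, out1, out1_bdim); each pair is
// re-wrapped with makeBatched at the current level.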
11701template <typename batch_rule_t, batch_rule_t batch_rule>
11702::std::tuple<at::Tensor,at::Tensor> std_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
11703 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11704 auto maybe_layer = maybeCurrentDynamicLayer();
11705 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11706 int64_t cur_level = maybe_layer->layerId();
11707 if (!isBatchedAtLevel(self, cur_level)) {
11708 return at::_ops::std_mean::call(self, unbiased);
11709 }
11710 Tensor self_value;
11711 optional<int64_t> self_bdim;
11712 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11713 auto results = batch_rule(self_value, self_bdim, unbiased);
11714 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11715}
11716template <typename batch_rule_t, batch_rule_t batch_rule>
11717::std::tuple<at::Tensor,at::Tensor> std_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
11718 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11719 auto maybe_layer = maybeCurrentDynamicLayer();
11720 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11721 int64_t cur_level = maybe_layer->layerId();
11722 if (!isBatchedAtLevel(self, cur_level)) {
11723 return at::_ops::std_mean_dim::call(self, dim, unbiased, keepdim);
11724 }
11725 Tensor self_value;
11726 optional<int64_t> self_bdim;
11727 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11728 auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
11729 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11730}
11731template <typename batch_rule_t, batch_rule_t batch_rule>
11732::std::tuple<at::Tensor,at::Tensor> std_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
11733 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11734 auto maybe_layer = maybeCurrentDynamicLayer();
11735 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11736 int64_t cur_level = maybe_layer->layerId();
11737 if (!isBatchedAtLevel(self, cur_level)) {
11738 return at::_ops::std_mean_correction::call(self, dim, correction, keepdim);
11739 }
11740 Tensor self_value;
11741 optional<int64_t> self_bdim;
11742 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11743 auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
11744 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11745}
11746template <typename batch_rule_t, batch_rule_t batch_rule>
11747::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
11748 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11749 auto maybe_layer = maybeCurrentDynamicLayer();
11750 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11751 int64_t cur_level = maybe_layer->layerId();
11752 if (!isBatchedAtLevel(self, cur_level)) {
11753 return at::_ops::std_mean_names_dim::call(self, dim, unbiased, keepdim);
11754 }
11755 Tensor self_value;
11756 optional<int64_t> self_bdim;
11757 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11758 auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
11759 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11760}
11761template <typename batch_rule_t, batch_rule_t batch_rule>
11762::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
11763 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11764 auto maybe_layer = maybeCurrentDynamicLayer();
11765 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11766 int64_t cur_level = maybe_layer->layerId();
11767 if (!isBatchedAtLevel(self, cur_level)) {
11768 return at::_ops::std_mean_correction_names::call(self, dim, correction, keepdim);
11769 }
11770 Tensor self_value;
11771 optional<int64_t> self_bdim;
11772 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11773 auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
11774 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
11775}
11776template <typename batch_rule_t, batch_rule_t batch_rule>
11777at::Tensor std_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
11778 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11779 auto maybe_layer = maybeCurrentDynamicLayer();
11780 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11781 int64_t cur_level = maybe_layer->layerId();
11782 if (!isBatchedAtLevel(self, cur_level)) {
11783 return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
11784 }
11785 Tensor self_value;
11786 optional<int64_t> self_bdim;
11787 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11788 auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
11789 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11790}
11791template <typename batch_rule_t, batch_rule_t batch_rule>
11792at::Tensor std_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
11793 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11794 auto maybe_layer = maybeCurrentDynamicLayer();
11795 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11796 int64_t cur_level = maybe_layer->layerId();
11797 if (!isBatchedAtLevel(self, cur_level)) {
11798 return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
11799 }
11800 Tensor self_value;
11801 optional<int64_t> self_bdim;
11802 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11803 auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
11804 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11805}
11806template <typename batch_rule_t, batch_rule_t batch_rule>
11807at::Tensor prod_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
11808 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11809 auto maybe_layer = maybeCurrentDynamicLayer();
11810 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11811 int64_t cur_level = maybe_layer->layerId();
11812 if (!isBatchedAtLevel(self, cur_level)) {
11813 return at::_ops::prod::call(self, dtype);
11814 }
11815 Tensor self_value;
11816 optional<int64_t> self_bdim;
11817 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11818 auto results = batch_rule(self_value, self_bdim, dtype);
11819 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11820}
11821template <typename batch_rule_t, batch_rule_t batch_rule>
11822at::Tensor prod_dim_int_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
11823 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11824 auto maybe_layer = maybeCurrentDynamicLayer();
11825 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11826 int64_t cur_level = maybe_layer->layerId();
11827 if (!isBatchedAtLevel(self, cur_level)) {
11828 return at::_ops::prod_dim_int::call(self, dim, keepdim, dtype);
11829 }
11830 Tensor self_value;
11831 optional<int64_t> self_bdim;
11832 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11833 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
11834 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11835}
11836template <typename batch_rule_t, batch_rule_t batch_rule>
11837at::Tensor prod_dim_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
11838 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11839 auto maybe_layer = maybeCurrentDynamicLayer();
11840 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11841 int64_t cur_level = maybe_layer->layerId();
11842 if (!isBatchedAtLevel(self, cur_level)) {
11843 return at::_ops::prod_dim_Dimname::call(self, dim, keepdim, dtype);
11844 }
11845 Tensor self_value;
11846 optional<int64_t> self_bdim;
11847 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11848 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
11849 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11850}
11851template <typename batch_rule_t, batch_rule_t batch_rule>
11852at::Tensor t_generated_plumbing(const at::Tensor & self) {
11853 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11854 auto maybe_layer = maybeCurrentDynamicLayer();
11855 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11856 int64_t cur_level = maybe_layer->layerId();
11857 if (!isBatchedAtLevel(self, cur_level)) {
11858 return at::_ops::t::call(self);
11859 }
11860 Tensor self_value;
11861 optional<int64_t> self_bdim;
11862 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11863 auto results = batch_rule(self_value, self_bdim);
11864 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11865}
11866template <typename batch_rule_t, batch_rule_t batch_rule>
11867at::Tensor tan_generated_plumbing(const at::Tensor & self) {
11868 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11869 auto maybe_layer = maybeCurrentDynamicLayer();
11870 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11871 int64_t cur_level = maybe_layer->layerId();
11872 if (!isBatchedAtLevel(self, cur_level)) {
11873 return at::_ops::tan::call(self);
11874 }
11875 Tensor self_value;
11876 optional<int64_t> self_bdim;
11877 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11878 auto results = batch_rule(self_value, self_bdim);
11879 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11880}
11881template <typename batch_rule_t, batch_rule_t batch_rule>
11882at::Tensor & tan__generated_plumbing(at::Tensor & self) {
11883 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11884 auto maybe_layer = maybeCurrentDynamicLayer();
11885 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
11886 int64_t cur_level = maybe_layer->layerId();
11887 if (!isBatchedAtLevel(self, cur_level)) {
11888 return at::_ops::tan_::call(self);
11889 }
11890 Tensor self_value;
11891 optional<int64_t> self_bdim;
11892 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11893 batch_rule(self_value, self_bdim);
11894 return self;
11895}
11896template <typename batch_rule_t, batch_rule_t batch_rule>
11897at::Tensor tanh_generated_plumbing(const at::Tensor & self) {
11898 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11899 auto maybe_layer = maybeCurrentDynamicLayer();
11900 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11901 int64_t cur_level = maybe_layer->layerId();
11902 if (!isBatchedAtLevel(self, cur_level)) {
11903 return at::_ops::tanh::call(self);
11904 }
11905 Tensor self_value;
11906 optional<int64_t> self_bdim;
11907 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11908 auto results = batch_rule(self_value, self_bdim);
11909 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11910}
11911template <typename batch_rule_t, batch_rule_t batch_rule>
11912at::Tensor & tanh__generated_plumbing(at::Tensor & self) {
11913 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11914 auto maybe_layer = maybeCurrentDynamicLayer();
11915 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
11916 int64_t cur_level = maybe_layer->layerId();
11917 if (!isBatchedAtLevel(self, cur_level)) {
11918 return at::_ops::tanh_::call(self);
11919 }
11920 Tensor self_value;
11921 optional<int64_t> self_bdim;
11922 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11923 batch_rule(self_value, self_bdim);
11924 return self;
11925}
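// Ops with several tensor inputs (e.g. tensordot below) unwrap each tensor
// argument into its own (value, bdim) pair; the unbatched fallback is taken
// only when none of the tensor arguments is batched at the current level.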
11926template <typename batch_rule_t, batch_rule_t batch_rule>
11927at::Tensor tensordot_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
11928 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11929 auto maybe_layer = maybeCurrentDynamicLayer();
11930 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11931 int64_t cur_level = maybe_layer->layerId();
11932 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
11933 return at::_ops::tensordot::call(self, other, dims_self, dims_other);
11934 }
11935 Tensor self_value;
11936 optional<int64_t> self_bdim;
11937 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11938 Tensor other_value;
11939 optional<int64_t> other_bdim;
11940 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
11941 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims_self, dims_other);
11942 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11943}
11944template <typename batch_rule_t, batch_rule_t batch_rule>
11945at::Tensor threshold_generated_plumbing(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
11946 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11947 auto maybe_layer = maybeCurrentDynamicLayer();
11948 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11949 int64_t cur_level = maybe_layer->layerId();
11950 if (!isBatchedAtLevel(self, cur_level)) {
11951 return at::_ops::threshold::call(self, threshold, value);
11952 }
11953 Tensor self_value;
11954 optional<int64_t> self_bdim;
11955 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11956 auto results = batch_rule(self_value, self_bdim, threshold, value);
11957 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11958}
11959template <typename batch_rule_t, batch_rule_t batch_rule>
11960at::Tensor & threshold__generated_plumbing(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
11961 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11962 auto maybe_layer = maybeCurrentDynamicLayer();
11963 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
11964 int64_t cur_level = maybe_layer->layerId();
11965 if (!isBatchedAtLevel(self, cur_level)) {
11966 return at::_ops::threshold_::call(self, threshold, value);
11967 }
11968 Tensor self_value;
11969 optional<int64_t> self_bdim;
11970 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11971 batch_rule(self_value, self_bdim, threshold, value);
11972 return self;
11973}
11974template <typename batch_rule_t, batch_rule_t batch_rule>
11975at::Tensor threshold_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
11976 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11977 auto maybe_layer = maybeCurrentDynamicLayer();
11978 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11979 int64_t cur_level = maybe_layer->layerId();
11980 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
11981 return at::_ops::threshold_backward::call(grad_output, self, threshold);
11982 }
11983 Tensor grad_output_value;
11984 optional<int64_t> grad_output_bdim;
11985 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
11986 Tensor self_value;
11987 optional<int64_t> self_bdim;
11988 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
11989 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, threshold);
11990 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
11991}
11992template <typename batch_rule_t, batch_rule_t batch_rule>
11993at::Tensor tile_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
11994 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
11995 auto maybe_layer = maybeCurrentDynamicLayer();
11996 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
11997 int64_t cur_level = maybe_layer->layerId();
11998 if (!isBatchedAtLevel(self, cur_level)) {
11999 return at::_ops::tile::call(self, dims);
12000 }
12001 Tensor self_value;
12002 optional<int64_t> self_bdim;
12003 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12004 auto results = batch_rule(self_value, self_bdim, dims);
12005 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12006}
12007template <typename batch_rule_t, batch_rule_t batch_rule>
12008at::Tensor transpose_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
12009 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12010 auto maybe_layer = maybeCurrentDynamicLayer();
12011 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12012 int64_t cur_level = maybe_layer->layerId();
12013 if (!isBatchedAtLevel(self, cur_level)) {
12014 return at::_ops::transpose_int::call(self, dim0, dim1);
12015 }
12016 Tensor self_value;
12017 optional<int64_t> self_bdim;
12018 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12019 auto results = batch_rule(self_value, self_bdim, dim0, dim1);
12020 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12021}
12022template <typename batch_rule_t, batch_rule_t batch_rule>
12023at::Tensor transpose_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
12024 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12025 auto maybe_layer = maybeCurrentDynamicLayer();
12026 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12027 int64_t cur_level = maybe_layer->layerId();
12028 if (!isBatchedAtLevel(self, cur_level)) {
12029 return at::_ops::transpose_Dimname::call(self, dim0, dim1);
12030 }
12031 Tensor self_value;
12032 optional<int64_t> self_bdim;
12033 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12034 auto results = batch_rule(self_value, self_bdim, dim0, dim1);
12035 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12036}
12037template <typename batch_rule_t, batch_rule_t batch_rule>
12038at::Tensor _mkldnn_transpose_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
12039 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12040 auto maybe_layer = maybeCurrentDynamicLayer();
12041 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12042 int64_t cur_level = maybe_layer->layerId();
12043 if (!isBatchedAtLevel(self, cur_level)) {
12044 return at::_ops::_mkldnn_transpose::call(self, dim0, dim1);
12045 }
12046 Tensor self_value;
12047 optional<int64_t> self_bdim;
12048 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12049 auto results = batch_rule(self_value, self_bdim, dim0, dim1);
12050 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12051}
12052template <typename batch_rule_t, batch_rule_t batch_rule>
12053at::Tensor & _mkldnn_transpose__generated_plumbing(at::Tensor & self, int64_t dim0, int64_t dim1) {
12054 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12055 auto maybe_layer = maybeCurrentDynamicLayer();
12056 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12057 int64_t cur_level = maybe_layer->layerId();
12058 if (!isBatchedAtLevel(self, cur_level)) {
12059 return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1);
12060 }
12061 Tensor self_value;
12062 optional<int64_t> self_bdim;
12063 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12064 batch_rule(self_value, self_bdim, dim0, dim1);
12065 return self;
12066}
12067template <typename batch_rule_t, batch_rule_t batch_rule>
12068at::Tensor one_hot_generated_plumbing(const at::Tensor & self, int64_t num_classes) {
12069 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12070 auto maybe_layer = maybeCurrentDynamicLayer();
12071 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12072 int64_t cur_level = maybe_layer->layerId();
12073 if (!isBatchedAtLevel(self, cur_level)) {
12074 return at::_ops::one_hot::call(self, num_classes);
12075 }
12076 Tensor self_value;
12077 optional<int64_t> self_bdim;
12078 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12079 auto results = batch_rule(self_value, self_bdim, num_classes);
12080 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12081}
12082template <typename batch_rule_t, batch_rule_t batch_rule>
12083at::Tensor flip_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
12084 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12085 auto maybe_layer = maybeCurrentDynamicLayer();
12086 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12087 int64_t cur_level = maybe_layer->layerId();
12088 if (!isBatchedAtLevel(self, cur_level)) {
12089 return at::_ops::flip::call(self, dims);
12090 }
12091 Tensor self_value;
12092 optional<int64_t> self_bdim;
12093 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12094 auto results = batch_rule(self_value, self_bdim, dims);
12095 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12096}
12097template <typename batch_rule_t, batch_rule_t batch_rule>
12098at::Tensor fliplr_generated_plumbing(const at::Tensor & self) {
12099 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12100 auto maybe_layer = maybeCurrentDynamicLayer();
12101 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12102 int64_t cur_level = maybe_layer->layerId();
12103 if (!isBatchedAtLevel(self, cur_level)) {
12104 return at::_ops::fliplr::call(self);
12105 }
12106 Tensor self_value;
12107 optional<int64_t> self_bdim;
12108 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12109 auto results = batch_rule(self_value, self_bdim);
12110 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12111}
12112template <typename batch_rule_t, batch_rule_t batch_rule>
12113at::Tensor flipud_generated_plumbing(const at::Tensor & self) {
12114 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12115 auto maybe_layer = maybeCurrentDynamicLayer();
12116 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12117 int64_t cur_level = maybe_layer->layerId();
12118 if (!isBatchedAtLevel(self, cur_level)) {
12119 return at::_ops::flipud::call(self);
12120 }
12121 Tensor self_value;
12122 optional<int64_t> self_bdim;
12123 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12124 auto results = batch_rule(self_value, self_bdim);
12125 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12126}
12127template <typename batch_rule_t, batch_rule_t batch_rule>
12128at::Tensor roll_generated_plumbing(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims) {
12129 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12130 auto maybe_layer = maybeCurrentDynamicLayer();
12131 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12132 int64_t cur_level = maybe_layer->layerId();
12133 if (!isBatchedAtLevel(self, cur_level)) {
12134 return at::_ops::roll::call(self, shifts, dims);
12135 }
12136 Tensor self_value;
12137 optional<int64_t> self_bdim;
12138 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12139 auto results = batch_rule(self_value, self_bdim, shifts, dims);
12140 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12141}
12142template <typename batch_rule_t, batch_rule_t batch_rule>
12143at::Tensor rot90_generated_plumbing(const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
12144 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12145 auto maybe_layer = maybeCurrentDynamicLayer();
12146 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12147 int64_t cur_level = maybe_layer->layerId();
12148 if (!isBatchedAtLevel(self, cur_level)) {
12149 return at::_ops::rot90::call(self, k, dims);
12150 }
12151 Tensor self_value;
12152 optional<int64_t> self_bdim;
12153 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12154 auto results = batch_rule(self_value, self_bdim, k, dims);
12155 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12156}
12157template <typename batch_rule_t, batch_rule_t batch_rule>
12158at::Tensor trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
12159 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12160 auto maybe_layer = maybeCurrentDynamicLayer();
12161 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12162 int64_t cur_level = maybe_layer->layerId();
12163 if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
12164 return at::_ops::trapezoid_x::call(y, x, dim);
12165 }
12166 Tensor y_value;
12167 optional<int64_t> y_bdim;
12168 std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
12169 Tensor x_value;
12170 optional<int64_t> x_bdim;
12171 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
12172 auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
12173 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12174}
12175template <typename batch_rule_t, batch_rule_t batch_rule>
12176at::Tensor trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
12177 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12178 auto maybe_layer = maybeCurrentDynamicLayer();
12179 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12180 int64_t cur_level = maybe_layer->layerId();
12181 if (!isBatchedAtLevel(y, cur_level)) {
12182 return at::_ops::trapezoid_dx::call(y, dx, dim);
12183 }
12184 Tensor y_value;
12185 optional<int64_t> y_bdim;
12186 std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
12187 auto results = batch_rule(y_value, y_bdim, dx, dim);
12188 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12189}
12190template <typename batch_rule_t, batch_rule_t batch_rule>
12191at::Tensor trapz_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
12192 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12193 auto maybe_layer = maybeCurrentDynamicLayer();
12194 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12195 int64_t cur_level = maybe_layer->layerId();
12196 if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
12197 return at::_ops::trapz_x::call(y, x, dim);
12198 }
12199 Tensor y_value;
12200 optional<int64_t> y_bdim;
12201 std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
12202 Tensor x_value;
12203 optional<int64_t> x_bdim;
12204 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
12205 auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
12206 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12207}
12208template <typename batch_rule_t, batch_rule_t batch_rule>
12209at::Tensor trapz_dx_generated_plumbing(const at::Tensor & y, double dx, int64_t dim) {
12210 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12211 auto maybe_layer = maybeCurrentDynamicLayer();
12212 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12213 int64_t cur_level = maybe_layer->layerId();
12214 if (!isBatchedAtLevel(y, cur_level)) {
12215 return at::_ops::trapz_dx::call(y, dx, dim);
12216 }
12217 Tensor y_value;
12218 optional<int64_t> y_bdim;
12219 std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
12220 auto results = batch_rule(y_value, y_bdim, dx, dim);
12221 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12222}
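// Ops with three tensor outputs (e.g. _transform_bias_rescale_qkv below)
// follow the same scheme with a six-element result tuple, re-wrapped pairwise.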
12223template <typename batch_rule_t, batch_rule_t batch_rule>
12224::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv_generated_plumbing(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
12225 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12226 auto maybe_layer = maybeCurrentDynamicLayer();
12227 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12228 int64_t cur_level = maybe_layer->layerId();
12229 if (!isBatchedAtLevel(qkv, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level)) {
12230 return at::_ops::_transform_bias_rescale_qkv::call(qkv, qkv_bias, num_heads);
12231 }
12232 Tensor qkv_value;
12233 optional<int64_t> qkv_bdim;
12234 std::tie(qkv_value, qkv_bdim) = unwrapTensorAtLevel(qkv, cur_level);
12235 Tensor qkv_bias_value;
12236 optional<int64_t> qkv_bias_bdim;
12237 std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
12238 auto results = batch_rule(qkv_value, qkv_bdim, qkv_bias_value, qkv_bias_bdim, num_heads);
12239 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
12240}
12241template <typename batch_rule_t, batch_rule_t batch_rule>
12242at::Tensor _nested_tensor_from_mask_generated_plumbing(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
12243 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12244 auto maybe_layer = maybeCurrentDynamicLayer();
12245 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12246 int64_t cur_level = maybe_layer->layerId();
12247 if (!isBatchedAtLevel(t, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
12248 return at::_ops::_nested_tensor_from_mask::call(t, mask, mask_check);
12249 }
12250 Tensor t_value;
12251 optional<int64_t> t_bdim;
12252 std::tie(t_value, t_bdim) = unwrapTensorAtLevel(t, cur_level);
12253 Tensor mask_value;
12254 optional<int64_t> mask_bdim;
12255 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
12256 auto results = batch_rule(t_value, t_bdim, mask_value, mask_bdim, mask_check);
12257 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12258}
12259template <typename batch_rule_t, batch_rule_t batch_rule>
12260at::Tensor _nested_from_padded_generated_plumbing(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
12261 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12262 auto maybe_layer = maybeCurrentDynamicLayer();
12263 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12264 int64_t cur_level = maybe_layer->layerId();
12265 if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(cpu_nested_shape_example, cur_level)) {
12266 return at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213);
12267 }
12268 Tensor padded_value;
12269 optional<int64_t> padded_bdim;
12270 std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level);
12271 Tensor cpu_nested_shape_example_value;
12272 optional<int64_t> cpu_nested_shape_example_bdim;
12273 std::tie(cpu_nested_shape_example_value, cpu_nested_shape_example_bdim) = unwrapTensorAtLevel(cpu_nested_shape_example, cur_level);
12274 auto results = batch_rule(padded_value, padded_bdim, cpu_nested_shape_example_value, cpu_nested_shape_example_bdim, fuse_transform_0213);
12275 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12276}
12277template <typename batch_rule_t, batch_rule_t batch_rule>
12278at::Tensor _nested_tensor_size_generated_plumbing(const at::Tensor & self) {
12279 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12280 auto maybe_layer = maybeCurrentDynamicLayer();
12281 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12282 int64_t cur_level = maybe_layer->layerId();
12283 if (!isBatchedAtLevel(self, cur_level)) {
12284 return at::_ops::_nested_tensor_size::call(self);
12285 }
12286 Tensor self_value;
12287 optional<int64_t> self_bdim;
12288 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12289 auto results = batch_rule(self_value, self_bdim);
12290 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12291}
12292template <typename batch_rule_t, batch_rule_t batch_rule>
12293at::Tensor _nested_tensor_strides_generated_plumbing(const at::Tensor & self) {
12294 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12295 auto maybe_layer = maybeCurrentDynamicLayer();
12296 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12297 int64_t cur_level = maybe_layer->layerId();
12298 if (!isBatchedAtLevel(self, cur_level)) {
12299 return at::_ops::_nested_tensor_strides::call(self);
12300 }
12301 Tensor self_value;
12302 optional<int64_t> self_bdim;
12303 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12304 auto results = batch_rule(self_value, self_bdim);
12305 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12306}
12307template <typename batch_rule_t, batch_rule_t batch_rule>
12308at::Tensor _nested_from_padded_and_nested_example_generated_plumbing(const at::Tensor & padded, const at::Tensor & nt_example) {
12309 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12310 auto maybe_layer = maybeCurrentDynamicLayer();
12311 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12312 int64_t cur_level = maybe_layer->layerId();
12313 if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(nt_example, cur_level)) {
12314 return at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example);
12315 }
12316 Tensor padded_value;
12317 optional<int64_t> padded_bdim;
12318 std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level);
12319 Tensor nt_example_value;
12320 optional<int64_t> nt_example_bdim;
12321 std::tie(nt_example_value, nt_example_bdim) = unwrapTensorAtLevel(nt_example, cur_level);
12322 auto results = batch_rule(padded_value, padded_bdim, nt_example_value, nt_example_bdim);
12323 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12324}
12325template <typename batch_rule_t, batch_rule_t batch_rule>
12326at::Tensor _nested_view_from_buffer_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
12327 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12328 auto maybe_layer = maybeCurrentDynamicLayer();
12329 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12330 int64_t cur_level = maybe_layer->layerId();
12331 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level)) {
12332 return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
12333 }
12334 Tensor self_value;
12335 optional<int64_t> self_bdim;
12336 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12337 Tensor nested_size_value;
12338 optional<int64_t> nested_size_bdim;
12339 std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level);
12340 Tensor nested_strides_value;
12341 optional<int64_t> nested_strides_bdim;
12342 std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level);
12343 auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets);
12344 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12345}
12346template <typename batch_rule_t, batch_rule_t batch_rule>
12347at::Tensor _nested_view_from_buffer_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
12348 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12349 auto maybe_layer = maybeCurrentDynamicLayer();
12350 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12351 int64_t cur_level = maybe_layer->layerId();
12352 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level)) {
12353 return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets);
12354 }
12355 Tensor self_value;
12356 optional<int64_t> self_bdim;
12357 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12358 Tensor nested_size_value;
12359 optional<int64_t> nested_size_bdim;
12360 std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level);
12361 Tensor nested_strides_value;
12362 optional<int64_t> nested_strides_bdim;
12363 std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level);
12364 auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets);
12365 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12366}
12367template <typename batch_rule_t, batch_rule_t batch_rule>
12368at::Tensor _trilinear_generated_plumbing(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
12369 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12370 auto maybe_layer = maybeCurrentDynamicLayer();
12371 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12372 int64_t cur_level = maybe_layer->layerId();
12373 if (!isBatchedAtLevel(i1, cur_level) && !isBatchedAtLevel(i2, cur_level) && !isBatchedAtLevel(i3, cur_level)) {
12374 return at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
12375 }
12376 Tensor i1_value;
12377 optional<int64_t> i1_bdim;
12378 std::tie(i1_value, i1_bdim) = unwrapTensorAtLevel(i1, cur_level);
12379 Tensor i2_value;
12380 optional<int64_t> i2_bdim;
12381 std::tie(i2_value, i2_bdim) = unwrapTensorAtLevel(i2, cur_level);
12382 Tensor i3_value;
12383 optional<int64_t> i3_bdim;
12384 std::tie(i3_value, i3_bdim) = unwrapTensorAtLevel(i3, cur_level);
12385 auto results = batch_rule(i1_value, i1_bdim, i2_value, i2_bdim, i3_value, i3_bdim, expand1, expand2, expand3, sumdim, unroll_dim);
12386 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12387}
12388template <typename batch_rule_t, batch_rule_t batch_rule>
12389at::Tensor triplet_margin_loss_generated_plumbing(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
12390 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12391 auto maybe_layer = maybeCurrentDynamicLayer();
12392 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12393 int64_t cur_level = maybe_layer->layerId();
12394 if (!isBatchedAtLevel(anchor, cur_level) && !isBatchedAtLevel(positive, cur_level) && !isBatchedAtLevel(negative, cur_level)) {
12395 return at::_ops::triplet_margin_loss::call(anchor, positive, negative, margin, p, eps, swap, reduction);
12396 }
12397 Tensor anchor_value;
12398 optional<int64_t> anchor_bdim;
12399 std::tie(anchor_value, anchor_bdim) = unwrapTensorAtLevel(anchor, cur_level);
12400 Tensor positive_value;
12401 optional<int64_t> positive_bdim;
12402 std::tie(positive_value, positive_bdim) = unwrapTensorAtLevel(positive, cur_level);
12403 Tensor negative_value;
12404 optional<int64_t> negative_bdim;
12405 std::tie(negative_value, negative_bdim) = unwrapTensorAtLevel(negative, cur_level);
12406 auto results = batch_rule(anchor_value, anchor_bdim, positive_value, positive_bdim, negative_value, negative_bdim, margin, p, eps, swap, reduction);
12407 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12408}
12409template <typename batch_rule_t, batch_rule_t batch_rule>
12410at::Tensor trunc_generated_plumbing(const at::Tensor & self) {
12411 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12412 auto maybe_layer = maybeCurrentDynamicLayer();
12413 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12414 int64_t cur_level = maybe_layer->layerId();
12415 if (!isBatchedAtLevel(self, cur_level)) {
12416 return at::_ops::trunc::call(self);
12417 }
12418 Tensor self_value;
12419 optional<int64_t> self_bdim;
12420 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12421 auto results = batch_rule(self_value, self_bdim);
12422 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12423}
12424template <typename batch_rule_t, batch_rule_t batch_rule>
12425at::Tensor & trunc__generated_plumbing(at::Tensor & self) {
12426 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12427 auto maybe_layer = maybeCurrentDynamicLayer();
12428 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12429 int64_t cur_level = maybe_layer->layerId();
12430 if (!isBatchedAtLevel(self, cur_level)) {
12431 return at::_ops::trunc_::call(self);
12432 }
12433 Tensor self_value;
12434 optional<int64_t> self_bdim;
12435 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12436 batch_rule(self_value, self_bdim);
12437 return self;
12438}
12439template <typename batch_rule_t, batch_rule_t batch_rule>
12440at::Tensor fix_generated_plumbing(const at::Tensor & self) {
12441 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12442 auto maybe_layer = maybeCurrentDynamicLayer();
12443 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12444 int64_t cur_level = maybe_layer->layerId();
12445 if (!isBatchedAtLevel(self, cur_level)) {
12446 return at::_ops::fix::call(self);
12447 }
12448 Tensor self_value;
12449 optional<int64_t> self_bdim;
12450 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12451 auto results = batch_rule(self_value, self_bdim);
12452 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12453}
12454template <typename batch_rule_t, batch_rule_t batch_rule>
12455at::Tensor & fix__generated_plumbing(at::Tensor & self) {
12456 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12457 auto maybe_layer = maybeCurrentDynamicLayer();
12458 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
12459 int64_t cur_level = maybe_layer->layerId();
12460 if (!isBatchedAtLevel(self, cur_level)) {
12461 return at::_ops::fix_::call(self);
12462 }
12463 Tensor self_value;
12464 optional<int64_t> self_bdim;
12465 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12466 batch_rule(self_value, self_bdim);
12467 return self;
12468}
12469template <typename batch_rule_t, batch_rule_t batch_rule>
12470at::Tensor type_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
12471 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12472 auto maybe_layer = maybeCurrentDynamicLayer();
12473 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12474 int64_t cur_level = maybe_layer->layerId();
12475 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12476 return at::_ops::type_as::call(self, other);
12477 }
12478 Tensor self_value;
12479 optional<int64_t> self_bdim;
12480 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12481 Tensor other_value;
12482 optional<int64_t> other_bdim;
12483 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
12484 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
12485 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12486}
12487template <typename batch_rule_t, batch_rule_t batch_rule>
12488::std::tuple<at::Tensor,at::Tensor> _unique_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse) {
12489 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12490 auto maybe_layer = maybeCurrentDynamicLayer();
12491 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12492 int64_t cur_level = maybe_layer->layerId();
12493 if (!isBatchedAtLevel(self, cur_level)) {
12494 return at::_ops::_unique::call(self, sorted, return_inverse);
12495 }
12496 Tensor self_value;
12497 optional<int64_t> self_bdim;
12498 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12499 auto results = batch_rule(self_value, self_bdim, sorted, return_inverse);
12500 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12501}
12502template <typename batch_rule_t, batch_rule_t batch_rule>
12503::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
12504 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12505 auto maybe_layer = maybeCurrentDynamicLayer();
12506 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12507 int64_t cur_level = maybe_layer->layerId();
12508 if (!isBatchedAtLevel(self, cur_level)) {
12509 return at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts);
12510 }
12511 Tensor self_value;
12512 optional<int64_t> self_bdim;
12513 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12514 auto results = batch_rule(self_value, self_bdim, dim, sorted, return_inverse, return_counts);
12515 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
12516}
12517template <typename batch_rule_t, batch_rule_t batch_rule>
12518::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive_generated_plumbing(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) {
12519 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12520 auto maybe_layer = maybeCurrentDynamicLayer();
12521 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12522 int64_t cur_level = maybe_layer->layerId();
12523 if (!isBatchedAtLevel(self, cur_level)) {
12524 return at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim);
12525 }
12526 Tensor self_value;
12527 optional<int64_t> self_bdim;
12528 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12529 auto results = batch_rule(self_value, self_bdim, return_inverse, return_counts, dim);
12530 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
12531}
12532template <typename batch_rule_t, batch_rule_t batch_rule>
12533::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive_generated_plumbing(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
12534 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12535 auto maybe_layer = maybeCurrentDynamicLayer();
12536 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12537 int64_t cur_level = maybe_layer->layerId();
12538 if (!isBatchedAtLevel(self, cur_level)) {
12539 return at::_ops::unique_dim_consecutive::call(self, dim, return_inverse, return_counts);
12540 }
12541 Tensor self_value;
12542 optional<int64_t> self_bdim;
12543 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12544 auto results = batch_rule(self_value, self_bdim, dim, return_inverse, return_counts);
12545 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
12546}
12547template <typename batch_rule_t, batch_rule_t batch_rule>
12548::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
12549 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12550 auto maybe_layer = maybeCurrentDynamicLayer();
12551 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12552 int64_t cur_level = maybe_layer->layerId();
12553 if (!isBatchedAtLevel(self, cur_level)) {
12554 return at::_ops::_unique2::call(self, sorted, return_inverse, return_counts);
12555 }
12556 Tensor self_value;
12557 optional<int64_t> self_bdim;
12558 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12559 auto results = batch_rule(self_value, self_bdim, sorted, return_inverse, return_counts);
12560 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
12561}
12562template <typename batch_rule_t, batch_rule_t batch_rule>
12563at::Tensor _unsafe_view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
12564 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12565 auto maybe_layer = maybeCurrentDynamicLayer();
12566 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12567 int64_t cur_level = maybe_layer->layerId();
12568 if (!isBatchedAtLevel(self, cur_level)) {
12569 return at::_ops::_unsafe_view::call(self, size);
12570 }
12571 Tensor self_value;
12572 optional<int64_t> self_bdim;
12573 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12574 auto results = batch_rule(self_value, self_bdim, size);
12575 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12576}
12577template <typename batch_rule_t, batch_rule_t batch_rule>
12578at::Tensor unsqueeze_generated_plumbing(const at::Tensor & self, int64_t dim) {
12579 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12580 auto maybe_layer = maybeCurrentDynamicLayer();
12581 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12582 int64_t cur_level = maybe_layer->layerId();
12583 if (!isBatchedAtLevel(self, cur_level)) {
12584 return at::_ops::unsqueeze::call(self, dim);
12585 }
12586 Tensor self_value;
12587 optional<int64_t> self_bdim;
12588 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12589 auto results = batch_rule(self_value, self_bdim, dim);
12590 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12591}
12592template <typename batch_rule_t, batch_rule_t batch_rule>
12593at::Tensor vander_generated_plumbing(const at::Tensor & x, c10::optional<int64_t> N, bool increasing) {
12594 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12595 auto maybe_layer = maybeCurrentDynamicLayer();
12596 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12597 int64_t cur_level = maybe_layer->layerId();
12598 if (!isBatchedAtLevel(x, cur_level)) {
12599 return at::_ops::vander::call(x, N, increasing);
12600 }
12601 Tensor x_value;
12602 optional<int64_t> x_bdim;
12603 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
12604 auto results = batch_rule(x_value, x_bdim, N, increasing);
12605 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12606}
12607template <typename batch_rule_t, batch_rule_t batch_rule>
12608at::Tensor var_generated_plumbing(const at::Tensor & self, bool unbiased) {
12609 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12610 auto maybe_layer = maybeCurrentDynamicLayer();
12611 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12612 int64_t cur_level = maybe_layer->layerId();
12613 if (!isBatchedAtLevel(self, cur_level)) {
12614 return at::_ops::var::call(self, unbiased);
12615 }
12616 Tensor self_value;
12617 optional<int64_t> self_bdim;
12618 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12619 auto results = batch_rule(self_value, self_bdim, unbiased);
12620 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12621}
12622template <typename batch_rule_t, batch_rule_t batch_rule>
12623at::Tensor var_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
12624 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12625 auto maybe_layer = maybeCurrentDynamicLayer();
12626 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12627 int64_t cur_level = maybe_layer->layerId();
12628 if (!isBatchedAtLevel(self, cur_level)) {
12629 return at::_ops::var_dim::call(self, dim, unbiased, keepdim);
12630 }
12631 Tensor self_value;
12632 optional<int64_t> self_bdim;
12633 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12634 auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
12635 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12636}
12637template <typename batch_rule_t, batch_rule_t batch_rule>
12638at::Tensor var_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
12639 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12640 auto maybe_layer = maybeCurrentDynamicLayer();
12641 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12642 int64_t cur_level = maybe_layer->layerId();
12643 if (!isBatchedAtLevel(self, cur_level)) {
12644 return at::_ops::var_correction::call(self, dim, correction, keepdim);
12645 }
12646 Tensor self_value;
12647 optional<int64_t> self_bdim;
12648 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12649 auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
12650 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12651}
12652template <typename batch_rule_t, batch_rule_t batch_rule>
12653at::Tensor var_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
12654 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12655 auto maybe_layer = maybeCurrentDynamicLayer();
12656 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12657 int64_t cur_level = maybe_layer->layerId();
12658 if (!isBatchedAtLevel(self, cur_level)) {
12659 return at::_ops::var_names_dim::call(self, dim, unbiased, keepdim);
12660 }
12661 Tensor self_value;
12662 optional<int64_t> self_bdim;
12663 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12664 auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
12665 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12666}
12667template <typename batch_rule_t, batch_rule_t batch_rule>
12668at::Tensor var_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
12669 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12670 auto maybe_layer = maybeCurrentDynamicLayer();
12671 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12672 int64_t cur_level = maybe_layer->layerId();
12673 if (!isBatchedAtLevel(self, cur_level)) {
12674 return at::_ops::var_correction_names::call(self, dim, correction, keepdim);
12675 }
12676 Tensor self_value;
12677 optional<int64_t> self_bdim;
12678 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12679 auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
12680 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12681}
12682template <typename batch_rule_t, batch_rule_t batch_rule>
12683::std::tuple<at::Tensor,at::Tensor> var_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
12684 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12685 auto maybe_layer = maybeCurrentDynamicLayer();
12686 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12687 int64_t cur_level = maybe_layer->layerId();
12688 if (!isBatchedAtLevel(self, cur_level)) {
12689 return at::_ops::var_mean::call(self, unbiased);
12690 }
12691 Tensor self_value;
12692 optional<int64_t> self_bdim;
12693 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12694 auto results = batch_rule(self_value, self_bdim, unbiased);
12695 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12696}
12697template <typename batch_rule_t, batch_rule_t batch_rule>
12698::std::tuple<at::Tensor,at::Tensor> var_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
12699 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12700 auto maybe_layer = maybeCurrentDynamicLayer();
12701 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12702 int64_t cur_level = maybe_layer->layerId();
12703 if (!isBatchedAtLevel(self, cur_level)) {
12704 return at::_ops::var_mean_dim::call(self, dim, unbiased, keepdim);
12705 }
12706 Tensor self_value;
12707 optional<int64_t> self_bdim;
12708 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12709 auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
12710 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12711}
12712template <typename batch_rule_t, batch_rule_t batch_rule>
12713::std::tuple<at::Tensor,at::Tensor> var_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) {
12714 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12715 auto maybe_layer = maybeCurrentDynamicLayer();
12716 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12717 int64_t cur_level = maybe_layer->layerId();
12718 if (!isBatchedAtLevel(self, cur_level)) {
12719 return at::_ops::var_mean_correction::call(self, dim, correction, keepdim);
12720 }
12721 Tensor self_value;
12722 optional<int64_t> self_bdim;
12723 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12724 auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
12725 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12726}
12727template <typename batch_rule_t, batch_rule_t batch_rule>
12728::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
12729 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12730 auto maybe_layer = maybeCurrentDynamicLayer();
12731 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12732 int64_t cur_level = maybe_layer->layerId();
12733 if (!isBatchedAtLevel(self, cur_level)) {
12734 return at::_ops::var_mean_names_dim::call(self, dim, unbiased, keepdim);
12735 }
12736 Tensor self_value;
12737 optional<int64_t> self_bdim;
12738 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12739 auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
12740 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12741}
12742template <typename batch_rule_t, batch_rule_t batch_rule>
12743::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) {
12744 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12745 auto maybe_layer = maybeCurrentDynamicLayer();
12746 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12747 int64_t cur_level = maybe_layer->layerId();
12748 if (!isBatchedAtLevel(self, cur_level)) {
12749 return at::_ops::var_mean_correction_names::call(self, dim, correction, keepdim);
12750 }
12751 Tensor self_value;
12752 optional<int64_t> self_bdim;
12753 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12754 auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
12755 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12756}
12757template <typename batch_rule_t, batch_rule_t batch_rule>
12758at::Tensor view_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
12759 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12760 auto maybe_layer = maybeCurrentDynamicLayer();
12761 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12762 int64_t cur_level = maybe_layer->layerId();
12763 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12764 return at::_ops::view_as::call(self, other);
12765 }
12766 Tensor self_value;
12767 optional<int64_t> self_bdim;
12768 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12769 Tensor other_value;
12770 optional<int64_t> other_bdim;
12771 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
12772 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
12773 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12774}
12775template <typename batch_rule_t, batch_rule_t batch_rule>
12776at::Tensor where_self_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
12777 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12778 auto maybe_layer = maybeCurrentDynamicLayer();
12779 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12780 int64_t cur_level = maybe_layer->layerId();
12781 if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12782 return at::_ops::where_self::call(condition, self, other);
12783 }
12784 Tensor condition_value;
12785 optional<int64_t> condition_bdim;
12786 std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
12787 Tensor self_value;
12788 optional<int64_t> self_bdim;
12789 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12790 Tensor other_value;
12791 optional<int64_t> other_bdim;
12792 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
12793 auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other_value, other_bdim);
12794 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12795}
12796template <typename batch_rule_t, batch_rule_t batch_rule>
12797at::Tensor where_ScalarSelf_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
12798 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12799 auto maybe_layer = maybeCurrentDynamicLayer();
12800 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12801 int64_t cur_level = maybe_layer->layerId();
12802 if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(other, cur_level)) {
12803 return at::_ops::where_ScalarSelf::call(condition, self, other);
12804 }
12805 Tensor condition_value;
12806 optional<int64_t> condition_bdim;
12807 std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
12808 Tensor other_value;
12809 optional<int64_t> other_bdim;
12810 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
12811 auto results = batch_rule(condition_value, condition_bdim, self, other_value, other_bdim);
12812 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12813}
12814template <typename batch_rule_t, batch_rule_t batch_rule>
12815at::Tensor where_ScalarOther_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
12816 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12817 auto maybe_layer = maybeCurrentDynamicLayer();
12818 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12819 int64_t cur_level = maybe_layer->layerId();
12820 if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level)) {
12821 return at::_ops::where_ScalarOther::call(condition, self, other);
12822 }
12823 Tensor condition_value;
12824 optional<int64_t> condition_bdim;
12825 std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
12826 Tensor self_value;
12827 optional<int64_t> self_bdim;
12828 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12829 auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other);
12830 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12831}
12832template <typename batch_rule_t, batch_rule_t batch_rule>
12833at::Tensor where_Scalar_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
12834 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12835 auto maybe_layer = maybeCurrentDynamicLayer();
12836 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12837 int64_t cur_level = maybe_layer->layerId();
12838 if (!isBatchedAtLevel(condition, cur_level)) {
12839 return at::_ops::where_Scalar::call(condition, self, other);
12840 }
12841 Tensor condition_value;
12842 optional<int64_t> condition_bdim;
12843 std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
12844 auto results = batch_rule(condition_value, condition_bdim, self, other);
12845 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12846}
12847template <typename batch_rule_t, batch_rule_t batch_rule>
12848::std::vector<at::Tensor> where_generated_plumbing(const at::Tensor & condition) {
12849 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12850 auto maybe_layer = maybeCurrentDynamicLayer();
12851 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12852 int64_t cur_level = maybe_layer->layerId();
12853 if (!isBatchedAtLevel(condition, cur_level)) {
12854 return at::_ops::where::call(condition);
12855 }
12856 Tensor condition_value;
12857 optional<int64_t> condition_bdim;
12858 std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
12859 auto results = batch_rule(condition_value, condition_bdim);
12860 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
12861}
12862template <typename batch_rule_t, batch_rule_t batch_rule>
12863at::Tensor norm_except_dim_generated_plumbing(const at::Tensor & v, int64_t pow, int64_t dim) {
12864 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12865 auto maybe_layer = maybeCurrentDynamicLayer();
12866 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12867 int64_t cur_level = maybe_layer->layerId();
12868 if (!isBatchedAtLevel(v, cur_level)) {
12869 return at::_ops::norm_except_dim::call(v, pow, dim);
12870 }
12871 Tensor v_value;
12872 optional<int64_t> v_bdim;
12873 std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
12874 auto results = batch_rule(v_value, v_bdim, pow, dim);
12875 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12876}
12877template <typename batch_rule_t, batch_rule_t batch_rule>
12878at::Tensor _weight_norm_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
12879 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12880 auto maybe_layer = maybeCurrentDynamicLayer();
12881 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12882 int64_t cur_level = maybe_layer->layerId();
12883 if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
12884 return at::_ops::_weight_norm::call(v, g, dim);
12885 }
12886 Tensor v_value;
12887 optional<int64_t> v_bdim;
12888 std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
12889 Tensor g_value;
12890 optional<int64_t> g_bdim;
12891 std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level);
12892 auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
12893 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12894}
12895template <typename batch_rule_t, batch_rule_t batch_rule>
12896::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
12897 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12898 auto maybe_layer = maybeCurrentDynamicLayer();
12899 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12900 int64_t cur_level = maybe_layer->layerId();
12901 if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
12902 return at::_ops::_weight_norm_interface::call(v, g, dim);
12903 }
12904 Tensor v_value;
12905 optional<int64_t> v_bdim;
12906 std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
12907 Tensor g_value;
12908 optional<int64_t> g_bdim;
12909 std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level);
12910 auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
12911 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12912}
12913template <typename batch_rule_t, batch_rule_t batch_rule>
12914::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
12915 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12916 auto maybe_layer = maybeCurrentDynamicLayer();
12917 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12918 int64_t cur_level = maybe_layer->layerId();
12919 if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
12920 return at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
12921 }
12922 Tensor grad_w_value;
12923 optional<int64_t> grad_w_bdim;
12924 std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level);
12925 Tensor saved_v_value;
12926 optional<int64_t> saved_v_bdim;
12927 std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level);
12928 Tensor saved_g_value;
12929 optional<int64_t> saved_g_bdim;
12930 std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level);
12931 Tensor saved_norms_value;
12932 optional<int64_t> saved_norms_bdim;
12933 std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level);
12934 auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
12935 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12936}
12937template <typename batch_rule_t, batch_rule_t batch_rule>
12938::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
12939 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12940 auto maybe_layer = maybeCurrentDynamicLayer();
12941 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12942 int64_t cur_level = maybe_layer->layerId();
12943 if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
12944 return at::_ops::_weight_norm_differentiable_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
12945 }
12946 Tensor grad_w_value;
12947 optional<int64_t> grad_w_bdim;
12948 std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level);
12949 Tensor saved_v_value;
12950 optional<int64_t> saved_v_bdim;
12951 std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level);
12952 Tensor saved_g_value;
12953 optional<int64_t> saved_g_bdim;
12954 std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level);
12955 Tensor saved_norms_value;
12956 optional<int64_t> saved_norms_bdim;
12957 std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level);
12958 auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
12959 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
12960}
12961template <typename batch_rule_t, batch_rule_t batch_rule>
12962at::Tensor zeros_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
12963 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12964 auto maybe_layer = maybeCurrentDynamicLayer();
12965 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12966 int64_t cur_level = maybe_layer->layerId();
12967 if (!isBatchedAtLevel(self, cur_level)) {
12968 return at::_ops::zeros_like::call(self, dtype, layout, device, pin_memory, memory_format);
12969 }
12970 Tensor self_value;
12971 optional<int64_t> self_bdim;
12972 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12973 auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
12974 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12975}
12976template <typename batch_rule_t, batch_rule_t batch_rule>
12977at::Tensor _standard_gamma_grad_generated_plumbing(const at::Tensor & self, const at::Tensor & output) {
12978 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12979 auto maybe_layer = maybeCurrentDynamicLayer();
12980 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12981 int64_t cur_level = maybe_layer->layerId();
12982 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(output, cur_level)) {
12983 return at::_ops::_standard_gamma_grad::call(self, output);
12984 }
12985 Tensor self_value;
12986 optional<int64_t> self_bdim;
12987 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
12988 Tensor output_value;
12989 optional<int64_t> output_bdim;
12990 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
12991 auto results = batch_rule(self_value, self_bdim, output_value, output_bdim);
12992 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
12993}
12994template <typename batch_rule_t, batch_rule_t batch_rule>
12995at::Tensor _standard_gamma_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
12996 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
12997 auto maybe_layer = maybeCurrentDynamicLayer();
12998 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
12999 int64_t cur_level = maybe_layer->layerId();
13000 if (!isBatchedAtLevel(self, cur_level)) {
13001 return at::_ops::_standard_gamma::call(self, generator);
13002 }
13003 Tensor self_value;
13004 optional<int64_t> self_bdim;
13005 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13006 auto results = batch_rule(self_value, self_bdim, generator);
13007 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13008}
13009template <typename batch_rule_t, batch_rule_t batch_rule>
13010at::Tensor _dirichlet_grad_generated_plumbing(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
13011 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13012 auto maybe_layer = maybeCurrentDynamicLayer();
13013 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13014 int64_t cur_level = maybe_layer->layerId();
13015 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(alpha, cur_level) && !isBatchedAtLevel(total, cur_level)) {
13016 return at::_ops::_dirichlet_grad::call(x, alpha, total);
13017 }
13018 Tensor x_value;
13019 optional<int64_t> x_bdim;
13020 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
13021 Tensor alpha_value;
13022 optional<int64_t> alpha_bdim;
13023 std::tie(alpha_value, alpha_bdim) = unwrapTensorAtLevel(alpha, cur_level);
13024 Tensor total_value;
13025 optional<int64_t> total_bdim;
13026 std::tie(total_value, total_bdim) = unwrapTensorAtLevel(total, cur_level);
13027 auto results = batch_rule(x_value, x_bdim, alpha_value, alpha_bdim, total_value, total_bdim);
13028 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13029}
13030template <typename batch_rule_t, batch_rule_t batch_rule>
13031at::Tensor _sample_dirichlet_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
13032 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13033 auto maybe_layer = maybeCurrentDynamicLayer();
13034 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13035 int64_t cur_level = maybe_layer->layerId();
13036 if (!isBatchedAtLevel(self, cur_level)) {
13037 return at::_ops::_sample_dirichlet::call(self, generator);
13038 }
13039 Tensor self_value;
13040 optional<int64_t> self_bdim;
13041 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13042 auto results = batch_rule(self_value, self_bdim, generator);
13043 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13044}
13045template <typename batch_rule_t, batch_rule_t batch_rule>
13046at::Tensor poisson_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
13047 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13048 auto maybe_layer = maybeCurrentDynamicLayer();
13049 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13050 int64_t cur_level = maybe_layer->layerId();
13051 if (!isBatchedAtLevel(self, cur_level)) {
13052 return at::_ops::poisson::call(self, generator);
13053 }
13054 Tensor self_value;
13055 optional<int64_t> self_bdim;
13056 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13057 auto results = batch_rule(self_value, self_bdim, generator);
13058 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13059}
13060template <typename batch_rule_t, batch_rule_t batch_rule>
13061at::Tensor binomial_generated_plumbing(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator) {
13062 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13063 auto maybe_layer = maybeCurrentDynamicLayer();
13064 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13065 int64_t cur_level = maybe_layer->layerId();
13066 if (!isBatchedAtLevel(count, cur_level) && !isBatchedAtLevel(prob, cur_level)) {
13067 return at::_ops::binomial::call(count, prob, generator);
13068 }
13069 Tensor count_value;
13070 optional<int64_t> count_bdim;
13071 std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level);
13072 Tensor prob_value;
13073 optional<int64_t> prob_bdim;
13074 std::tie(prob_value, prob_bdim) = unwrapTensorAtLevel(prob, cur_level);
13075 auto results = batch_rule(count_value, count_bdim, prob_value, prob_bdim, generator);
13076 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13077}
13078template <typename batch_rule_t, batch_rule_t batch_rule>
13079at::Tensor native_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
13080 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13081 auto maybe_layer = maybeCurrentDynamicLayer();
13082 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13083 int64_t cur_level = maybe_layer->layerId();
13084 if (!isBatchedAtLevel(self, cur_level)) {
13085 return at::_ops::native_norm::call(self, p);
13086 }
13087 Tensor self_value;
13088 optional<int64_t> self_bdim;
13089 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13090 auto results = batch_rule(self_value, self_bdim, p);
13091 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13092}
13093template <typename batch_rule_t, batch_rule_t batch_rule>
13094at::Tensor native_norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
13095 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13096 auto maybe_layer = maybeCurrentDynamicLayer();
13097 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13098 int64_t cur_level = maybe_layer->layerId();
13099 if (!isBatchedAtLevel(self, cur_level)) {
13100 return at::_ops::native_norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
13101 }
13102 Tensor self_value;
13103 optional<int64_t> self_bdim;
13104 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13105 auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
13106 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13107}
13108template <typename batch_rule_t, batch_rule_t batch_rule>
13109at::Tensor _sparse_sum_generated_plumbing(const at::Tensor & self) {
13110 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13111 auto maybe_layer = maybeCurrentDynamicLayer();
13112 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13113 int64_t cur_level = maybe_layer->layerId();
13114 if (!isBatchedAtLevel(self, cur_level)) {
13115 return at::_ops::_sparse_sum::call(self);
13116 }
13117 Tensor self_value;
13118 optional<int64_t> self_bdim;
13119 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13120 auto results = batch_rule(self_value, self_bdim);
13121 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13122}
13123template <typename batch_rule_t, batch_rule_t batch_rule>
13124at::Tensor _sparse_sum_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
13125 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13126 auto maybe_layer = maybeCurrentDynamicLayer();
13127 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13128 int64_t cur_level = maybe_layer->layerId();
13129 if (!isBatchedAtLevel(self, cur_level)) {
13130 return at::_ops::_sparse_sum_dtype::call(self, dtype);
13131 }
13132 Tensor self_value;
13133 optional<int64_t> self_bdim;
13134 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13135 auto results = batch_rule(self_value, self_bdim, dtype);
13136 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13137}
13138template <typename batch_rule_t, batch_rule_t batch_rule>
13139at::Tensor _sparse_sum_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
13140 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13141 auto maybe_layer = maybeCurrentDynamicLayer();
13142 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13143 int64_t cur_level = maybe_layer->layerId();
13144 if (!isBatchedAtLevel(self, cur_level)) {
13145 return at::_ops::_sparse_sum_dim::call(self, dim);
13146 }
13147 Tensor self_value;
13148 optional<int64_t> self_bdim;
13149 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13150 auto results = batch_rule(self_value, self_bdim, dim);
13151 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13152}
13153template <typename batch_rule_t, batch_rule_t batch_rule>
13154at::Tensor _sparse_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
13155 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13156 auto maybe_layer = maybeCurrentDynamicLayer();
13157 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13158 int64_t cur_level = maybe_layer->layerId();
13159 if (!isBatchedAtLevel(self, cur_level)) {
13160 return at::_ops::_sparse_sum_dim_dtype::call(self, dim, dtype);
13161 }
13162 Tensor self_value;
13163 optional<int64_t> self_bdim;
13164 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13165 auto results = batch_rule(self_value, self_bdim, dim, dtype);
13166 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13167}
13168template <typename batch_rule_t, batch_rule_t batch_rule>
13169at::Tensor _sparse_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
13170 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13171 auto maybe_layer = maybeCurrentDynamicLayer();
13172 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13173 int64_t cur_level = maybe_layer->layerId();
13174 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
13175 return at::_ops::_sparse_sum_backward::call(grad, self, dim);
13176 }
13177 Tensor grad_value;
13178 optional<int64_t> grad_bdim;
13179 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
13180 Tensor self_value;
13181 optional<int64_t> self_bdim;
13182 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13183 auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim);
13184 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13185}
13186template <typename batch_rule_t, batch_rule_t batch_rule>
13187at::Tensor _sparse_csr_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
13188 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13189 auto maybe_layer = maybeCurrentDynamicLayer();
13190 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13191 int64_t cur_level = maybe_layer->layerId();
13192 if (!isBatchedAtLevel(self, cur_level)) {
13193 return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype);
13194 }
13195 Tensor self_value;
13196 optional<int64_t> self_bdim;
13197 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13198 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
13199 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13200}
13201template <typename batch_rule_t, batch_rule_t batch_rule>
13202at::Tensor _sparse_csr_prod_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
13203 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13204 auto maybe_layer = maybeCurrentDynamicLayer();
13205 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13206 int64_t cur_level = maybe_layer->layerId();
13207 if (!isBatchedAtLevel(self, cur_level)) {
13208 return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype);
13209 }
13210 Tensor self_value;
13211 optional<int64_t> self_bdim;
13212 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13213 auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
13214 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13215}
13216template <typename batch_rule_t, batch_rule_t batch_rule>
13217at::Tensor _sparse_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
13218 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13219 auto maybe_layer = maybeCurrentDynamicLayer();
13220 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13221 int64_t cur_level = maybe_layer->layerId();
13222 if (!isBatchedAtLevel(self, cur_level)) {
13223 return at::_ops::_sparse_softmax_int::call(self, dim, dtype);
13224 }
13225 Tensor self_value;
13226 optional<int64_t> self_bdim;
13227 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13228 auto results = batch_rule(self_value, self_bdim, dim, dtype);
13229 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13230}
13231template <typename batch_rule_t, batch_rule_t batch_rule>
13232at::Tensor _sparse_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
13233 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13234 auto maybe_layer = maybeCurrentDynamicLayer();
13235 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13236 int64_t cur_level = maybe_layer->layerId();
13237 if (!isBatchedAtLevel(self, cur_level)) {
13238 return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype);
13239 }
13240 Tensor self_value;
13241 optional<int64_t> self_bdim;
13242 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13243 auto results = batch_rule(self_value, self_bdim, dim, dtype);
13244 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13245}
13246template <typename batch_rule_t, batch_rule_t batch_rule>
13247at::Tensor _sparse_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
13248 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13249 auto maybe_layer = maybeCurrentDynamicLayer();
13250 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13251 int64_t cur_level = maybe_layer->layerId();
13252 if (!isBatchedAtLevel(self, cur_level)) {
13253 return at::_ops::_sparse_softmax::call(self, dim, half_to_float);
13254 }
13255 Tensor self_value;
13256 optional<int64_t> self_bdim;
13257 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13258 auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
13259 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13260}
13261template <typename batch_rule_t, batch_rule_t batch_rule>
13262at::Tensor _sparse_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
13263 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13264 auto maybe_layer = maybeCurrentDynamicLayer();
13265 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13266 int64_t cur_level = maybe_layer->layerId();
13267 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
13268 return at::_ops::_sparse_softmax_backward_data::call(grad_output, output, dim, self);
13269 }
13270 Tensor grad_output_value;
13271 optional<int64_t> grad_output_bdim;
13272 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
13273 Tensor output_value;
13274 optional<int64_t> output_bdim;
13275 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
13276 Tensor self_value;
13277 optional<int64_t> self_bdim;
13278 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13279 auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
13280 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13281}
13282template <typename batch_rule_t, batch_rule_t batch_rule>
13283at::Tensor _sparse_log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
13284 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13285 auto maybe_layer = maybeCurrentDynamicLayer();
13286 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13287 int64_t cur_level = maybe_layer->layerId();
13288 if (!isBatchedAtLevel(self, cur_level)) {
13289 return at::_ops::_sparse_log_softmax_int::call(self, dim, dtype);
13290 }
13291 Tensor self_value;
13292 optional<int64_t> self_bdim;
13293 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13294 auto results = batch_rule(self_value, self_bdim, dim, dtype);
13295 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13296}
13297template <typename batch_rule_t, batch_rule_t batch_rule>
13298at::Tensor _sparse_log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
13299 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13300 auto maybe_layer = maybeCurrentDynamicLayer();
13301 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13302 int64_t cur_level = maybe_layer->layerId();
13303 if (!isBatchedAtLevel(self, cur_level)) {
13304 return at::_ops::_sparse_log_softmax_Dimname::call(self, dim, dtype);
13305 }
13306 Tensor self_value;
13307 optional<int64_t> self_bdim;
13308 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13309 auto results = batch_rule(self_value, self_bdim, dim, dtype);
13310 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13311}
13312template <typename batch_rule_t, batch_rule_t batch_rule>
13313at::Tensor _sparse_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
13314 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13315 auto maybe_layer = maybeCurrentDynamicLayer();
13316 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13317 int64_t cur_level = maybe_layer->layerId();
13318 if (!isBatchedAtLevel(self, cur_level)) {
13319 return at::_ops::_sparse_log_softmax::call(self, dim, half_to_float);
13320 }
13321 Tensor self_value;
13322 optional<int64_t> self_bdim;
13323 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13324 auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
13325 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13326}
13327template <typename batch_rule_t, batch_rule_t batch_rule>
13328at::Tensor _sparse_log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
13329 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13330 auto maybe_layer = maybeCurrentDynamicLayer();
13331 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13332 int64_t cur_level = maybe_layer->layerId();
13333 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
13334 return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self);
13335 }
13336 Tensor grad_output_value;
13337 optional<int64_t> grad_output_bdim;
13338 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
13339 Tensor output_value;
13340 optional<int64_t> output_bdim;
13341 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
13342 Tensor self_value;
13343 optional<int64_t> self_bdim;
13344 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13345 auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
13346 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13347}
13348template <typename batch_rule_t, batch_rule_t batch_rule>
13349at::Tensor _spdiags_generated_plumbing(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout) {
13350 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13351 auto maybe_layer = maybeCurrentDynamicLayer();
13352 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13353 int64_t cur_level = maybe_layer->layerId();
13354 if (!isBatchedAtLevel(diagonals, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
13355 return at::_ops::_spdiags::call(diagonals, offsets, shape, layout);
13356 }
13357 Tensor diagonals_value;
13358 optional<int64_t> diagonals_bdim;
13359 std::tie(diagonals_value, diagonals_bdim) = unwrapTensorAtLevel(diagonals, cur_level);
13360 Tensor offsets_value;
13361 optional<int64_t> offsets_bdim;
13362 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
13363 auto results = batch_rule(diagonals_value, diagonals_bdim, offsets_value, offsets_bdim, shape, layout);
13364 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13365}
13366template <typename batch_rule_t, batch_rule_t batch_rule>
13367at::Tensor norm_ScalarOpt_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
13368 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13369 auto maybe_layer = maybeCurrentDynamicLayer();
13370 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13371 int64_t cur_level = maybe_layer->layerId();
13372 if (!isBatchedAtLevel(self, cur_level)) {
13373 return at::_ops::norm_ScalarOpt_dtype::call(self, p, dtype);
13374 }
13375 Tensor self_value;
13376 optional<int64_t> self_bdim;
13377 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13378 auto results = batch_rule(self_value, self_bdim, p, dtype);
13379 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13380}
13381template <typename batch_rule_t, batch_rule_t batch_rule>
13382at::Tensor norm_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
13383 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13384 auto maybe_layer = maybeCurrentDynamicLayer();
13385 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13386 int64_t cur_level = maybe_layer->layerId();
13387 if (!isBatchedAtLevel(self, cur_level)) {
13388 return at::_ops::norm_Scalar::call(self, p);
13389 }
13390 Tensor self_value;
13391 optional<int64_t> self_bdim;
13392 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13393 auto results = batch_rule(self_value, self_bdim, p);
13394 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13395}
13396template <typename batch_rule_t, batch_rule_t batch_rule>
13397at::Tensor norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
13398 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13399 auto maybe_layer = maybeCurrentDynamicLayer();
13400 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13401 int64_t cur_level = maybe_layer->layerId();
13402 if (!isBatchedAtLevel(self, cur_level)) {
13403 return at::_ops::norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
13404 }
13405 Tensor self_value;
13406 optional<int64_t> self_bdim;
13407 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13408 auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
13409 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13410}
13411template <typename batch_rule_t, batch_rule_t batch_rule>
13412at::Tensor norm_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
13413 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13414 auto maybe_layer = maybeCurrentDynamicLayer();
13415 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13416 int64_t cur_level = maybe_layer->layerId();
13417 if (!isBatchedAtLevel(self, cur_level)) {
13418 return at::_ops::norm_ScalarOpt_dim::call(self, p, dim, keepdim);
13419 }
13420 Tensor self_value;
13421 optional<int64_t> self_bdim;
13422 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13423 auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
13424 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13425}
13426template <typename batch_rule_t, batch_rule_t batch_rule>
13427at::Tensor norm_names_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
13428 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13429 auto maybe_layer = maybeCurrentDynamicLayer();
13430 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13431 int64_t cur_level = maybe_layer->layerId();
13432 if (!isBatchedAtLevel(self, cur_level)) {
13433 return at::_ops::norm_names_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
13434 }
13435 Tensor self_value;
13436 optional<int64_t> self_bdim;
13437 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13438 auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
13439 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13440}
13441template <typename batch_rule_t, batch_rule_t batch_rule>
13442at::Tensor norm_names_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
13443 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13444 auto maybe_layer = maybeCurrentDynamicLayer();
13445 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13446 int64_t cur_level = maybe_layer->layerId();
13447 if (!isBatchedAtLevel(self, cur_level)) {
13448 return at::_ops::norm_names_ScalarOpt_dim::call(self, p, dim, keepdim);
13449 }
13450 Tensor self_value;
13451 optional<int64_t> self_bdim;
13452 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13453 auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
13454 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13455}
13456template <typename batch_rule_t, batch_rule_t batch_rule>
13457::std::tuple<at::Tensor,at::Tensor> frexp_Tensor_generated_plumbing(const at::Tensor & self) {
13458 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13459 auto maybe_layer = maybeCurrentDynamicLayer();
13460 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13461 int64_t cur_level = maybe_layer->layerId();
13462 if (!isBatchedAtLevel(self, cur_level)) {
13463 return at::_ops::frexp_Tensor::call(self);
13464 }
13465 Tensor self_value;
13466 optional<int64_t> self_bdim;
13467 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13468 auto results = batch_rule(self_value, self_bdim);
13469 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
13470}
13471template <typename batch_rule_t, batch_rule_t batch_rule>
13472at::Tensor frobenius_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
13473 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13474 auto maybe_layer = maybeCurrentDynamicLayer();
13475 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13476 int64_t cur_level = maybe_layer->layerId();
13477 if (!isBatchedAtLevel(self, cur_level)) {
13478 return at::_ops::frobenius_norm_dim::call(self, dim, keepdim);
13479 }
13480 Tensor self_value;
13481 optional<int64_t> self_bdim;
13482 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13483 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
13484 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13485}
13486template <typename batch_rule_t, batch_rule_t batch_rule>
13487at::Tensor nuclear_norm_generated_plumbing(const at::Tensor & self, bool keepdim) {
13488 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13489 auto maybe_layer = maybeCurrentDynamicLayer();
13490 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13491 int64_t cur_level = maybe_layer->layerId();
13492 if (!isBatchedAtLevel(self, cur_level)) {
13493 return at::_ops::nuclear_norm::call(self, keepdim);
13494 }
13495 Tensor self_value;
13496 optional<int64_t> self_bdim;
13497 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13498 auto results = batch_rule(self_value, self_bdim, keepdim);
13499 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13500}
13501template <typename batch_rule_t, batch_rule_t batch_rule>
13502at::Tensor nuclear_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
13503 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13504 auto maybe_layer = maybeCurrentDynamicLayer();
13505 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13506 int64_t cur_level = maybe_layer->layerId();
13507 if (!isBatchedAtLevel(self, cur_level)) {
13508 return at::_ops::nuclear_norm_dim::call(self, dim, keepdim);
13509 }
13510 Tensor self_value;
13511 optional<int64_t> self_bdim;
13512 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13513 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
13514 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13515}
13516template <typename batch_rule_t, batch_rule_t batch_rule>
13517at::Tensor clone_generated_plumbing(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
13518 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13519 auto maybe_layer = maybeCurrentDynamicLayer();
13520 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13521 int64_t cur_level = maybe_layer->layerId();
13522 if (!isBatchedAtLevel(self, cur_level)) {
13523 return at::_ops::clone::call(self, memory_format);
13524 }
13525 Tensor self_value;
13526 optional<int64_t> self_bdim;
13527 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13528 auto results = batch_rule(self_value, self_bdim, memory_format);
13529 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13530}
13531template <typename batch_rule_t, batch_rule_t batch_rule>
13532at::Tensor positive_generated_plumbing(const at::Tensor & self) {
13533 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13534 auto maybe_layer = maybeCurrentDynamicLayer();
13535 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13536 int64_t cur_level = maybe_layer->layerId();
13537 if (!isBatchedAtLevel(self, cur_level)) {
13538 return at::_ops::positive::call(self);
13539 }
13540 Tensor self_value;
13541 optional<int64_t> self_bdim;
13542 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13543 auto results = batch_rule(self_value, self_bdim);
13544 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13545}
13546template <typename batch_rule_t, batch_rule_t batch_rule>
13547const at::Tensor & resize_as__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
13548 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13549 auto maybe_layer = maybeCurrentDynamicLayer();
13550 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
13551 int64_t cur_level = maybe_layer->layerId();
13552 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
13553 return at::_ops::resize_as_::call(self, the_template, memory_format);
13554 }
13555 Tensor self_value;
13556 optional<int64_t> self_bdim;
13557 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13558 Tensor the_template_value;
13559 optional<int64_t> the_template_bdim;
13560 std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
13561 batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format);
13562 return self;
13563}
13564template <typename batch_rule_t, batch_rule_t batch_rule>
13565const at::Tensor & resize_as_sparse__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
13566 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13567 auto maybe_layer = maybeCurrentDynamicLayer();
13568 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
13569 int64_t cur_level = maybe_layer->layerId();
13570 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
13571 return at::_ops::resize_as_sparse_::call(self, the_template);
13572 }
13573 Tensor self_value;
13574 optional<int64_t> self_bdim;
13575 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13576 Tensor the_template_value;
13577 optional<int64_t> the_template_bdim;
13578 std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
13579 batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
13580 return self;
13581}
13582template <typename batch_rule_t, batch_rule_t batch_rule>
13583at::Tensor & zero__generated_plumbing(at::Tensor & self) {
13584 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13585 auto maybe_layer = maybeCurrentDynamicLayer();
13586 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
13587 int64_t cur_level = maybe_layer->layerId();
13588 if (!isBatchedAtLevel(self, cur_level)) {
13589 return at::_ops::zero_::call(self);
13590 }
13591 Tensor self_value;
13592 optional<int64_t> self_bdim;
13593 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13594 batch_rule(self_value, self_bdim);
13595 return self;
13596}
13597template <typename batch_rule_t, batch_rule_t batch_rule>
13598at::Tensor sub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
13599 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13600 auto maybe_layer = maybeCurrentDynamicLayer();
13601 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13602 int64_t cur_level = maybe_layer->layerId();
13603 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
13604 return at::_ops::sub_Tensor::call(self, other, alpha);
13605 }
13606 Tensor self_value;
13607 optional<int64_t> self_bdim;
13608 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13609 Tensor other_value;
13610 optional<int64_t> other_bdim;
13611 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
13612 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
13613 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13614}
13615template <typename batch_rule_t, batch_rule_t batch_rule>
13616at::Tensor & sub__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
13617 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13618 auto maybe_layer = maybeCurrentDynamicLayer();
13619 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
13620 int64_t cur_level = maybe_layer->layerId();
13621 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
13622 return at::_ops::sub__Tensor::call(self, other, alpha);
13623 }
13624 Tensor self_value;
13625 optional<int64_t> self_bdim;
13626 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13627 Tensor other_value;
13628 optional<int64_t> other_bdim;
13629 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
13630 batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
13631 return self;
13632}
13633template <typename batch_rule_t, batch_rule_t batch_rule>
13634at::Tensor sub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
13635 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13636 auto maybe_layer = maybeCurrentDynamicLayer();
13637 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13638 int64_t cur_level = maybe_layer->layerId();
13639 if (!isBatchedAtLevel(self, cur_level)) {
13640 return at::_ops::sub_Scalar::call(self, other, alpha);
13641 }
13642 Tensor self_value;
13643 optional<int64_t> self_bdim;
13644 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13645 auto results = batch_rule(self_value, self_bdim, other, alpha);
13646 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13647}
13648template <typename batch_rule_t, batch_rule_t batch_rule>
13649at::Tensor & sub__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
13650 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13651 auto maybe_layer = maybeCurrentDynamicLayer();
13652 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
13653 int64_t cur_level = maybe_layer->layerId();
13654 if (!isBatchedAtLevel(self, cur_level)) {
13655 return at::_ops::sub__Scalar::call(self, other, alpha);
13656 }
13657 Tensor self_value;
13658 optional<int64_t> self_bdim;
13659 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13660 batch_rule(self_value, self_bdim, other, alpha);
13661 return self;
13662}
13663template <typename batch_rule_t, batch_rule_t batch_rule>
13664at::Tensor subtract_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
13665 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13666 auto maybe_layer = maybeCurrentDynamicLayer();
13667 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13668 int64_t cur_level = maybe_layer->layerId();
13669 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
13670 return at::_ops::subtract_Tensor::call(self, other, alpha);
13671 }
13672 Tensor self_value;
13673 optional<int64_t> self_bdim;
13674 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13675 Tensor other_value;
13676 optional<int64_t> other_bdim;
13677 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
13678 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
13679 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13680}
13681template <typename batch_rule_t, batch_rule_t batch_rule>
13682at::Tensor & subtract__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
13683 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13684 auto maybe_layer = maybeCurrentDynamicLayer();
13685 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
13686 int64_t cur_level = maybe_layer->layerId();
13687 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
13688 return at::_ops::subtract__Tensor::call(self, other, alpha);
13689 }
13690 Tensor self_value;
13691 optional<int64_t> self_bdim;
13692 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13693 Tensor other_value;
13694 optional<int64_t> other_bdim;
13695 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
13696 batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
13697 return self;
13698}
13699template <typename batch_rule_t, batch_rule_t batch_rule>
13700at::Tensor subtract_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
13701 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13702 auto maybe_layer = maybeCurrentDynamicLayer();
13703 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13704 int64_t cur_level = maybe_layer->layerId();
13705 if (!isBatchedAtLevel(self, cur_level)) {
13706 return at::_ops::subtract_Scalar::call(self, other, alpha);
13707 }
13708 Tensor self_value;
13709 optional<int64_t> self_bdim;
13710 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13711 auto results = batch_rule(self_value, self_bdim, other, alpha);
13712 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13713}
13714template <typename batch_rule_t, batch_rule_t batch_rule>
13715at::Tensor & subtract__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
13716 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13717 auto maybe_layer = maybeCurrentDynamicLayer();
13718 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
13719 int64_t cur_level = maybe_layer->layerId();
13720 if (!isBatchedAtLevel(self, cur_level)) {
13721 return at::_ops::subtract__Scalar::call(self, other, alpha);
13722 }
13723 Tensor self_value;
13724 optional<int64_t> self_bdim;
13725 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13726 batch_rule(self_value, self_bdim, other, alpha);
13727 return self;
13728}
13729template <typename batch_rule_t, batch_rule_t batch_rule>
13730at::Tensor rsub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
13731 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13732 auto maybe_layer = maybeCurrentDynamicLayer();
13733 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13734 int64_t cur_level = maybe_layer->layerId();
13735 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
13736 return at::_ops::rsub_Tensor::call(self, other, alpha);
13737 }
13738 Tensor self_value;
13739 optional<int64_t> self_bdim;
13740 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13741 Tensor other_value;
13742 optional<int64_t> other_bdim;
13743 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
13744 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
13745 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13746}
13747template <typename batch_rule_t, batch_rule_t batch_rule>
13748at::Tensor heaviside_generated_plumbing(const at::Tensor & self, const at::Tensor & values) {
13749 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13750 auto maybe_layer = maybeCurrentDynamicLayer();
13751 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13752 int64_t cur_level = maybe_layer->layerId();
13753 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
13754 return at::_ops::heaviside::call(self, values);
13755 }
13756 Tensor self_value;
13757 optional<int64_t> self_bdim;
13758 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13759 Tensor values_value;
13760 optional<int64_t> values_bdim;
13761 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
13762 auto results = batch_rule(self_value, self_bdim, values_value, values_bdim);
13763 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13764}
13765template <typename batch_rule_t, batch_rule_t batch_rule>
13766at::Tensor & heaviside__generated_plumbing(at::Tensor & self, const at::Tensor & values) {
13767 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13768 auto maybe_layer = maybeCurrentDynamicLayer();
13769 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
13770 int64_t cur_level = maybe_layer->layerId();
13771 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
13772 return at::_ops::heaviside_::call(self, values);
13773 }
13774 Tensor self_value;
13775 optional<int64_t> self_bdim;
13776 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13777 Tensor values_value;
13778 optional<int64_t> values_bdim;
13779 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
13780 batch_rule(self_value, self_bdim, values_value, values_bdim);
13781 return self;
13782}
13783template <typename batch_rule_t, batch_rule_t batch_rule>
13784at::Tensor rsub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
13785 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13786 auto maybe_layer = maybeCurrentDynamicLayer();
13787 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13788 int64_t cur_level = maybe_layer->layerId();
13789 if (!isBatchedAtLevel(self, cur_level)) {
13790 return at::_ops::rsub_Scalar::call(self, other, alpha);
13791 }
13792 Tensor self_value;
13793 optional<int64_t> self_bdim;
13794 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13795 auto results = batch_rule(self_value, self_bdim, other, alpha);
13796 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13797}
13798template <typename batch_rule_t, batch_rule_t batch_rule>
13799at::Tensor _sparse_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
13800 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13801 auto maybe_layer = maybeCurrentDynamicLayer();
13802 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13803 int64_t cur_level = maybe_layer->layerId();
13804 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
13805 return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
13806 }
13807 Tensor self_value;
13808 optional<int64_t> self_bdim;
13809 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13810 Tensor mat1_value;
13811 optional<int64_t> mat1_bdim;
13812 std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
13813 Tensor mat2_value;
13814 optional<int64_t> mat2_bdim;
13815 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
13816 auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
13817 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13818}
13819template <typename batch_rule_t, batch_rule_t batch_rule>
13820at::Tensor sparse_sampled_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
13821 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13822 auto maybe_layer = maybeCurrentDynamicLayer();
13823 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13824 int64_t cur_level = maybe_layer->layerId();
13825 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
13826 return at::_ops::sparse_sampled_addmm::call(self, mat1, mat2, beta, alpha);
13827 }
13828 Tensor self_value;
13829 optional<int64_t> self_bdim;
13830 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13831 Tensor mat1_value;
13832 optional<int64_t> mat1_bdim;
13833 std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
13834 Tensor mat2_value;
13835 optional<int64_t> mat2_bdim;
13836 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
13837 auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
13838 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13839}
13840template <typename batch_rule_t, batch_rule_t batch_rule>
13841::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
13842 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13843 auto maybe_layer = maybeCurrentDynamicLayer();
13844 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13845 int64_t cur_level = maybe_layer->layerId();
13846 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
13847 return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce);
13848 }
13849 Tensor self_value;
13850 optional<int64_t> self_bdim;
13851 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13852 Tensor other_value;
13853 optional<int64_t> other_bdim;
13854 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
13855 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, reduce);
13856 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
13857}
13858template <typename batch_rule_t, batch_rule_t batch_rule>
13859::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
13860 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13861 auto maybe_layer = maybeCurrentDynamicLayer();
13862 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13863 int64_t cur_level = maybe_layer->layerId();
13864 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(arg_out, cur_level)) {
13865 return at::_ops::_sparse_mm_reduce_impl_backward::call(self, grad_out, weight, reduce, arg_out, output_mask);
13866 }
13867 Tensor self_value;
13868 optional<int64_t> self_bdim;
13869 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13870 Tensor grad_out_value;
13871 optional<int64_t> grad_out_bdim;
13872 std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
13873 Tensor weight_value;
13874 optional<int64_t> weight_bdim;
13875 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
13876 Tensor arg_out_value;
13877 optional<int64_t> arg_out_bdim;
13878 std::tie(arg_out_value, arg_out_bdim) = unwrapTensorAtLevel(arg_out, cur_level);
13879 auto results = batch_rule(self_value, self_bdim, grad_out_value, grad_out_bdim, weight_value, weight_bdim, reduce, arg_out_value, arg_out_bdim, output_mask);
13880 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
13881}
13882template <typename batch_rule_t, batch_rule_t batch_rule>
13883at::Tensor addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
13884 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13885 auto maybe_layer = maybeCurrentDynamicLayer();
13886 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13887 int64_t cur_level = maybe_layer->layerId();
13888 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
13889 return at::_ops::addmm::call(self, mat1, mat2, beta, alpha);
13890 }
13891 Tensor self_value;
13892 optional<int64_t> self_bdim;
13893 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13894 Tensor mat1_value;
13895 optional<int64_t> mat1_bdim;
13896 std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
13897 Tensor mat2_value;
13898 optional<int64_t> mat2_bdim;
13899 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
13900 auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
13901 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13902}
13903template <typename batch_rule_t, batch_rule_t batch_rule>
13904at::Tensor & addmm__generated_plumbing(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
13905 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13906 auto maybe_layer = maybeCurrentDynamicLayer();
13907 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
13908 int64_t cur_level = maybe_layer->layerId();
13909 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
13910 return at::_ops::addmm_::call(self, mat1, mat2, beta, alpha);
13911 }
13912 Tensor self_value;
13913 optional<int64_t> self_bdim;
13914 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13915 Tensor mat1_value;
13916 optional<int64_t> mat1_bdim;
13917 std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
13918 Tensor mat2_value;
13919 optional<int64_t> mat2_bdim;
13920 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
13921 batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
13922 return self;
13923}
13924template <typename batch_rule_t, batch_rule_t batch_rule>
13925at::Tensor _addmm_activation_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
13926 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13927 auto maybe_layer = maybeCurrentDynamicLayer();
13928 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13929 int64_t cur_level = maybe_layer->layerId();
13930 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
13931 return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu);
13932 }
13933 Tensor self_value;
13934 optional<int64_t> self_bdim;
13935 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
13936 Tensor mat1_value;
13937 optional<int64_t> mat1_bdim;
13938 std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
13939 Tensor mat2_value;
13940 optional<int64_t> mat2_bdim;
13941 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
13942 auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha, use_gelu);
13943 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13944}
13945template <typename batch_rule_t, batch_rule_t batch_rule>
13946at::Tensor sparse_compressed_tensor_comp_plain_value_size_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
13947 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13948 auto maybe_layer = maybeCurrentDynamicLayer();
13949 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13950 int64_t cur_level = maybe_layer->layerId();
13951 if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
13952 return at::_ops::sparse_compressed_tensor_comp_plain_value_size::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
13953 }
13954 Tensor compressed_indices_value;
13955 optional<int64_t> compressed_indices_bdim;
13956 std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
13957 Tensor plain_indices_value;
13958 optional<int64_t> plain_indices_bdim;
13959 std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
13960 Tensor values_value;
13961 optional<int64_t> values_bdim;
13962 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
13963 auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
13964 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13965}
13966template <typename batch_rule_t, batch_rule_t batch_rule>
13967at::Tensor sparse_csr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
13968 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13969 auto maybe_layer = maybeCurrentDynamicLayer();
13970 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13971 int64_t cur_level = maybe_layer->layerId();
13972 if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
13973 return at::_ops::sparse_csr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
13974 }
13975 Tensor crow_indices_value;
13976 optional<int64_t> crow_indices_bdim;
13977 std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
13978 Tensor col_indices_value;
13979 optional<int64_t> col_indices_bdim;
13980 std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
13981 Tensor values_value;
13982 optional<int64_t> values_bdim;
13983 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
13984 auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
13985 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
13986}
13987template <typename batch_rule_t, batch_rule_t batch_rule>
13988at::Tensor sparse_csc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
13989 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
13990 auto maybe_layer = maybeCurrentDynamicLayer();
13991 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
13992 int64_t cur_level = maybe_layer->layerId();
13993 if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
13994 return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
13995 }
13996 Tensor ccol_indices_value;
13997 optional<int64_t> ccol_indices_bdim;
13998 std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
13999 Tensor row_indices_value;
14000 optional<int64_t> row_indices_bdim;
14001 std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
14002 Tensor values_value;
14003 optional<int64_t> values_bdim;
14004 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14005 auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14006 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14007}
14008template <typename batch_rule_t, batch_rule_t batch_rule>
14009at::Tensor sparse_bsr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14010 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14011 auto maybe_layer = maybeCurrentDynamicLayer();
14012 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14013 int64_t cur_level = maybe_layer->layerId();
14014 if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14015 return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
14016 }
14017 Tensor crow_indices_value;
14018 optional<int64_t> crow_indices_bdim;
14019 std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
14020 Tensor col_indices_value;
14021 optional<int64_t> col_indices_bdim;
14022 std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
14023 Tensor values_value;
14024 optional<int64_t> values_bdim;
14025 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14026 auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14027 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14028}
14029template <typename batch_rule_t, batch_rule_t batch_rule>
14030at::Tensor sparse_bsc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14031 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14032 auto maybe_layer = maybeCurrentDynamicLayer();
14033 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14034 int64_t cur_level = maybe_layer->layerId();
14035 if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14036 return at::_ops::sparse_bsc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
14037 }
14038 Tensor ccol_indices_value;
14039 optional<int64_t> ccol_indices_bdim;
14040 std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
14041 Tensor row_indices_value;
14042 optional<int64_t> row_indices_bdim;
14043 std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
14044 Tensor values_value;
14045 optional<int64_t> values_bdim;
14046 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14047 auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14048 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14049}
14050template <typename batch_rule_t, batch_rule_t batch_rule>
14051at::Tensor sparse_compressed_tensor_comp_plain_value_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14052 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14053 auto maybe_layer = maybeCurrentDynamicLayer();
14054 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14055 int64_t cur_level = maybe_layer->layerId();
14056 if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14057 return at::_ops::sparse_compressed_tensor_comp_plain_value::call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
14058 }
14059 Tensor compressed_indices_value;
14060 optional<int64_t> compressed_indices_bdim;
14061 std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
14062 Tensor plain_indices_value;
14063 optional<int64_t> plain_indices_bdim;
14064 std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
14065 Tensor values_value;
14066 optional<int64_t> values_bdim;
14067 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14068 auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
14069 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14070}
14071template <typename batch_rule_t, batch_rule_t batch_rule>
14072at::Tensor sparse_csr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14073 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14074 auto maybe_layer = maybeCurrentDynamicLayer();
14075 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14076 int64_t cur_level = maybe_layer->layerId();
14077 if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14078 return at::_ops::sparse_csr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
14079 }
14080 Tensor crow_indices_value;
14081 optional<int64_t> crow_indices_bdim;
14082 std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
14083 Tensor col_indices_value;
14084 optional<int64_t> col_indices_bdim;
14085 std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
14086 Tensor values_value;
14087 optional<int64_t> values_bdim;
14088 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14089 auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
14090 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14091}
14092template <typename batch_rule_t, batch_rule_t batch_rule>
14093at::Tensor sparse_csc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14094 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14095 auto maybe_layer = maybeCurrentDynamicLayer();
14096 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14097 int64_t cur_level = maybe_layer->layerId();
14098 if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14099 return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
14100 }
14101 Tensor ccol_indices_value;
14102 optional<int64_t> ccol_indices_bdim;
14103 std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
14104 Tensor row_indices_value;
14105 optional<int64_t> row_indices_bdim;
14106 std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
14107 Tensor values_value;
14108 optional<int64_t> values_bdim;
14109 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14110 auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
14111 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14112}
14113template <typename batch_rule_t, batch_rule_t batch_rule>
14114at::Tensor sparse_bsr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14115 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14116 auto maybe_layer = maybeCurrentDynamicLayer();
14117 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14118 int64_t cur_level = maybe_layer->layerId();
14119 if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14120 return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
14121 }
14122 Tensor crow_indices_value;
14123 optional<int64_t> crow_indices_bdim;
14124 std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
14125 Tensor col_indices_value;
14126 optional<int64_t> col_indices_bdim;
14127 std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
14128 Tensor values_value;
14129 optional<int64_t> values_bdim;
14130 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14131 auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
14132 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14133}
14134template <typename batch_rule_t, batch_rule_t batch_rule>
14135at::Tensor sparse_bsc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14136 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14137 auto maybe_layer = maybeCurrentDynamicLayer();
14138 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14139 int64_t cur_level = maybe_layer->layerId();
14140 if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14141 return at::_ops::sparse_bsc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
14142 }
14143 Tensor ccol_indices_value;
14144 optional<int64_t> ccol_indices_bdim;
14145 std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
14146 Tensor row_indices_value;
14147 optional<int64_t> row_indices_bdim;
14148 std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
14149 Tensor values_value;
14150 optional<int64_t> values_bdim;
14151 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14152 auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
14153 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14154}
14155template <typename batch_rule_t, batch_rule_t batch_rule>
14156at::Tensor _sparse_compressed_tensor_unsafe_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14157 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14158 auto maybe_layer = maybeCurrentDynamicLayer();
14159 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14160 int64_t cur_level = maybe_layer->layerId();
14161 if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14162 return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
14163 }
14164 Tensor compressed_indices_value;
14165 optional<int64_t> compressed_indices_bdim;
14166 std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
14167 Tensor plain_indices_value;
14168 optional<int64_t> plain_indices_bdim;
14169 std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
14170 Tensor values_value;
14171 optional<int64_t> values_bdim;
14172 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14173 auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14174 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14175}
14176template <typename batch_rule_t, batch_rule_t batch_rule>
14177at::Tensor _sparse_csr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14178 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14179 auto maybe_layer = maybeCurrentDynamicLayer();
14180 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14181 int64_t cur_level = maybe_layer->layerId();
14182 if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14183 return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
14184 }
14185 Tensor crow_indices_value;
14186 optional<int64_t> crow_indices_bdim;
14187 std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
14188 Tensor col_indices_value;
14189 optional<int64_t> col_indices_bdim;
14190 std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
14191 Tensor values_value;
14192 optional<int64_t> values_bdim;
14193 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14194 auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14195 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14196}
14197template <typename batch_rule_t, batch_rule_t batch_rule>
14198at::Tensor _sparse_csc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14199 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14200 auto maybe_layer = maybeCurrentDynamicLayer();
14201 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14202 int64_t cur_level = maybe_layer->layerId();
14203 if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14204 return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
14205 }
14206 Tensor ccol_indices_value;
14207 optional<int64_t> ccol_indices_bdim;
14208 std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
14209 Tensor row_indices_value;
14210 optional<int64_t> row_indices_bdim;
14211 std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
14212 Tensor values_value;
14213 optional<int64_t> values_bdim;
14214 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14215 auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14216 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14217}
14218template <typename batch_rule_t, batch_rule_t batch_rule>
14219at::Tensor _sparse_bsr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14220 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14221 auto maybe_layer = maybeCurrentDynamicLayer();
14222 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14223 int64_t cur_level = maybe_layer->layerId();
14224 if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14225 return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
14226 }
14227 Tensor crow_indices_value;
14228 optional<int64_t> crow_indices_bdim;
14229 std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
14230 Tensor col_indices_value;
14231 optional<int64_t> col_indices_bdim;
14232 std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
14233 Tensor values_value;
14234 optional<int64_t> values_bdim;
14235 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14236 auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14237 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14238}
14239template <typename batch_rule_t, batch_rule_t batch_rule>
14240at::Tensor _sparse_bsc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14241 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14242 auto maybe_layer = maybeCurrentDynamicLayer();
14243 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14244 int64_t cur_level = maybe_layer->layerId();
14245 if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14246 return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
14247 }
14248 Tensor ccol_indices_value;
14249 optional<int64_t> ccol_indices_bdim;
14250 std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
14251 Tensor row_indices_value;
14252 optional<int64_t> row_indices_bdim;
14253 std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
14254 Tensor values_value;
14255 optional<int64_t> values_bdim;
14256 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14257 auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14258 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14259}
14260template <typename batch_rule_t, batch_rule_t batch_rule>
14261at::Tensor sparse_coo_tensor_indices_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14262 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14263 auto maybe_layer = maybeCurrentDynamicLayer();
14264 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14265 int64_t cur_level = maybe_layer->layerId();
14266 if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14267 return at::_ops::sparse_coo_tensor_indices::call(indices, values, dtype, layout, device, pin_memory);
14268 }
14269 Tensor indices_value;
14270 optional<int64_t> indices_bdim;
14271 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
14272 Tensor values_value;
14273 optional<int64_t> values_bdim;
14274 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14275 auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
14276 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14277}
14278template <typename batch_rule_t, batch_rule_t batch_rule>
14279at::Tensor sparse_coo_tensor_indices_size_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14280 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14281 auto maybe_layer = maybeCurrentDynamicLayer();
14282 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14283 int64_t cur_level = maybe_layer->layerId();
14284 if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14285 return at::_ops::sparse_coo_tensor_indices_size::call(indices, values, size, dtype, layout, device, pin_memory);
14286 }
14287 Tensor indices_value;
14288 optional<int64_t> indices_bdim;
14289 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
14290 Tensor values_value;
14291 optional<int64_t> values_bdim;
14292 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14293 auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14294 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14295}
14296template <typename batch_rule_t, batch_rule_t batch_rule>
14297at::Tensor _sparse_coo_tensor_unsafe_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14298 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14299 auto maybe_layer = maybeCurrentDynamicLayer();
14300 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14301 int64_t cur_level = maybe_layer->layerId();
14302 if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14303 return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory);
14304 }
14305 Tensor indices_value;
14306 optional<int64_t> indices_bdim;
14307 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
14308 Tensor values_value;
14309 optional<int64_t> values_bdim;
14310 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14311 auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
14312 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14313}
14314template <typename batch_rule_t, batch_rule_t batch_rule>
14315void _validate_sparse_coo_tensor_args_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size) {
14316 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14317 auto maybe_layer = maybeCurrentDynamicLayer();
14318 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
14319 int64_t cur_level = maybe_layer->layerId();
14320 if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14321 return at::_ops::_validate_sparse_coo_tensor_args::call(indices, values, size);
14322 }
14323 Tensor indices_value;
14324 optional<int64_t> indices_bdim;
14325 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
14326 Tensor values_value;
14327 optional<int64_t> values_bdim;
14328 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14329 batch_rule(indices_value, indices_bdim, values_value, values_bdim, size);
14330}
14331template <typename batch_rule_t, batch_rule_t batch_rule>
14332void _validate_sparse_compressed_tensor_args_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
14333 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14334 auto maybe_layer = maybeCurrentDynamicLayer();
14335 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
14336 int64_t cur_level = maybe_layer->layerId();
14337 if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14338 return at::_ops::_validate_sparse_compressed_tensor_args::call(compressed_indices, plain_indices, values, size, layout);
14339 }
14340 Tensor compressed_indices_value;
14341 optional<int64_t> compressed_indices_bdim;
14342 std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
14343 Tensor plain_indices_value;
14344 optional<int64_t> plain_indices_bdim;
14345 std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
14346 Tensor values_value;
14347 optional<int64_t> values_bdim;
14348 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14349 batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, layout);
14350}
14351template <typename batch_rule_t, batch_rule_t batch_rule>
14352void _validate_sparse_csr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
14353 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14354 auto maybe_layer = maybeCurrentDynamicLayer();
14355 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
14356 int64_t cur_level = maybe_layer->layerId();
14357 if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14358 return at::_ops::_validate_sparse_csr_tensor_args::call(crow_indices, col_indices, values, size);
14359 }
14360 Tensor crow_indices_value;
14361 optional<int64_t> crow_indices_bdim;
14362 std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
14363 Tensor col_indices_value;
14364 optional<int64_t> col_indices_bdim;
14365 std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
14366 Tensor values_value;
14367 optional<int64_t> values_bdim;
14368 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14369 batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
14370}
14371template <typename batch_rule_t, batch_rule_t batch_rule>
14372void _validate_sparse_csc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
14373 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14374 auto maybe_layer = maybeCurrentDynamicLayer();
14375 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
14376 int64_t cur_level = maybe_layer->layerId();
14377 if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14378 return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
14379 }
14380 Tensor ccol_indices_value;
14381 optional<int64_t> ccol_indices_bdim;
14382 std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
14383 Tensor row_indices_value;
14384 optional<int64_t> row_indices_bdim;
14385 std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
14386 Tensor values_value;
14387 optional<int64_t> values_bdim;
14388 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14389 batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
14390}
14391template <typename batch_rule_t, batch_rule_t batch_rule>
14392void _validate_sparse_bsr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
14393 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14394 auto maybe_layer = maybeCurrentDynamicLayer();
14395 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
14396 int64_t cur_level = maybe_layer->layerId();
14397 if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14398 return at::_ops::_validate_sparse_bsr_tensor_args::call(crow_indices, col_indices, values, size);
14399 }
14400 Tensor crow_indices_value;
14401 optional<int64_t> crow_indices_bdim;
14402 std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
14403 Tensor col_indices_value;
14404 optional<int64_t> col_indices_bdim;
14405 std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
14406 Tensor values_value;
14407 optional<int64_t> values_bdim;
14408 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14409 batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
14410}
14411template <typename batch_rule_t, batch_rule_t batch_rule>
14412void _validate_sparse_bsc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
14413 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14414 auto maybe_layer = maybeCurrentDynamicLayer();
14415 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
14416 int64_t cur_level = maybe_layer->layerId();
14417 if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14418 return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size);
14419 }
14420 Tensor ccol_indices_value;
14421 optional<int64_t> ccol_indices_bdim;
14422 std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
14423 Tensor row_indices_value;
14424 optional<int64_t> row_indices_bdim;
14425 std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
14426 Tensor values_value;
14427 optional<int64_t> values_bdim;
14428 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14429 batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
14430}
14431template <typename batch_rule_t, batch_rule_t batch_rule>
14432at::Tensor _sparse_coo_tensor_with_dims_and_tensors_generated_plumbing(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
14433 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14434 auto maybe_layer = maybeCurrentDynamicLayer();
14435 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14436 int64_t cur_level = maybe_layer->layerId();
14437 if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
14438 return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory);
14439 }
14440 Tensor indices_value;
14441 optional<int64_t> indices_bdim;
14442 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
14443 Tensor values_value;
14444 optional<int64_t> values_bdim;
14445 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
14446 auto results = batch_rule(sparse_dim, dense_dim, size, indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
14447 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14448}
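// In-place ops (trailing underscore, e.g. sparse_resize_ below) follow the
// inplace plumbing convention: the batch rule mutates the unwrapped value and
// the wrapper returns the original `self` reference, preserving aliasing for
// the caller.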
14449template <typename batch_rule_t, batch_rule_t batch_rule>
14450const at::Tensor & sparse_resize__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
14451 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14452 auto maybe_layer = maybeCurrentDynamicLayer();
14453 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14454 int64_t cur_level = maybe_layer->layerId();
14455 if (!isBatchedAtLevel(self, cur_level)) {
14456 return at::_ops::sparse_resize_::call(self, size, sparse_dim, dense_dim);
14457 }
14458 Tensor self_value;
14459 optional<int64_t> self_bdim;
14460 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14461 batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
14462 return self;
14463}
14464template <typename batch_rule_t, batch_rule_t batch_rule>
14465const at::Tensor & sparse_resize_and_clear__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
14466 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14467 auto maybe_layer = maybeCurrentDynamicLayer();
14468 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14469 int64_t cur_level = maybe_layer->layerId();
14470 if (!isBatchedAtLevel(self, cur_level)) {
14471 return at::_ops::sparse_resize_and_clear_::call(self, size, sparse_dim, dense_dim);
14472 }
14473 Tensor self_value;
14474 optional<int64_t> self_bdim;
14475 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14476 batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
14477 return self;
14478}
14479template <typename batch_rule_t, batch_rule_t batch_rule>
14480at::Tensor sparse_mask_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
14481 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14482 auto maybe_layer = maybeCurrentDynamicLayer();
14483 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14484 int64_t cur_level = maybe_layer->layerId();
14485 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
14486 return at::_ops::sparse_mask::call(self, mask);
14487 }
14488 Tensor self_value;
14489 optional<int64_t> self_bdim;
14490 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14491 Tensor mask_value;
14492 optional<int64_t> mask_bdim;
14493 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
14494 auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
14495 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14496}
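// Ops taking a TensorList (e.g. _to_cpu below) forward the list to the batch
// rule unchanged; unwrapping of the individual list elements is left to the
// batch rule, and vector results are re-wrapped with makeBatchedVector.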
14497template <typename batch_rule_t, batch_rule_t batch_rule>
14498::std::vector<at::Tensor> _to_cpu_generated_plumbing(at::TensorList tensors) {
14499 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14500 auto maybe_layer = maybeCurrentDynamicLayer();
14501 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14502 int64_t cur_level = maybe_layer->layerId();
14503 if (!isBatchedAtLevel(tensors, cur_level)) {
14504 return at::_ops::_to_cpu::call(tensors);
14505 }
14506
14507 auto results = batch_rule(tensors);
14508 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
14509}
14510template <typename batch_rule_t, batch_rule_t batch_rule>
14511at::Tensor to_dense_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
14512 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14513 auto maybe_layer = maybeCurrentDynamicLayer();
14514 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14515 int64_t cur_level = maybe_layer->layerId();
14516 if (!isBatchedAtLevel(self, cur_level)) {
14517 return at::_ops::to_dense::call(self, dtype);
14518 }
14519 Tensor self_value;
14520 optional<int64_t> self_bdim;
14521 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14522 auto results = batch_rule(self_value, self_bdim, dtype);
14523 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14524}
14525template <typename batch_rule_t, batch_rule_t batch_rule>
14526at::Tensor _to_dense_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
14527 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14528 auto maybe_layer = maybeCurrentDynamicLayer();
14529 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14530 int64_t cur_level = maybe_layer->layerId();
14531 if (!isBatchedAtLevel(self, cur_level)) {
14532 return at::_ops::_to_dense::call(self, dtype);
14533 }
14534 Tensor self_value;
14535 optional<int64_t> self_bdim;
14536 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14537 auto results = batch_rule(self_value, self_bdim, dtype);
14538 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14539}
14540template <typename batch_rule_t, batch_rule_t batch_rule>
14541at::Tensor to_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) {
14542 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14543 auto maybe_layer = maybeCurrentDynamicLayer();
14544 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14545 int64_t cur_level = maybe_layer->layerId();
14546 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
14547 return at::_ops::to_dense_backward::call(grad, input);
14548 }
14549 Tensor grad_value;
14550 optional<int64_t> grad_bdim;
14551 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
14552 Tensor input_value;
14553 optional<int64_t> input_bdim;
14554 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
14555 auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim);
14556 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14557}
14558template <typename batch_rule_t, batch_rule_t batch_rule>
14559at::Tensor coalesce_generated_plumbing(const at::Tensor & self) {
14560 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14561 auto maybe_layer = maybeCurrentDynamicLayer();
14562 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14563 int64_t cur_level = maybe_layer->layerId();
14564 if (!isBatchedAtLevel(self, cur_level)) {
14565 return at::_ops::coalesce::call(self);
14566 }
14567 Tensor self_value;
14568 optional<int64_t> self_bdim;
14569 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14570 auto results = batch_rule(self_value, self_bdim);
14571 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14572}
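// Hypothetical usage sketch (the rule name and registration call below are
// placeholders, not part of this generated header): a plumbing template such
// as coalesce_generated_plumbing above is instantiated with a concrete batch
// rule when the op is registered, roughly along the lines of
//   m.impl("coalesce",
//          &coalesce_generated_plumbing<decltype(&coalesce_batch_rule),
//                                       &coalesce_batch_rule>);
// where coalesce_batch_rule would return a (Tensor, optional<int64_t> bdim)
// pair that makeBatched re-wraps at the current level.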
14573template <typename batch_rule_t, batch_rule_t batch_rule>
14574at::Tensor _coalesce_generated_plumbing(const at::Tensor & self) {
14575 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14576 auto maybe_layer = maybeCurrentDynamicLayer();
14577 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14578 int64_t cur_level = maybe_layer->layerId();
14579 if (!isBatchedAtLevel(self, cur_level)) {
14580 return at::_ops::_coalesce::call(self);
14581 }
14582 Tensor self_value;
14583 optional<int64_t> self_bdim;
14584 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14585 auto results = batch_rule(self_value, self_bdim);
14586 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14587}
14588template <typename batch_rule_t, batch_rule_t batch_rule>
14589at::Tensor _indices_generated_plumbing(const at::Tensor & self) {
14590 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14591 auto maybe_layer = maybeCurrentDynamicLayer();
14592 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14593 int64_t cur_level = maybe_layer->layerId();
14594 if (!isBatchedAtLevel(self, cur_level)) {
14595 return at::_ops::_indices::call(self);
14596 }
14597 Tensor self_value;
14598 optional<int64_t> self_bdim;
14599 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14600 auto results = batch_rule(self_value, self_bdim);
14601 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14602}
14603template <typename batch_rule_t, batch_rule_t batch_rule>
14604at::Tensor _values_generated_plumbing(const at::Tensor & self) {
14605 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14606 auto maybe_layer = maybeCurrentDynamicLayer();
14607 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14608 int64_t cur_level = maybe_layer->layerId();
14609 if (!isBatchedAtLevel(self, cur_level)) {
14610 return at::_ops::_values::call(self);
14611 }
14612 Tensor self_value;
14613 optional<int64_t> self_bdim;
14614 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14615 auto results = batch_rule(self_value, self_bdim);
14616 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14617}
14618template <typename batch_rule_t, batch_rule_t batch_rule>
14619at::Tensor & _coalesced__generated_plumbing(at::Tensor & self, bool coalesced) {
14620 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14621 auto maybe_layer = maybeCurrentDynamicLayer();
14622 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14623 int64_t cur_level = maybe_layer->layerId();
14624 if (!isBatchedAtLevel(self, cur_level)) {
14625 return at::_ops::_coalesced_::call(self, coalesced);
14626 }
14627 Tensor self_value;
14628 optional<int64_t> self_bdim;
14629 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14630 batch_rule(self_value, self_bdim, coalesced);
14631 return self;
14632}
14633template <typename batch_rule_t, batch_rule_t batch_rule>
14634at::Tensor indices_generated_plumbing(const at::Tensor & self) {
14635 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14636 auto maybe_layer = maybeCurrentDynamicLayer();
14637 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14638 int64_t cur_level = maybe_layer->layerId();
14639 if (!isBatchedAtLevel(self, cur_level)) {
14640 return at::_ops::indices::call(self);
14641 }
14642 Tensor self_value;
14643 optional<int64_t> self_bdim;
14644 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14645 auto results = batch_rule(self_value, self_bdim);
14646 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14647}
14648template <typename batch_rule_t, batch_rule_t batch_rule>
14649at::Tensor values_generated_plumbing(const at::Tensor & self) {
14650 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14651 auto maybe_layer = maybeCurrentDynamicLayer();
14652 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14653 int64_t cur_level = maybe_layer->layerId();
14654 if (!isBatchedAtLevel(self, cur_level)) {
14655 return at::_ops::values::call(self);
14656 }
14657 Tensor self_value;
14658 optional<int64_t> self_bdim;
14659 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14660 auto results = batch_rule(self_value, self_bdim);
14661 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14662}
14663template <typename batch_rule_t, batch_rule_t batch_rule>
14664at::Tensor crow_indices_generated_plumbing(const at::Tensor & self) {
14665 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14666 auto maybe_layer = maybeCurrentDynamicLayer();
14667 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14668 int64_t cur_level = maybe_layer->layerId();
14669 if (!isBatchedAtLevel(self, cur_level)) {
14670 return at::_ops::crow_indices::call(self);
14671 }
14672 Tensor self_value;
14673 optional<int64_t> self_bdim;
14674 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14675 auto results = batch_rule(self_value, self_bdim);
14676 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14677}
14678template <typename batch_rule_t, batch_rule_t batch_rule>
14679at::Tensor col_indices_generated_plumbing(const at::Tensor & self) {
14680 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14681 auto maybe_layer = maybeCurrentDynamicLayer();
14682 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14683 int64_t cur_level = maybe_layer->layerId();
14684 if (!isBatchedAtLevel(self, cur_level)) {
14685 return at::_ops::col_indices::call(self);
14686 }
14687 Tensor self_value;
14688 optional<int64_t> self_bdim;
14689 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14690 auto results = batch_rule(self_value, self_bdim);
14691 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14692}
14693template <typename batch_rule_t, batch_rule_t batch_rule>
14694at::Tensor ccol_indices_generated_plumbing(const at::Tensor & self) {
14695 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14696 auto maybe_layer = maybeCurrentDynamicLayer();
14697 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14698 int64_t cur_level = maybe_layer->layerId();
14699 if (!isBatchedAtLevel(self, cur_level)) {
14700 return at::_ops::ccol_indices::call(self);
14701 }
14702 Tensor self_value;
14703 optional<int64_t> self_bdim;
14704 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14705 auto results = batch_rule(self_value, self_bdim);
14706 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14707}
14708template <typename batch_rule_t, batch_rule_t batch_rule>
14709at::Tensor row_indices_generated_plumbing(const at::Tensor & self) {
14710 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14711 auto maybe_layer = maybeCurrentDynamicLayer();
14712 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14713 int64_t cur_level = maybe_layer->layerId();
14714 if (!isBatchedAtLevel(self, cur_level)) {
14715 return at::_ops::row_indices::call(self);
14716 }
14717 Tensor self_value;
14718 optional<int64_t> self_bdim;
14719 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14720 auto results = batch_rule(self_value, self_bdim);
14721 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14722}
14723template <typename batch_rule_t, batch_rule_t batch_rule>
14724at::Tensor hspmm_generated_plumbing(const at::Tensor & mat1, const at::Tensor & mat2) {
14725 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14726 auto maybe_layer = maybeCurrentDynamicLayer();
14727 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14728 int64_t cur_level = maybe_layer->layerId();
14729 if (!isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
14730 return at::_ops::hspmm::call(mat1, mat2);
14731 }
14732 Tensor mat1_value;
14733 optional<int64_t> mat1_bdim;
14734 std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
14735 Tensor mat2_value;
14736 optional<int64_t> mat2_bdim;
14737 std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
14738 auto results = batch_rule(mat1_value, mat1_bdim, mat2_value, mat2_bdim);
14739 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14740}
14741template <typename batch_rule_t, batch_rule_t batch_rule>
14742at::Tensor & copy_sparse_to_sparse__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
14743 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14744 auto maybe_layer = maybeCurrentDynamicLayer();
14745 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
14746 int64_t cur_level = maybe_layer->layerId();
14747 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
14748 return at::_ops::copy_sparse_to_sparse_::call(self, src, non_blocking);
14749 }
14750 Tensor self_value;
14751 optional<int64_t> self_bdim;
14752 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14753 Tensor src_value;
14754 optional<int64_t> src_bdim;
14755 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
14756 batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
14757 return self;
14758}
14759template <typename batch_rule_t, batch_rule_t batch_rule>
14760::std::vector<at::Tensor> unbind_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
14761 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14762 auto maybe_layer = maybeCurrentDynamicLayer();
14763 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14764 int64_t cur_level = maybe_layer->layerId();
14765 if (!isBatchedAtLevel(self, cur_level)) {
14766 return at::_ops::unbind_int::call(self, dim);
14767 }
14768 Tensor self_value;
14769 optional<int64_t> self_bdim;
14770 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14771 auto results = batch_rule(self_value, self_bdim, dim);
14772 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
14773}
14774template <typename batch_rule_t, batch_rule_t batch_rule>
14775::std::vector<at::Tensor> unbind_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
14776 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14777 auto maybe_layer = maybeCurrentDynamicLayer();
14778 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14779 int64_t cur_level = maybe_layer->layerId();
14780 if (!isBatchedAtLevel(self, cur_level)) {
14781 return at::_ops::unbind_Dimname::call(self, dim);
14782 }
14783 Tensor self_value;
14784 optional<int64_t> self_bdim;
14785 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14786 auto results = batch_rule(self_value, self_bdim, dim);
14787 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
14788}
14789template <typename batch_rule_t, batch_rule_t batch_rule>
14790at::Tensor to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) {
14791 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14792 auto maybe_layer = maybeCurrentDynamicLayer();
14793 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14794 int64_t cur_level = maybe_layer->layerId();
14795 if (!isBatchedAtLevel(self, cur_level)) {
14796 return at::_ops::to_sparse_sparse_dim::call(self, sparse_dim);
14797 }
14798 Tensor self_value;
14799 optional<int64_t> self_bdim;
14800 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14801 auto results = batch_rule(self_value, self_bdim, sparse_dim);
14802 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14803}
14804template <typename batch_rule_t, batch_rule_t batch_rule>
14805at::Tensor to_sparse_generated_plumbing(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
14806 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14807 auto maybe_layer = maybeCurrentDynamicLayer();
14808 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14809 int64_t cur_level = maybe_layer->layerId();
14810 if (!isBatchedAtLevel(self, cur_level)) {
14811 return at::_ops::to_sparse::call(self, layout, blocksize, dense_dim);
14812 }
14813 Tensor self_value;
14814 optional<int64_t> self_bdim;
14815 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14816 auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim);
14817 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14818}
14819template <typename batch_rule_t, batch_rule_t batch_rule>
14820at::Tensor to_sparse_csr_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
14821 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14822 auto maybe_layer = maybeCurrentDynamicLayer();
14823 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14824 int64_t cur_level = maybe_layer->layerId();
14825 if (!isBatchedAtLevel(self, cur_level)) {
14826 return at::_ops::to_sparse_csr::call(self, dense_dim);
14827 }
14828 Tensor self_value;
14829 optional<int64_t> self_bdim;
14830 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14831 auto results = batch_rule(self_value, self_bdim, dense_dim);
14832 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14833}
14834template <typename batch_rule_t, batch_rule_t batch_rule>
14835at::Tensor to_sparse_csc_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
14836 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14837 auto maybe_layer = maybeCurrentDynamicLayer();
14838 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14839 int64_t cur_level = maybe_layer->layerId();
14840 if (!isBatchedAtLevel(self, cur_level)) {
14841 return at::_ops::to_sparse_csc::call(self, dense_dim);
14842 }
14843 Tensor self_value;
14844 optional<int64_t> self_bdim;
14845 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14846 auto results = batch_rule(self_value, self_bdim, dense_dim);
14847 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14848}
14849template <typename batch_rule_t, batch_rule_t batch_rule>
14850at::Tensor to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
14851 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14852 auto maybe_layer = maybeCurrentDynamicLayer();
14853 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14854 int64_t cur_level = maybe_layer->layerId();
14855 if (!isBatchedAtLevel(self, cur_level)) {
14856 return at::_ops::to_sparse_bsr::call(self, blocksize, dense_dim);
14857 }
14858 Tensor self_value;
14859 optional<int64_t> self_bdim;
14860 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14861 auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
14862 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14863}
14864template <typename batch_rule_t, batch_rule_t batch_rule>
14865at::Tensor to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
14866 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14867 auto maybe_layer = maybeCurrentDynamicLayer();
14868 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14869 int64_t cur_level = maybe_layer->layerId();
14870 if (!isBatchedAtLevel(self, cur_level)) {
14871 return at::_ops::to_sparse_bsc::call(self, blocksize, dense_dim);
14872 }
14873 Tensor self_value;
14874 optional<int64_t> self_bdim;
14875 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14876 auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
14877 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14878}
14879template <typename batch_rule_t, batch_rule_t batch_rule>
14880at::Tensor to_mkldnn_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
14881 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14882 auto maybe_layer = maybeCurrentDynamicLayer();
14883 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14884 int64_t cur_level = maybe_layer->layerId();
14885 if (!isBatchedAtLevel(self, cur_level)) {
14886 return at::_ops::to_mkldnn::call(self, dtype);
14887 }
14888 Tensor self_value;
14889 optional<int64_t> self_bdim;
14890 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14891 auto results = batch_rule(self_value, self_bdim, dtype);
14892 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14893}
14894template <typename batch_rule_t, batch_rule_t batch_rule>
14895at::Tensor mkldnn_reorder_conv2d_weight_generated_plumbing(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size) {
14896 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14897 auto maybe_layer = maybeCurrentDynamicLayer();
14898 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14899 int64_t cur_level = maybe_layer->layerId();
14900 if (!isBatchedAtLevel(self, cur_level)) {
14901 return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size);
14902 }
14903 Tensor self_value;
14904 optional<int64_t> self_bdim;
14905 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14906 auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups, input_size);
14907 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14908}
14909template <typename batch_rule_t, batch_rule_t batch_rule>
14910at::Tensor mkldnn_reorder_conv3d_weight_generated_plumbing(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
14911 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14912 auto maybe_layer = maybeCurrentDynamicLayer();
14913 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14914 int64_t cur_level = maybe_layer->layerId();
14915 if (!isBatchedAtLevel(self, cur_level)) {
14916 return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups);
14917 }
14918 Tensor self_value;
14919 optional<int64_t> self_bdim;
14920 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14921 auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups);
14922 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14923}
14924template <typename batch_rule_t, batch_rule_t batch_rule>
14925at::Tensor to_mkldnn_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) {
14926 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14927 auto maybe_layer = maybeCurrentDynamicLayer();
14928 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14929 int64_t cur_level = maybe_layer->layerId();
14930 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
14931 return at::_ops::to_mkldnn_backward::call(grad, input);
14932 }
14933 Tensor grad_value;
14934 optional<int64_t> grad_bdim;
14935 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
14936 Tensor input_value;
14937 optional<int64_t> input_bdim;
14938 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
14939 auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim);
14940 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14941}
14942template <typename batch_rule_t, batch_rule_t batch_rule>
14943at::Tensor quantize_per_tensor_dynamic_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
14944 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14945 auto maybe_layer = maybeCurrentDynamicLayer();
14946 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14947 int64_t cur_level = maybe_layer->layerId();
14948 if (!isBatchedAtLevel(self, cur_level)) {
14949 return at::_ops::quantize_per_tensor_dynamic::call(self, dtype, reduce_range);
14950 }
14951 Tensor self_value;
14952 optional<int64_t> self_bdim;
14953 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14954 auto results = batch_rule(self_value, self_bdim, dtype, reduce_range);
14955 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14956}
14957template <typename batch_rule_t, batch_rule_t batch_rule>
14958at::Tensor quantize_per_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
14959 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14960 auto maybe_layer = maybeCurrentDynamicLayer();
14961 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14962 int64_t cur_level = maybe_layer->layerId();
14963 if (!isBatchedAtLevel(self, cur_level)) {
14964 return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype);
14965 }
14966 Tensor self_value;
14967 optional<int64_t> self_bdim;
14968 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14969 auto results = batch_rule(self_value, self_bdim, scale, zero_point, dtype);
14970 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14971}
14972template <typename batch_rule_t, batch_rule_t batch_rule>
14973at::Tensor quantize_per_tensor_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
14974 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14975 auto maybe_layer = maybeCurrentDynamicLayer();
14976 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14977 int64_t cur_level = maybe_layer->layerId();
14978 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
14979 return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype);
14980 }
14981 Tensor self_value;
14982 optional<int64_t> self_bdim;
14983 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
14984 Tensor scale_value;
14985 optional<int64_t> scale_bdim;
14986 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
14987 Tensor zero_point_value;
14988 optional<int64_t> zero_point_bdim;
14989 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
14990 auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, dtype);
14991 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
14992}
14993template <typename batch_rule_t, batch_rule_t batch_rule>
14994::std::vector<at::Tensor> quantize_per_tensor_tensors_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
14995 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
14996 auto maybe_layer = maybeCurrentDynamicLayer();
14997 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
14998 int64_t cur_level = maybe_layer->layerId();
14999 if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
15000 return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype);
15001 }
15002 Tensor scales_value;
15003 optional<int64_t> scales_bdim;
15004 std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
15005 Tensor zero_points_value;
15006 optional<int64_t> zero_points_bdim;
15007 std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
15008 auto results = batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype);
15009 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
15010}
15011template <typename batch_rule_t, batch_rule_t batch_rule>
15012at::Tensor quantize_per_channel_generated_plumbing(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
15013 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15014 auto maybe_layer = maybeCurrentDynamicLayer();
15015 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15016 int64_t cur_level = maybe_layer->layerId();
15017 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
15018 return at::_ops::quantize_per_channel::call(self, scales, zero_points, axis, dtype);
15019 }
15020 Tensor self_value;
15021 optional<int64_t> self_bdim;
15022 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15023 Tensor scales_value;
15024 optional<int64_t> scales_bdim;
15025 std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
15026 Tensor zero_points_value;
15027 optional<int64_t> zero_points_bdim;
15028 std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
15029 auto results = batch_rule(self_value, self_bdim, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype);
15030 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15031}
15032template <typename batch_rule_t, batch_rule_t batch_rule>
15033at::Tensor dequantize_self_generated_plumbing(const at::Tensor & self) {
15034 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15035 auto maybe_layer = maybeCurrentDynamicLayer();
15036 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15037 int64_t cur_level = maybe_layer->layerId();
15038 if (!isBatchedAtLevel(self, cur_level)) {
15039 return at::_ops::dequantize_self::call(self);
15040 }
15041 Tensor self_value;
15042 optional<int64_t> self_bdim;
15043 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15044 auto results = batch_rule(self_value, self_bdim);
15045 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15046}
15047template <typename batch_rule_t, batch_rule_t batch_rule>
15048::std::vector<at::Tensor> dequantize_tensors_generated_plumbing(at::TensorList tensors) {
15049 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15050 auto maybe_layer = maybeCurrentDynamicLayer();
15051 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15052 int64_t cur_level = maybe_layer->layerId();
15053 if (!isBatchedAtLevel(tensors, cur_level)) {
15054 return at::_ops::dequantize_tensors::call(tensors);
15055 }
15056
15057 auto results = batch_rule(tensors);
15058 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
15059}
15060template <typename batch_rule_t, batch_rule_t batch_rule>
15061at::Tensor q_per_channel_scales_generated_plumbing(const at::Tensor & self) {
15062 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15063 auto maybe_layer = maybeCurrentDynamicLayer();
15064 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15065 int64_t cur_level = maybe_layer->layerId();
15066 if (!isBatchedAtLevel(self, cur_level)) {
15067 return at::_ops::q_per_channel_scales::call(self);
15068 }
15069 Tensor self_value;
15070 optional<int64_t> self_bdim;
15071 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15072 auto results = batch_rule(self_value, self_bdim);
15073 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15074}
15075template <typename batch_rule_t, batch_rule_t batch_rule>
15076at::Tensor q_per_channel_zero_points_generated_plumbing(const at::Tensor & self) {
15077 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15078 auto maybe_layer = maybeCurrentDynamicLayer();
15079 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15080 int64_t cur_level = maybe_layer->layerId();
15081 if (!isBatchedAtLevel(self, cur_level)) {
15082 return at::_ops::q_per_channel_zero_points::call(self);
15083 }
15084 Tensor self_value;
15085 optional<int64_t> self_bdim;
15086 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15087 auto results = batch_rule(self_value, self_bdim);
15088 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15089}
15090template <typename batch_rule_t, batch_rule_t batch_rule>
15091at::Tensor int_repr_generated_plumbing(const at::Tensor & self) {
15092 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15093 auto maybe_layer = maybeCurrentDynamicLayer();
15094 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15095 int64_t cur_level = maybe_layer->layerId();
15096 if (!isBatchedAtLevel(self, cur_level)) {
15097 return at::_ops::int_repr::call(self);
15098 }
15099 Tensor self_value;
15100 optional<int64_t> self_bdim;
15101 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15102 auto results = batch_rule(self_value, self_bdim);
15103 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15104}
15105template <typename batch_rule_t, batch_rule_t batch_rule>
15106at::Tensor _make_per_tensor_quantized_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point) {
15107 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15108 auto maybe_layer = maybeCurrentDynamicLayer();
15109 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15110 int64_t cur_level = maybe_layer->layerId();
15111 if (!isBatchedAtLevel(self, cur_level)) {
15112 return at::_ops::_make_per_tensor_quantized_tensor::call(self, scale, zero_point);
15113 }
15114 Tensor self_value;
15115 optional<int64_t> self_bdim;
15116 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15117 auto results = batch_rule(self_value, self_bdim, scale, zero_point);
15118 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15119}
15120template <typename batch_rule_t, batch_rule_t batch_rule>
15121at::Tensor _make_per_channel_quantized_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
15122 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15123 auto maybe_layer = maybeCurrentDynamicLayer();
15124 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15125 int64_t cur_level = maybe_layer->layerId();
15126 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
15127 return at::_ops::_make_per_channel_quantized_tensor::call(self, scale, zero_point, axis);
15128 }
15129 Tensor self_value;
15130 optional<int64_t> self_bdim;
15131 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15132 Tensor scale_value;
15133 optional<int64_t> scale_bdim;
15134 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
15135 Tensor zero_point_value;
15136 optional<int64_t> zero_point_bdim;
15137 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
15138 auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis);
15139 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15140}
15141template <typename batch_rule_t, batch_rule_t batch_rule>
15142at::Tensor fake_quantize_per_tensor_affine_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
15143 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15144 auto maybe_layer = maybeCurrentDynamicLayer();
15145 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15146 int64_t cur_level = maybe_layer->layerId();
15147 if (!isBatchedAtLevel(self, cur_level)) {
15148 return at::_ops::fake_quantize_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max);
15149 }
15150 Tensor self_value;
15151 optional<int64_t> self_bdim;
15152 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15153 auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
15154 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15155}
15156template <typename batch_rule_t, batch_rule_t batch_rule>
15157at::Tensor fake_quantize_per_tensor_affine_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
15158 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15159 auto maybe_layer = maybeCurrentDynamicLayer();
15160 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15161 int64_t cur_level = maybe_layer->layerId();
15162 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
15163 return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::call(self, scale, zero_point, quant_min, quant_max);
15164 }
15165 Tensor self_value;
15166 optional<int64_t> self_bdim;
15167 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15168 Tensor scale_value;
15169 optional<int64_t> scale_bdim;
15170 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
15171 Tensor zero_point_value;
15172 optional<int64_t> zero_point_bdim;
15173 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
15174 auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max);
15175 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15176}
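// Ops with multiple outputs (e.g. the cachemask variants below) receive a
// flattened (value, bdim, value, bdim, ...) tuple from the batch rule; each
// pair is re-wrapped with makeBatched and repacked into the returned tuple.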
15177template <typename batch_rule_t, batch_rule_t batch_rule>
15178::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
15179 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15180 auto maybe_layer = maybeCurrentDynamicLayer();
15181 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15182 int64_t cur_level = maybe_layer->layerId();
15183 if (!isBatchedAtLevel(self, cur_level)) {
15184 return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max);
15185 }
15186 Tensor self_value;
15187 optional<int64_t> self_bdim;
15188 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15189 auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
15190 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15191}
15192template <typename batch_rule_t, batch_rule_t batch_rule>
15193::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
15194 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15195 auto maybe_layer = maybeCurrentDynamicLayer();
15196 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15197 int64_t cur_level = maybe_layer->layerId();
15198 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level) && !isBatchedAtLevel(fake_quant_enabled, cur_level)) {
15199 return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
15200 }
15201 Tensor self_value;
15202 optional<int64_t> self_bdim;
15203 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15204 Tensor scale_value;
15205 optional<int64_t> scale_bdim;
15206 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
15207 Tensor zero_point_value;
15208 optional<int64_t> zero_point_bdim;
15209 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
15210 Tensor fake_quant_enabled_value;
15211 optional<int64_t> fake_quant_enabled_bdim;
15212 std::tie(fake_quant_enabled_value, fake_quant_enabled_bdim) = unwrapTensorAtLevel(fake_quant_enabled, cur_level);
15213 auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, fake_quant_enabled_value, fake_quant_enabled_bdim, quant_min, quant_max);
15214 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15215}
15216template <typename batch_rule_t, batch_rule_t batch_rule>
15217at::Tensor fake_quantize_per_tensor_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
15218 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15219 auto maybe_layer = maybeCurrentDynamicLayer();
15220 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15221 int64_t cur_level = maybe_layer->layerId();
15222 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
15223 return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::call(grad, mask);
15224 }
15225 Tensor grad_value;
15226 optional<int64_t> grad_bdim;
15227 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
15228 Tensor mask_value;
15229 optional<int64_t> mask_bdim;
15230 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
15231 auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
15232 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15233}
15234template <typename batch_rule_t, batch_rule_t batch_rule>
15235at::Tensor _fake_quantize_learnable_per_tensor_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
15236 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15237 auto maybe_layer = maybeCurrentDynamicLayer();
15238 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15239 int64_t cur_level = maybe_layer->layerId();
15240 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
15241 return at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max, grad_factor);
15242 }
15243 Tensor self_value;
15244 optional<int64_t> self_bdim;
15245 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15246 Tensor scale_value;
15247 optional<int64_t> scale_bdim;
15248 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
15249 Tensor zero_point_value;
15250 optional<int64_t> zero_point_bdim;
15251 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
15252 auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
15253 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15254}
15255template <typename batch_rule_t, batch_rule_t batch_rule>
15256::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
15257 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15258 auto maybe_layer = maybeCurrentDynamicLayer();
15259 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15260 int64_t cur_level = maybe_layer->layerId();
15261 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
15262 return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
15263 }
15264 Tensor grad_value;
15265 optional<int64_t> grad_bdim;
15266 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
15267 Tensor self_value;
15268 optional<int64_t> self_bdim;
15269 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15270 Tensor scale_value;
15271 optional<int64_t> scale_bdim;
15272 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
15273 Tensor zero_point_value;
15274 optional<int64_t> zero_point_bdim;
15275 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
15276 auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
15277 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
15278}
15279template <typename batch_rule_t, batch_rule_t batch_rule>
15280at::Tensor fake_quantize_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
15281 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15282 auto maybe_layer = maybeCurrentDynamicLayer();
15283 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15284 int64_t cur_level = maybe_layer->layerId();
15285 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
15286 return at::_ops::fake_quantize_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max);
15287 }
15288 Tensor self_value;
15289 optional<int64_t> self_bdim;
15290 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15291 Tensor scale_value;
15292 optional<int64_t> scale_bdim;
15293 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
15294 Tensor zero_point_value;
15295 optional<int64_t> zero_point_bdim;
15296 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
15297 auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
15298 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15299}
15300template <typename batch_rule_t, batch_rule_t batch_rule>
15301::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
15302 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15303 auto maybe_layer = maybeCurrentDynamicLayer();
15304 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15305 int64_t cur_level = maybe_layer->layerId();
15306 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
15307 return at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max);
15308 }
15309 Tensor self_value;
15310 optional<int64_t> self_bdim;
15311 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15312 Tensor scale_value;
15313 optional<int64_t> scale_bdim;
15314 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
15315 Tensor zero_point_value;
15316 optional<int64_t> zero_point_bdim;
15317 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
15318 auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
15319 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15320}
15321template <typename batch_rule_t, batch_rule_t batch_rule>
15322at::Tensor fake_quantize_per_channel_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
15323 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15324 auto maybe_layer = maybeCurrentDynamicLayer();
15325 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15326 int64_t cur_level = maybe_layer->layerId();
15327 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
15328 return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::call(grad, mask);
15329 }
15330 Tensor grad_value;
15331 optional<int64_t> grad_bdim;
15332 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
15333 Tensor mask_value;
15334 optional<int64_t> mask_bdim;
15335 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
15336 auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
15337 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15338}
15339template <typename batch_rule_t, batch_rule_t batch_rule>
15340at::Tensor _fake_quantize_learnable_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
15341 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15342 auto maybe_layer = maybeCurrentDynamicLayer();
15343 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15344 int64_t cur_level = maybe_layer->layerId();
15345 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
15346 return at::_ops::_fake_quantize_learnable_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
15347 }
15348 Tensor self_value;
15349 optional<int64_t> self_bdim;
15350 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15351 Tensor scale_value;
15352 optional<int64_t> scale_bdim;
15353 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
15354 Tensor zero_point_value;
15355 optional<int64_t> zero_point_bdim;
15356 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
15357 auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
15358 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15359}
15360template <typename batch_rule_t, batch_rule_t batch_rule>
15361::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
15362 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15363 auto maybe_layer = maybeCurrentDynamicLayer();
15364 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15365 int64_t cur_level = maybe_layer->layerId();
15366 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
15367 return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
15368 }
15369 Tensor grad_value;
15370 optional<int64_t> grad_bdim;
15371 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
15372 Tensor self_value;
15373 optional<int64_t> self_bdim;
15374 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15375 Tensor scale_value;
15376 optional<int64_t> scale_bdim;
15377 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
15378 Tensor zero_point_value;
15379 optional<int64_t> zero_point_bdim;
15380 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
15381 auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
15382 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
15383}
15384template <typename batch_rule_t, batch_rule_t batch_rule>
15385at::Tensor _saturate_weight_to_fp16_generated_plumbing(const at::Tensor & weight) {
15386 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15387 auto maybe_layer = maybeCurrentDynamicLayer();
15388 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15389 int64_t cur_level = maybe_layer->layerId();
15390 if (!isBatchedAtLevel(weight, cur_level)) {
15391 return at::_ops::_saturate_weight_to_fp16::call(weight);
15392 }
15393 Tensor weight_value;
15394 optional<int64_t> weight_bdim;
15395 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
15396 auto results = batch_rule(weight_value, weight_bdim);
15397 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15398}
15399template <typename batch_rule_t, batch_rule_t batch_rule>
15400::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized_generated_plumbing(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
15401 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15402 auto maybe_layer = maybeCurrentDynamicLayer();
15403 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15404 int64_t cur_level = maybe_layer->layerId();
15405 if (!isBatchedAtLevel(input, cur_level)) {
15406 return at::_ops::choose_qparams_optimized::call(input, numel, n_bins, ratio, bit_width);
15407 }
15408 Tensor input_value;
15409 optional<int64_t> input_bdim;
15410 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
15411 auto results = batch_rule(input_value, input_bdim, numel, n_bins, ratio, bit_width);
15412 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15413}
15414template <typename batch_rule_t, batch_rule_t batch_rule>
15415at::Tensor _autocast_to_reduced_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
15416 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15417 auto maybe_layer = maybeCurrentDynamicLayer();
15418 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15419 int64_t cur_level = maybe_layer->layerId();
15420 if (!isBatchedAtLevel(self, cur_level)) {
15421 return at::_ops::_autocast_to_reduced_precision::call(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
15422 }
15423 Tensor self_value;
15424 optional<int64_t> self_bdim;
15425 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15426 auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
15427 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15428}
15429template <typename batch_rule_t, batch_rule_t batch_rule>
15430at::Tensor _autocast_to_full_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
15431 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15432 auto maybe_layer = maybeCurrentDynamicLayer();
15433 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15434 int64_t cur_level = maybe_layer->layerId();
15435 if (!isBatchedAtLevel(self, cur_level)) {
15436 return at::_ops::_autocast_to_full_precision::call(self, cuda_enabled, cpu_enabled);
15437 }
15438 Tensor self_value;
15439 optional<int64_t> self_bdim;
15440 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15441 auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled);
15442 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15443}
15444template <typename batch_rule_t, batch_rule_t batch_rule>
15445at::Tensor _to_copy_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
15446 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15447 auto maybe_layer = maybeCurrentDynamicLayer();
15448 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15449 int64_t cur_level = maybe_layer->layerId();
15450 if (!isBatchedAtLevel(self, cur_level)) {
15451 return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
15452 }
15453 Tensor self_value;
15454 optional<int64_t> self_bdim;
15455 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15456 auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, memory_format);
15457 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15458}
15459template <typename batch_rule_t, batch_rule_t batch_rule>
15460at::Tensor to_dtype_layout_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
15461 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15462 auto maybe_layer = maybeCurrentDynamicLayer();
15463 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15464 int64_t cur_level = maybe_layer->layerId();
15465 if (!isBatchedAtLevel(self, cur_level)) {
15466 return at::_ops::to_dtype_layout::call(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
15467 }
15468 Tensor self_value;
15469 optional<int64_t> self_bdim;
15470 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15471 auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
15472 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15473}
15474template <typename batch_rule_t, batch_rule_t batch_rule>
15475at::Tensor to_device_generated_plumbing(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
15476 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15477 auto maybe_layer = maybeCurrentDynamicLayer();
15478 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15479 int64_t cur_level = maybe_layer->layerId();
15480 if (!isBatchedAtLevel(self, cur_level)) {
15481 return at::_ops::to_device::call(self, device, dtype, non_blocking, copy, memory_format);
15482 }
15483 Tensor self_value;
15484 optional<int64_t> self_bdim;
15485 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15486 auto results = batch_rule(self_value, self_bdim, device, dtype, non_blocking, copy, memory_format);
15487 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15488}
15489template <typename batch_rule_t, batch_rule_t batch_rule>
15490at::Tensor to_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
15491 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15492 auto maybe_layer = maybeCurrentDynamicLayer();
15493 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15494 int64_t cur_level = maybe_layer->layerId();
15495 if (!isBatchedAtLevel(self, cur_level)) {
15496 return at::_ops::to_dtype::call(self, dtype, non_blocking, copy, memory_format);
15497 }
15498 Tensor self_value;
15499 optional<int64_t> self_bdim;
15500 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15501 auto results = batch_rule(self_value, self_bdim, dtype, non_blocking, copy, memory_format);
15502 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15503}
15504template <typename batch_rule_t, batch_rule_t batch_rule>
15505at::Tensor to_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
15506 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15507 auto maybe_layer = maybeCurrentDynamicLayer();
15508 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15509 int64_t cur_level = maybe_layer->layerId();
15510 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
15511 return at::_ops::to_other::call(self, other, non_blocking, copy, memory_format);
15512 }
15513 Tensor self_value;
15514 optional<int64_t> self_bdim;
15515 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15516 Tensor other_value;
15517 optional<int64_t> other_bdim;
15518 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
15519 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, non_blocking, copy, memory_format);
15520 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15521}
15522template <typename batch_rule_t, batch_rule_t batch_rule>
15523::std::vector<at::Tensor> meshgrid_generated_plumbing(at::TensorList tensors) {
15524 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15525 auto maybe_layer = maybeCurrentDynamicLayer();
15526 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15527 int64_t cur_level = maybe_layer->layerId();
15528 if (!isBatchedAtLevel(tensors, cur_level)) {
15529 return at::_ops::meshgrid::call(tensors);
15530 }
15531
15532 auto results = batch_rule(tensors);
15533 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
15534}
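// For TensorList arguments (e.g. `tensors` above) the plumbing does not unwrap
// the list here; the batch rule receives the list as-is and is responsible for
// any batched members. Operators returning ::std::vector<at::Tensor> re-wrap
// their outputs with makeBatchedVector instead of makeBatched.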
15535template <typename batch_rule_t, batch_rule_t batch_rule>
15536::std::vector<at::Tensor> meshgrid_indexing_generated_plumbing(at::TensorList tensors, c10::string_view indexing) {
15537 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15538 auto maybe_layer = maybeCurrentDynamicLayer();
15539 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15540 int64_t cur_level = maybe_layer->layerId();
15541 if (!isBatchedAtLevel(tensors, cur_level)) {
15542 return at::_ops::meshgrid_indexing::call(tensors, indexing);
15543 }
15544
15545 auto results = batch_rule(tensors, indexing);
15546 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
15547}
15548template <typename batch_rule_t, batch_rule_t batch_rule>
15549at::Tensor cartesian_prod_generated_plumbing(at::TensorList tensors) {
15550 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15551 auto maybe_layer = maybeCurrentDynamicLayer();
15552 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15553 int64_t cur_level = maybe_layer->layerId();
15554 if (!isBatchedAtLevel(tensors, cur_level)) {
15555 return at::_ops::cartesian_prod::call(tensors);
15556 }
15557
15558 auto results = batch_rule(tensors);
15559 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15560}
15561template <typename batch_rule_t, batch_rule_t batch_rule>
15562at::Tensor combinations_generated_plumbing(const at::Tensor & self, int64_t r, bool with_replacement) {
15563 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15564 auto maybe_layer = maybeCurrentDynamicLayer();
15565 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15566 int64_t cur_level = maybe_layer->layerId();
15567 if (!isBatchedAtLevel(self, cur_level)) {
15568 return at::_ops::combinations::call(self, r, with_replacement);
15569 }
15570 Tensor self_value;
15571 optional<int64_t> self_bdim;
15572 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
15573 auto results = batch_rule(self_value, self_bdim, r, with_replacement);
15574 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
15575}
15576template <typename batch_rule_t, batch_rule_t batch_rule>
15577::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
15578 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15579 auto maybe_layer = maybeCurrentDynamicLayer();
15580 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15581 int64_t cur_level = maybe_layer->layerId();
15582 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15583 return at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15584 }
15585 Tensor input_value;
15586 optional<int64_t> input_bdim;
15587 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
15588 auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15589 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
15590}
15591template <typename batch_rule_t, batch_rule_t batch_rule>
15592::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward_generated_plumbing(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
15593 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15594 auto maybe_layer = maybeCurrentDynamicLayer();
15595 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15596 int64_t cur_level = maybe_layer->layerId();
15597 if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15598 return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15599 }
15600 Tensor grad_y_value;
15601 optional<int64_t> grad_y_bdim;
15602 std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y, cur_level);
15603 Tensor z_state_value;
15604 optional<int64_t> z_state_bdim;
15605 std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level);
15606 Tensor cell_state_fwd_value;
15607 optional<int64_t> cell_state_fwd_bdim;
15608 std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level);
15609 Tensor input_value;
15610 optional<int64_t> input_bdim;
15611 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
15612 optional<Tensor> grad_hy_value;
15613 optional<int64_t> grad_hy_bdim;
15614 if (grad_hy) {
15615 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
15616 }
15617 optional<Tensor> grad_cy_value;
15618 optional<int64_t> grad_cy_bdim;
15619 if (grad_cy) {
15620 std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
15621 }
15622 auto results = batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15623 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
15624}
15625template <typename batch_rule_t, batch_rule_t batch_rule>
15626::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
15627 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15628 auto maybe_layer = maybeCurrentDynamicLayer();
15629 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15630 int64_t cur_level = maybe_layer->layerId();
15631 if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
15632 return at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
15633 }
15634 Tensor input_gates_value;
15635 optional<int64_t> input_gates_bdim;
15636 std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
15637 Tensor hidden_gates_value;
15638 optional<int64_t> hidden_gates_bdim;
15639 std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
15640 Tensor cx_value;
15641 optional<int64_t> cx_bdim;
15642 std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
15643 optional<Tensor> input_bias_value;
15644 optional<int64_t> input_bias_bdim;
15645 if (input_bias) {
15646 std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
15647 }
15648 optional<Tensor> hidden_bias_value;
15649 optional<int64_t> hidden_bias_bdim;
15650 if (hidden_bias) {
15651 std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
15652 }
15653 auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, cx_value, cx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
15654 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
15655}
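// Optional tensor arguments (c10::optional<at::Tensor>, e.g. input_bias and
// hidden_bias above) are unwrapped only when they hold a value; otherwise the
// batch rule receives nullopt for both the value and its batch dimension.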
15656template <typename batch_rule_t, batch_rule_t batch_rule>
15657::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
15658 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15659 auto maybe_layer = maybeCurrentDynamicLayer();
15660 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15661 int64_t cur_level = maybe_layer->layerId();
15662 if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
15663 return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
15664 }
15665 Tensor cx_value;
15666 optional<int64_t> cx_bdim;
15667 std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
15668 Tensor cy_value;
15669 optional<int64_t> cy_bdim;
15670 std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
15671 Tensor workspace_value;
15672 optional<int64_t> workspace_bdim;
15673 std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
15674 optional<Tensor> grad_hy_value;
15675 optional<int64_t> grad_hy_bdim;
15676 if (grad_hy) {
15677 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
15678 }
15679 optional<Tensor> grad_cy_value;
15680 optional<int64_t> grad_cy_bdim;
15681 if (grad_cy) {
15682 std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
15683 }
15684 auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
15685 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
15686}
15687template <typename batch_rule_t, batch_rule_t batch_rule>
15688::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
15689 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15690 auto maybe_layer = maybeCurrentDynamicLayer();
15691 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15692 int64_t cur_level = maybe_layer->layerId();
15693 if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
15694 return at::_ops::_thnn_fused_lstm_cell_backward::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
15695 }
15696 Tensor cx_value;
15697 optional<int64_t> cx_bdim;
15698 std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
15699 Tensor cy_value;
15700 optional<int64_t> cy_bdim;
15701 std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
15702 Tensor workspace_value;
15703 optional<int64_t> workspace_bdim;
15704 std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
15705 optional<Tensor> grad_hy_value;
15706 optional<int64_t> grad_hy_bdim;
15707 if (grad_hy) {
15708 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
15709 }
15710 optional<Tensor> grad_cy_value;
15711 optional<int64_t> grad_cy_bdim;
15712 if (grad_cy) {
15713 std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
15714 }
15715 auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
15716 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
15717}
15718template <typename batch_rule_t, batch_rule_t batch_rule>
15719::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
15720 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15721 auto maybe_layer = maybeCurrentDynamicLayer();
15722 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15723 int64_t cur_level = maybe_layer->layerId();
15724 if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level)) {
15725 return at::_ops::_thnn_differentiable_lstm_cell_backward::call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
15726 }
15727 Tensor input_gates_value;
15728 optional<int64_t> input_gates_bdim;
15729 std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
15730 Tensor hidden_gates_value;
15731 optional<int64_t> hidden_gates_bdim;
15732 std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
15733 Tensor cx_value;
15734 optional<int64_t> cx_bdim;
15735 std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
15736 Tensor cy_value;
15737 optional<int64_t> cy_bdim;
15738 std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
15739 optional<Tensor> grad_hy_value;
15740 optional<int64_t> grad_hy_bdim;
15741 if (grad_hy) {
15742 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
15743 }
15744 optional<Tensor> grad_cy_value;
15745 optional<int64_t> grad_cy_bdim;
15746 if (grad_cy) {
15747 std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
15748 }
15749 optional<Tensor> input_bias_value;
15750 optional<int64_t> input_bias_bdim;
15751 if (input_bias) {
15752 std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
15753 }
15754 optional<Tensor> hidden_bias_value;
15755 optional<int64_t> hidden_bias_bdim;
15756 if (hidden_bias) {
15757 std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
15758 }
15759 auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim, cx_value, cx_bdim, cy_value, cy_bdim);
15760 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
15761}
15762template <typename batch_rule_t, batch_rule_t batch_rule>
15763::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
15764 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15765 auto maybe_layer = maybeCurrentDynamicLayer();
15766 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15767 int64_t cur_level = maybe_layer->layerId();
15768 if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
15769 return at::_ops::_thnn_fused_gru_cell::call(input_gates, hidden_gates, hx, input_bias, hidden_bias);
15770 }
15771 Tensor input_gates_value;
15772 optional<int64_t> input_gates_bdim;
15773 std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
15774 Tensor hidden_gates_value;
15775 optional<int64_t> hidden_gates_bdim;
15776 std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
15777 Tensor hx_value;
15778 optional<int64_t> hx_bdim;
15779 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
15780 optional<Tensor> input_bias_value;
15781 optional<int64_t> input_bias_bdim;
15782 if (input_bias) {
15783 std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
15784 }
15785 optional<Tensor> hidden_bias_value;
15786 optional<int64_t> hidden_bias_bdim;
15787 if (hidden_bias) {
15788 std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
15789 }
15790 auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
15791 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15792}
15793template <typename batch_rule_t, batch_rule_t batch_rule>
15794::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
15795 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15796 auto maybe_layer = maybeCurrentDynamicLayer();
15797 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15798 int64_t cur_level = maybe_layer->layerId();
15799 if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
15800 return at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias);
15801 }
15802 Tensor grad_hy_value;
15803 optional<int64_t> grad_hy_bdim;
15804 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level);
15805 Tensor workspace_value;
15806 optional<int64_t> workspace_bdim;
15807 std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
15808 auto results = batch_rule(grad_hy_value, grad_hy_bdim, workspace_value, workspace_bdim, has_bias);
15809 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
15810}
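// Batch rules for multi-output operators return a flat tuple of interleaved
// (tensor, batch-dim) pairs; the plumbing re-wraps them pairwise, so an
// operator with five tensor outputs consumes std::get<0>..std::get<9> of the
// batch rule's result, as in the return statement above.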
15811template <typename batch_rule_t, batch_rule_t batch_rule>
15812::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
15813 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15814 auto maybe_layer = maybeCurrentDynamicLayer();
15815 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15816 int64_t cur_level = maybe_layer->layerId();
15817 if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
15818 return at::_ops::_thnn_differentiable_gru_cell_backward::call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
15819 }
15820 Tensor grad_hy_value;
15821 optional<int64_t> grad_hy_bdim;
15822 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level);
15823 Tensor input_gates_value;
15824 optional<int64_t> input_gates_bdim;
15825 std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
15826 Tensor hidden_gates_value;
15827 optional<int64_t> hidden_gates_bdim;
15828 std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
15829 Tensor hx_value;
15830 optional<int64_t> hx_bdim;
15831 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
15832 optional<Tensor> input_bias_value;
15833 optional<int64_t> input_bias_bdim;
15834 if (input_bias) {
15835 std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
15836 }
15837 optional<Tensor> hidden_bias_value;
15838 optional<int64_t> hidden_bias_bdim;
15839 if (hidden_bias) {
15840 std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
15841 }
15842 auto results = batch_rule(grad_hy_value, grad_hy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
15843 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
15844}
15845template <typename batch_rule_t, batch_rule_t batch_rule>
15846::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
15847 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15848 auto maybe_layer = maybeCurrentDynamicLayer();
15849 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15850 int64_t cur_level = maybe_layer->layerId();
15851 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15852 return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15853 }
15854 Tensor input_value;
15855 optional<int64_t> input_bdim;
15856 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
15857 auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15858 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
15859}
15860template <typename batch_rule_t, batch_rule_t batch_rule>
15861::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
15862 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15863 auto maybe_layer = maybeCurrentDynamicLayer();
15864 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15865 int64_t cur_level = maybe_layer->layerId();
15866 if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15867 return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
15868 }
15869 Tensor data_value;
15870 optional<int64_t> data_bdim;
15871 std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
15872 Tensor batch_sizes_value;
15873 optional<int64_t> batch_sizes_bdim;
15874 std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
15875 auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional);
15876 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
15877}
15878template <typename batch_rule_t, batch_rule_t batch_rule>
15879::std::tuple<at::Tensor,at::Tensor> gru_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
15880 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15881 auto maybe_layer = maybeCurrentDynamicLayer();
15882 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15883 int64_t cur_level = maybe_layer->layerId();
15884 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15885 return at::_ops::gru_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15886 }
15887 Tensor input_value;
15888 optional<int64_t> input_bdim;
15889 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
15890 Tensor hx_value;
15891 optional<int64_t> hx_bdim;
15892 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
15893 auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15894 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15895}
15896template <typename batch_rule_t, batch_rule_t batch_rule>
15897::std::tuple<at::Tensor,at::Tensor> gru_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
15898 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15899 auto maybe_layer = maybeCurrentDynamicLayer();
15900 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15901 int64_t cur_level = maybe_layer->layerId();
15902 if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15903 return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
15904 }
15905 Tensor data_value;
15906 optional<int64_t> data_bdim;
15907 std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
15908 Tensor batch_sizes_value;
15909 optional<int64_t> batch_sizes_bdim;
15910 std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
15911 Tensor hx_value;
15912 optional<int64_t> hx_bdim;
15913 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
15914 auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
15915 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15916}
15917template <typename batch_rule_t, batch_rule_t batch_rule>
15918::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
15919 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15920 auto maybe_layer = maybeCurrentDynamicLayer();
15921 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15922 int64_t cur_level = maybe_layer->layerId();
15923 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15924 return at::_ops::rnn_tanh_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15925 }
15926 Tensor input_value;
15927 optional<int64_t> input_bdim;
15928 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
15929 Tensor hx_value;
15930 optional<int64_t> hx_bdim;
15931 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
15932 auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15933 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15934}
15935template <typename batch_rule_t, batch_rule_t batch_rule>
15936::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
15937 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15938 auto maybe_layer = maybeCurrentDynamicLayer();
15939 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15940 int64_t cur_level = maybe_layer->layerId();
15941 if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15942 return at::_ops::rnn_tanh_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
15943 }
15944 Tensor data_value;
15945 optional<int64_t> data_bdim;
15946 std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
15947 Tensor batch_sizes_value;
15948 optional<int64_t> batch_sizes_bdim;
15949 std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
15950 Tensor hx_value;
15951 optional<int64_t> hx_bdim;
15952 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
15953 auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
15954 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15955}
15956template <typename batch_rule_t, batch_rule_t batch_rule>
15957::std::tuple<at::Tensor,at::Tensor> rnn_relu_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
15958 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15959 auto maybe_layer = maybeCurrentDynamicLayer();
15960 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15961 int64_t cur_level = maybe_layer->layerId();
15962 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15963 return at::_ops::rnn_relu_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15964 }
15965 Tensor input_value;
15966 optional<int64_t> input_bdim;
15967 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
15968 Tensor hx_value;
15969 optional<int64_t> hx_bdim;
15970 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
15971 auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
15972 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15973}
15974template <typename batch_rule_t, batch_rule_t batch_rule>
15975::std::tuple<at::Tensor,at::Tensor> rnn_relu_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
15976 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15977 auto maybe_layer = maybeCurrentDynamicLayer();
15978 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
15979 int64_t cur_level = maybe_layer->layerId();
15980 if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
15981 return at::_ops::rnn_relu_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
15982 }
15983 Tensor data_value;
15984 optional<int64_t> data_bdim;
15985 std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
15986 Tensor batch_sizes_value;
15987 optional<int64_t> batch_sizes_bdim;
15988 std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
15989 Tensor hx_value;
15990 optional<int64_t> hx_bdim;
15991 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
15992 auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
15993 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
15994}
15995template <typename batch_rule_t, batch_rule_t batch_rule>
15996::std::tuple<at::Tensor,at::Tensor> lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
15997 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
15998 auto maybe_layer = maybeCurrentDynamicLayer();
15999 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16000 int64_t cur_level = maybe_layer->layerId();
16001 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
16002 return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
16003 }
16004 Tensor input_value;
16005 optional<int64_t> input_bdim;
16006 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
16007 Tensor w_ih_value;
16008 optional<int64_t> w_ih_bdim;
16009 std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
16010 Tensor w_hh_value;
16011 optional<int64_t> w_hh_bdim;
16012 std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
16013 optional<Tensor> b_ih_value;
16014 optional<int64_t> b_ih_bdim;
16015 if (b_ih) {
16016 std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
16017 }
16018 optional<Tensor> b_hh_value;
16019 optional<int64_t> b_hh_bdim;
16020 if (b_hh) {
16021 std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
16022 }
16023 auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
16024 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
16025}
16026template <typename batch_rule_t, batch_rule_t batch_rule>
16027at::Tensor gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
16028 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16029 auto maybe_layer = maybeCurrentDynamicLayer();
16030 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16031 int64_t cur_level = maybe_layer->layerId();
16032 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
16033 return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
16034 }
16035 Tensor input_value;
16036 optional<int64_t> input_bdim;
16037 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
16038 Tensor hx_value;
16039 optional<int64_t> hx_bdim;
16040 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
16041 Tensor w_ih_value;
16042 optional<int64_t> w_ih_bdim;
16043 std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
16044 Tensor w_hh_value;
16045 optional<int64_t> w_hh_bdim;
16046 std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
16047 optional<Tensor> b_ih_value;
16048 optional<int64_t> b_ih_bdim;
16049 if (b_ih) {
16050 std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
16051 }
16052 optional<Tensor> b_hh_value;
16053 optional<int64_t> b_hh_bdim;
16054 if (b_hh) {
16055 std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
16056 }
16057 auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
16058 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16059}
16060template <typename batch_rule_t, batch_rule_t batch_rule>
16061at::Tensor rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
16062 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16063 auto maybe_layer = maybeCurrentDynamicLayer();
16064 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16065 int64_t cur_level = maybe_layer->layerId();
16066 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
16067 return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
16068 }
16069 Tensor input_value;
16070 optional<int64_t> input_bdim;
16071 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
16072 Tensor hx_value;
16073 optional<int64_t> hx_bdim;
16074 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
16075 Tensor w_ih_value;
16076 optional<int64_t> w_ih_bdim;
16077 std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
16078 Tensor w_hh_value;
16079 optional<int64_t> w_hh_bdim;
16080 std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
16081 optional<Tensor> b_ih_value;
16082 optional<int64_t> b_ih_bdim;
16083 if (b_ih) {
16084 std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
16085 }
16086 optional<Tensor> b_hh_value;
16087 optional<int64_t> b_hh_bdim;
16088 if (b_hh) {
16089 std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
16090 }
16091 auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
16092 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16093}
16094template <typename batch_rule_t, batch_rule_t batch_rule>
16095at::Tensor rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
16096 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16097 auto maybe_layer = maybeCurrentDynamicLayer();
16098 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16099 int64_t cur_level = maybe_layer->layerId();
16100 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
16101 return at::_ops::rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
16102 }
16103 Tensor input_value;
16104 optional<int64_t> input_bdim;
16105 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
16106 Tensor hx_value;
16107 optional<int64_t> hx_bdim;
16108 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
16109 Tensor w_ih_value;
16110 optional<int64_t> w_ih_bdim;
16111 std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
16112 Tensor w_hh_value;
16113 optional<int64_t> w_hh_bdim;
16114 std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
16115 optional<Tensor> b_ih_value;
16116 optional<int64_t> b_ih_bdim;
16117 if (b_ih) {
16118 std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
16119 }
16120 optional<Tensor> b_hh_value;
16121 optional<int64_t> b_hh_bdim;
16122 if (b_hh) {
16123 std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
16124 }
16125 auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
16126 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16127}
16128template <typename batch_rule_t, batch_rule_t batch_rule>
16129::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
16130 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16131 auto maybe_layer = maybeCurrentDynamicLayer();
16132 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16133 int64_t cur_level = maybe_layer->layerId();
16134 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
16135 return at::_ops::quantized_lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
16136 }
16137 Tensor input_value;
16138 optional<int64_t> input_bdim;
16139 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
16140 Tensor w_ih_value;
16141 optional<int64_t> w_ih_bdim;
16142 std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
16143 Tensor w_hh_value;
16144 optional<int64_t> w_hh_bdim;
16145 std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
16146 Tensor b_ih_value;
16147 optional<int64_t> b_ih_bdim;
16148 std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
16149 Tensor b_hh_value;
16150 optional<int64_t> b_hh_bdim;
16151 std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
16152 Tensor packed_ih_value;
16153 optional<int64_t> packed_ih_bdim;
16154 std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
16155 Tensor packed_hh_value;
16156 optional<int64_t> packed_hh_bdim;
16157 std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
16158 Tensor col_offsets_ih_value;
16159 optional<int64_t> col_offsets_ih_bdim;
16160 std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
16161 Tensor col_offsets_hh_value;
16162 optional<int64_t> col_offsets_hh_bdim;
16163 std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
16164 auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
16165 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
16166}
16167template <typename batch_rule_t, batch_rule_t batch_rule>
16168at::Tensor quantized_gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
16169 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16170 auto maybe_layer = maybeCurrentDynamicLayer();
16171 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16172 int64_t cur_level = maybe_layer->layerId();
16173 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
16174 return at::_ops::quantized_gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
16175 }
16176 Tensor input_value;
16177 optional<int64_t> input_bdim;
16178 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
16179 Tensor hx_value;
16180 optional<int64_t> hx_bdim;
16181 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
16182 Tensor w_ih_value;
16183 optional<int64_t> w_ih_bdim;
16184 std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
16185 Tensor w_hh_value;
16186 optional<int64_t> w_hh_bdim;
16187 std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
16188 Tensor b_ih_value;
16189 optional<int64_t> b_ih_bdim;
16190 std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
16191 Tensor b_hh_value;
16192 optional<int64_t> b_hh_bdim;
16193 std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
16194 Tensor packed_ih_value;
16195 optional<int64_t> packed_ih_bdim;
16196 std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
16197 Tensor packed_hh_value;
16198 optional<int64_t> packed_hh_bdim;
16199 std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
16200 Tensor col_offsets_ih_value;
16201 optional<int64_t> col_offsets_ih_bdim;
16202 std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
16203 Tensor col_offsets_hh_value;
16204 optional<int64_t> col_offsets_hh_bdim;
16205 std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
16206 auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
16207 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16208}
16209template <typename batch_rule_t, batch_rule_t batch_rule>
16210at::Tensor quantized_rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
16211 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16212 auto maybe_layer = maybeCurrentDynamicLayer();
16213 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16214 int64_t cur_level = maybe_layer->layerId();
16215 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
16216 return at::_ops::quantized_rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
16217 }
16218 Tensor input_value;
16219 optional<int64_t> input_bdim;
16220 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
16221 Tensor hx_value;
16222 optional<int64_t> hx_bdim;
16223 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
16224 Tensor w_ih_value;
16225 optional<int64_t> w_ih_bdim;
16226 std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
16227 Tensor w_hh_value;
16228 optional<int64_t> w_hh_bdim;
16229 std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
16230 Tensor b_ih_value;
16231 optional<int64_t> b_ih_bdim;
16232 std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
16233 Tensor b_hh_value;
16234 optional<int64_t> b_hh_bdim;
16235 std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
16236 Tensor packed_ih_value;
16237 optional<int64_t> packed_ih_bdim;
16238 std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
16239 Tensor packed_hh_value;
16240 optional<int64_t> packed_hh_bdim;
16241 std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
16242 Tensor col_offsets_ih_value;
16243 optional<int64_t> col_offsets_ih_bdim;
16244 std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
16245 Tensor col_offsets_hh_value;
16246 optional<int64_t> col_offsets_hh_bdim;
16247 std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
16248 auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
16249 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16250}
16251template <typename batch_rule_t, batch_rule_t batch_rule>
16252at::Tensor quantized_rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
16253 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16254 auto maybe_layer = maybeCurrentDynamicLayer();
16255 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16256 int64_t cur_level = maybe_layer->layerId();
16257 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
16258 return at::_ops::quantized_rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
16259 }
16260 Tensor input_value;
16261 optional<int64_t> input_bdim;
16262 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
16263 Tensor hx_value;
16264 optional<int64_t> hx_bdim;
16265 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
16266 Tensor w_ih_value;
16267 optional<int64_t> w_ih_bdim;
16268 std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
16269 Tensor w_hh_value;
16270 optional<int64_t> w_hh_bdim;
16271 std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
16272 Tensor b_ih_value;
16273 optional<int64_t> b_ih_bdim;
16274 std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
16275 Tensor b_hh_value;
16276 optional<int64_t> b_hh_bdim;
16277 std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
16278 Tensor packed_ih_value;
16279 optional<int64_t> packed_ih_bdim;
16280 std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
16281 Tensor packed_hh_value;
16282 optional<int64_t> packed_hh_bdim;
16283 std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
16284 Tensor col_offsets_ih_value;
16285 optional<int64_t> col_offsets_ih_bdim;
16286 std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
16287 Tensor col_offsets_hh_value;
16288 optional<int64_t> col_offsets_hh_bdim;
16289 std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
16290 auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
16291 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16292}
16293template <typename batch_rule_t, batch_rule_t batch_rule>
16294::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence_generated_plumbing(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
16295 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16296 auto maybe_layer = maybeCurrentDynamicLayer();
16297 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16298 int64_t cur_level = maybe_layer->layerId();
16299 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(lengths, cur_level)) {
16300 return at::_ops::_pack_padded_sequence::call(input, lengths, batch_first);
16301 }
16302 Tensor input_value;
16303 optional<int64_t> input_bdim;
16304 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
16305 Tensor lengths_value;
16306 optional<int64_t> lengths_bdim;
16307 std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths, cur_level);
16308 auto results = batch_rule(input_value, input_bdim, lengths_value, lengths_bdim, batch_first);
16309 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
16310}
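// Non-Tensor arguments (c10::SymIntArrayRef sizes, Scalars, bools) carry no batch
// dimension, so they are forwarded to the batch rule unchanged; only Tensor arguments
// are unwrapped at the current level.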
16311template <typename batch_rule_t, batch_rule_t batch_rule>
16312at::Tensor _pack_padded_sequence_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
16313 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16314 auto maybe_layer = maybeCurrentDynamicLayer();
16315 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16316 int64_t cur_level = maybe_layer->layerId();
16317 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
16318 return at::_ops::_pack_padded_sequence_backward::call(grad, input_size, batch_sizes, batch_first);
16319 }
16320 Tensor grad_value;
16321 optional<int64_t> grad_bdim;
16322 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
16323 Tensor batch_sizes_value;
16324 optional<int64_t> batch_sizes_bdim;
16325 std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
16326 auto results = batch_rule(grad_value, grad_bdim, input_size, batch_sizes_value, batch_sizes_bdim, batch_first);
16327 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16328}
16329template <typename batch_rule_t, batch_rule_t batch_rule>
16330::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
16331 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16332 auto maybe_layer = maybeCurrentDynamicLayer();
16333 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16334 int64_t cur_level = maybe_layer->layerId();
16335 if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
16336 return at::_ops::_pad_packed_sequence::call(data, batch_sizes, batch_first, padding_value, total_length);
16337 }
16338 Tensor data_value;
16339 optional<int64_t> data_bdim;
16340 std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
16341 Tensor batch_sizes_value;
16342 optional<int64_t> batch_sizes_bdim;
16343 std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
16344 auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, batch_first, padding_value, total_length);
16345 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
16346}
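// The in-place variants below (generated via gen_vmap_inplace_plumbing) call the batch
// rule purely for its mutating side effect on the unwrapped self_value and then return
// `self`; no makeBatched re-wrapping of a result is performed.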
16347template <typename batch_rule_t, batch_rule_t batch_rule>
16348at::Tensor & set__source_Storage_generated_plumbing(at::Tensor & self, at::Storage source) {
16349 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16350 auto maybe_layer = maybeCurrentDynamicLayer();
16351 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16352 int64_t cur_level = maybe_layer->layerId();
16353 if (!isBatchedAtLevel(self, cur_level)) {
16354 return at::_ops::set__source_Storage::call(self, source);
16355 }
16356 Tensor self_value;
16357 optional<int64_t> self_bdim;
16358 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16359 batch_rule(self_value, self_bdim, source);
16360 return self;
16361}
16362template <typename batch_rule_t, batch_rule_t batch_rule>
16363at::Tensor & set__source_Storage_storage_offset_generated_plumbing(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
16364 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16365 auto maybe_layer = maybeCurrentDynamicLayer();
16366 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16367 int64_t cur_level = maybe_layer->layerId();
16368 if (!isBatchedAtLevel(self, cur_level)) {
16369 return at::_ops::set__source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
16370 }
16371 Tensor self_value;
16372 optional<int64_t> self_bdim;
16373 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16374 batch_rule(self_value, self_bdim, source, storage_offset, size, stride);
16375 return self;
16376}
16377template <typename batch_rule_t, batch_rule_t batch_rule>
16378at::Tensor & set__source_Tensor_storage_offset_generated_plumbing(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
16379 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16380 auto maybe_layer = maybeCurrentDynamicLayer();
16381 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16382 int64_t cur_level = maybe_layer->layerId();
16383 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16384 return at::_ops::set__source_Tensor_storage_offset::call(self, source, storage_offset, size, stride);
16385 }
16386 Tensor self_value;
16387 optional<int64_t> self_bdim;
16388 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16389 Tensor source_value;
16390 optional<int64_t> source_bdim;
16391 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16392 batch_rule(self_value, self_bdim, source_value, source_bdim, storage_offset, size, stride);
16393 return self;
16394}
16395template <typename batch_rule_t, batch_rule_t batch_rule>
16396at::Tensor & set__source_Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & source) {
16397 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16398 auto maybe_layer = maybeCurrentDynamicLayer();
16399 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16400 int64_t cur_level = maybe_layer->layerId();
16401 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16402 return at::_ops::set__source_Tensor::call(self, source);
16403 }
16404 Tensor self_value;
16405 optional<int64_t> self_bdim;
16406 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16407 Tensor source_value;
16408 optional<int64_t> source_bdim;
16409 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16410 batch_rule(self_value, self_bdim, source_value, source_bdim);
16411 return self;
16412}
16413template <typename batch_rule_t, batch_rule_t batch_rule>
16414at::Tensor & set__generated_plumbing(at::Tensor & self) {
16415 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16416 auto maybe_layer = maybeCurrentDynamicLayer();
16417 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16418 int64_t cur_level = maybe_layer->layerId();
16419 if (!isBatchedAtLevel(self, cur_level)) {
16420 return at::_ops::set_::call(self);
16421 }
16422 Tensor self_value;
16423 optional<int64_t> self_bdim;
16424 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16425 batch_rule(self_value, self_bdim);
16426 return self;
16427}
16428template <typename batch_rule_t, batch_rule_t batch_rule>
16429at::Tensor lift_generated_plumbing(const at::Tensor & self) {
16430 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16431 auto maybe_layer = maybeCurrentDynamicLayer();
16432 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16433 int64_t cur_level = maybe_layer->layerId();
16434 if (!isBatchedAtLevel(self, cur_level)) {
16435 return at::_ops::lift::call(self);
16436 }
16437 Tensor self_value;
16438 optional<int64_t> self_bdim;
16439 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16440 auto results = batch_rule(self_value, self_bdim);
16441 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16442}
16443template <typename batch_rule_t, batch_rule_t batch_rule>
16444at::Tensor lift_fresh_generated_plumbing(const at::Tensor & self) {
16445 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16446 auto maybe_layer = maybeCurrentDynamicLayer();
16447 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16448 int64_t cur_level = maybe_layer->layerId();
16449 if (!isBatchedAtLevel(self, cur_level)) {
16450 return at::_ops::lift_fresh::call(self);
16451 }
16452 Tensor self_value;
16453 optional<int64_t> self_bdim;
16454 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16455 auto results = batch_rule(self_value, self_bdim);
16456 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16457}
16458template <typename batch_rule_t, batch_rule_t batch_rule>
16459at::Tensor lift_fresh_copy_generated_plumbing(const at::Tensor & self) {
16460 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16461 auto maybe_layer = maybeCurrentDynamicLayer();
16462 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16463 int64_t cur_level = maybe_layer->layerId();
16464 if (!isBatchedAtLevel(self, cur_level)) {
16465 return at::_ops::lift_fresh_copy::call(self);
16466 }
16467 Tensor self_value;
16468 optional<int64_t> self_bdim;
16469 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16470 auto results = batch_rule(self_value, self_bdim);
16471 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16472}
16473template <typename batch_rule_t, batch_rule_t batch_rule>
16474at::Tensor & masked_fill__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
16475 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16476 auto maybe_layer = maybeCurrentDynamicLayer();
16477 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16478 int64_t cur_level = maybe_layer->layerId();
16479 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
16480 return at::_ops::masked_fill__Scalar::call(self, mask, value);
16481 }
16482 Tensor self_value;
16483 optional<int64_t> self_bdim;
16484 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16485 Tensor mask_value;
16486 optional<int64_t> mask_bdim;
16487 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
16488 batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
16489 return self;
16490}
16491template <typename batch_rule_t, batch_rule_t batch_rule>
16492at::Tensor masked_fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
16493 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16494 auto maybe_layer = maybeCurrentDynamicLayer();
16495 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16496 int64_t cur_level = maybe_layer->layerId();
16497 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
16498 return at::_ops::masked_fill_Scalar::call(self, mask, value);
16499 }
16500 Tensor self_value;
16501 optional<int64_t> self_bdim;
16502 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16503 Tensor mask_value;
16504 optional<int64_t> mask_bdim;
16505 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
16506 auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
16507 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16508}
16509template <typename batch_rule_t, batch_rule_t batch_rule>
16510at::Tensor & masked_fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
16511 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16512 auto maybe_layer = maybeCurrentDynamicLayer();
16513 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16514 int64_t cur_level = maybe_layer->layerId();
16515 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
16516 return at::_ops::masked_fill__Tensor::call(self, mask, value);
16517 }
16518 Tensor self_value;
16519 optional<int64_t> self_bdim;
16520 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16521 Tensor mask_value;
16522 optional<int64_t> mask_bdim;
16523 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
16524 Tensor value_value;
16525 optional<int64_t> value_bdim;
16526 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
16527 batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
16528 return self;
16529}
16530template <typename batch_rule_t, batch_rule_t batch_rule>
16531at::Tensor masked_fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
16532 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16533 auto maybe_layer = maybeCurrentDynamicLayer();
16534 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16535 int64_t cur_level = maybe_layer->layerId();
16536 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
16537 return at::_ops::masked_fill_Tensor::call(self, mask, value);
16538 }
16539 Tensor self_value;
16540 optional<int64_t> self_bdim;
16541 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16542 Tensor mask_value;
16543 optional<int64_t> mask_bdim;
16544 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
16545 Tensor value_value;
16546 optional<int64_t> value_bdim;
16547 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
16548 auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
16549 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16550}
16551template <typename batch_rule_t, batch_rule_t batch_rule>
16552at::Tensor & masked_scatter__generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
16553 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16554 auto maybe_layer = maybeCurrentDynamicLayer();
16555 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16556 int64_t cur_level = maybe_layer->layerId();
16557 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16558 return at::_ops::masked_scatter_::call(self, mask, source);
16559 }
16560 Tensor self_value;
16561 optional<int64_t> self_bdim;
16562 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16563 Tensor mask_value;
16564 optional<int64_t> mask_bdim;
16565 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
16566 Tensor source_value;
16567 optional<int64_t> source_bdim;
16568 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16569 batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
16570 return self;
16571}
16572template <typename batch_rule_t, batch_rule_t batch_rule>
16573at::Tensor masked_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
16574 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16575 auto maybe_layer = maybeCurrentDynamicLayer();
16576 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16577 int64_t cur_level = maybe_layer->layerId();
16578 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16579 return at::_ops::masked_scatter::call(self, mask, source);
16580 }
16581 Tensor self_value;
16582 optional<int64_t> self_bdim;
16583 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16584 Tensor mask_value;
16585 optional<int64_t> mask_bdim;
16586 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
16587 Tensor source_value;
16588 optional<int64_t> source_bdim;
16589 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16590 auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
16591 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16592}
16593template <typename batch_rule_t, batch_rule_t batch_rule>
16594at::Tensor _masked_softmax_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type) {
16595 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16596 auto maybe_layer = maybeCurrentDynamicLayer();
16597 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16598 int64_t cur_level = maybe_layer->layerId();
16599 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
16600 return at::_ops::_masked_softmax::call(self, mask, dim, mask_type);
16601 }
16602 Tensor self_value;
16603 optional<int64_t> self_bdim;
16604 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16605 Tensor mask_value;
16606 optional<int64_t> mask_bdim;
16607 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
16608 auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, dim, mask_type);
16609 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16610}
16611template <typename batch_rule_t, batch_rule_t batch_rule>
16612at::Tensor _masked_softmax_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim) {
16613 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16614 auto maybe_layer = maybeCurrentDynamicLayer();
16615 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16616 int64_t cur_level = maybe_layer->layerId();
16617 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
16618 return at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim);
16619 }
16620 Tensor grad_output_value;
16621 optional<int64_t> grad_output_bdim;
16622 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
16623 Tensor output_value;
16624 optional<int64_t> output_bdim;
16625 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
16626 Tensor mask_value;
16627 optional<int64_t> mask_bdim;
16628 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
16629 auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, mask_value, mask_bdim, dim);
16630 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16631}
16632template <typename batch_rule_t, batch_rule_t batch_rule>
16633at::Tensor view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
16634 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16635 auto maybe_layer = maybeCurrentDynamicLayer();
16636 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16637 int64_t cur_level = maybe_layer->layerId();
16638 if (!isBatchedAtLevel(self, cur_level)) {
16639 return at::_ops::view::call(self, size);
16640 }
16641 Tensor self_value;
16642 optional<int64_t> self_bdim;
16643 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16644 auto results = batch_rule(self_value, self_bdim, size);
16645 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16646}
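// Illustrative only (not part of the generated output): a plumbing template such as
// view_generated_plumbing above is instantiated with a batch rule whose parameters mirror
// the unwrapped arguments. For example, given a hypothetical rule
//
//   std::tuple<at::Tensor, optional<int64_t>> my_view_batch_rule(
//       const at::Tensor& self, optional<int64_t> self_bdim, c10::SymIntArrayRef size);
//
// one could instantiate
//
//   view_generated_plumbing<decltype(&my_view_batch_rule), &my_view_batch_rule>(t, size);
//
// In functorch itself the instantiation is driven by the batch-rule registration machinery
// rather than by direct calls like this; `my_view_batch_rule` is an illustrative name only.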
16647template <typename batch_rule_t, batch_rule_t batch_rule>
16648at::Tensor view_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
16649 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16650 auto maybe_layer = maybeCurrentDynamicLayer();
16651 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16652 int64_t cur_level = maybe_layer->layerId();
16653 if (!isBatchedAtLevel(self, cur_level)) {
16654 return at::_ops::view_dtype::call(self, dtype);
16655 }
16656 Tensor self_value;
16657 optional<int64_t> self_bdim;
16658 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16659 auto results = batch_rule(self_value, self_bdim, dtype);
16660 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16661}
16662template <typename batch_rule_t, batch_rule_t batch_rule>
16663at::Tensor & put__generated_plumbing(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
16664 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16665 auto maybe_layer = maybeCurrentDynamicLayer();
16666 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16667 int64_t cur_level = maybe_layer->layerId();
16668 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16669 return at::_ops::put_::call(self, index, source, accumulate);
16670 }
16671 Tensor self_value;
16672 optional<int64_t> self_bdim;
16673 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16674 Tensor index_value;
16675 optional<int64_t> index_bdim;
16676 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16677 Tensor source_value;
16678 optional<int64_t> source_bdim;
16679 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16680 batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
16681 return self;
16682}
16683template <typename batch_rule_t, batch_rule_t batch_rule>
16684at::Tensor put_generated_plumbing(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
16685 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16686 auto maybe_layer = maybeCurrentDynamicLayer();
16687 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16688 int64_t cur_level = maybe_layer->layerId();
16689 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16690 return at::_ops::put::call(self, index, source, accumulate);
16691 }
16692 Tensor self_value;
16693 optional<int64_t> self_bdim;
16694 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16695 Tensor index_value;
16696 optional<int64_t> index_bdim;
16697 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16698 Tensor source_value;
16699 optional<int64_t> source_bdim;
16700 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16701 auto results = batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
16702 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16703}
16704template <typename batch_rule_t, batch_rule_t batch_rule>
16705at::Tensor & index_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
16706 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16707 auto maybe_layer = maybeCurrentDynamicLayer();
16708 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16709 int64_t cur_level = maybe_layer->layerId();
16710 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16711 return at::_ops::index_add_::call(self, dim, index, source, alpha);
16712 }
16713 Tensor self_value;
16714 optional<int64_t> self_bdim;
16715 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16716 Tensor index_value;
16717 optional<int64_t> index_bdim;
16718 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16719 Tensor source_value;
16720 optional<int64_t> source_bdim;
16721 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16722 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
16723 return self;
16724}
16725template <typename batch_rule_t, batch_rule_t batch_rule>
16726at::Tensor index_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
16727 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16728 auto maybe_layer = maybeCurrentDynamicLayer();
16729 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16730 int64_t cur_level = maybe_layer->layerId();
16731 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16732 return at::_ops::index_add::call(self, dim, index, source, alpha);
16733 }
16734 Tensor self_value;
16735 optional<int64_t> self_bdim;
16736 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16737 Tensor index_value;
16738 optional<int64_t> index_bdim;
16739 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16740 Tensor source_value;
16741 optional<int64_t> source_bdim;
16742 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16743 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
16744 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16745}
16746template <typename batch_rule_t, batch_rule_t batch_rule>
16747at::Tensor index_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
16748 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16749 auto maybe_layer = maybeCurrentDynamicLayer();
16750 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16751 int64_t cur_level = maybe_layer->layerId();
16752 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16753 return at::_ops::index_add_dimname::call(self, dim, index, source, alpha);
16754 }
16755 Tensor self_value;
16756 optional<int64_t> self_bdim;
16757 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16758 Tensor index_value;
16759 optional<int64_t> index_bdim;
16760 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16761 Tensor source_value;
16762 optional<int64_t> source_bdim;
16763 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16764 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
16765 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16766}
16767template <typename batch_rule_t, batch_rule_t batch_rule>
16768at::Tensor & index_reduce__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
16769 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16770 auto maybe_layer = maybeCurrentDynamicLayer();
16771 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16772 int64_t cur_level = maybe_layer->layerId();
16773 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16774 return at::_ops::index_reduce_::call(self, dim, index, source, reduce, include_self);
16775 }
16776 Tensor self_value;
16777 optional<int64_t> self_bdim;
16778 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16779 Tensor index_value;
16780 optional<int64_t> index_bdim;
16781 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16782 Tensor source_value;
16783 optional<int64_t> source_bdim;
16784 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16785 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
16786 return self;
16787}
16788template <typename batch_rule_t, batch_rule_t batch_rule>
16789at::Tensor index_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
16790 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16791 auto maybe_layer = maybeCurrentDynamicLayer();
16792 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16793 int64_t cur_level = maybe_layer->layerId();
16794 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
16795 return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self);
16796 }
16797 Tensor self_value;
16798 optional<int64_t> self_bdim;
16799 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16800 Tensor index_value;
16801 optional<int64_t> index_bdim;
16802 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16803 Tensor source_value;
16804 optional<int64_t> source_bdim;
16805 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
16806 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
16807 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16808}
16809template <typename batch_rule_t, batch_rule_t batch_rule>
16810at::Tensor & index_fill__int_Scalar_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
16811 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16812 auto maybe_layer = maybeCurrentDynamicLayer();
16813 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16814 int64_t cur_level = maybe_layer->layerId();
16815 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16816 return at::_ops::index_fill__int_Scalar::call(self, dim, index, value);
16817 }
16818 Tensor self_value;
16819 optional<int64_t> self_bdim;
16820 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16821 Tensor index_value;
16822 optional<int64_t> index_bdim;
16823 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16824 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
16825 return self;
16826}
16827template <typename batch_rule_t, batch_rule_t batch_rule>
16828at::Tensor index_fill_int_Scalar_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
16829 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16830 auto maybe_layer = maybeCurrentDynamicLayer();
16831 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16832 int64_t cur_level = maybe_layer->layerId();
16833 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16834 return at::_ops::index_fill_int_Scalar::call(self, dim, index, value);
16835 }
16836 Tensor self_value;
16837 optional<int64_t> self_bdim;
16838 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16839 Tensor index_value;
16840 optional<int64_t> index_bdim;
16841 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16842 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
16843 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16844}
16845template <typename batch_rule_t, batch_rule_t batch_rule>
16846at::Tensor & index_fill__int_Tensor_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
16847 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16848 auto maybe_layer = maybeCurrentDynamicLayer();
16849 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16850 int64_t cur_level = maybe_layer->layerId();
16851 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
16852 return at::_ops::index_fill__int_Tensor::call(self, dim, index, value);
16853 }
16854 Tensor self_value;
16855 optional<int64_t> self_bdim;
16856 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16857 Tensor index_value;
16858 optional<int64_t> index_bdim;
16859 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16860 Tensor value_value;
16861 optional<int64_t> value_bdim;
16862 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
16863 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
16864 return self;
16865}
16866template <typename batch_rule_t, batch_rule_t batch_rule>
16867at::Tensor index_fill_int_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
16868 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16869 auto maybe_layer = maybeCurrentDynamicLayer();
16870 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16871 int64_t cur_level = maybe_layer->layerId();
16872 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
16873 return at::_ops::index_fill_int_Tensor::call(self, dim, index, value);
16874 }
16875 Tensor self_value;
16876 optional<int64_t> self_bdim;
16877 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16878 Tensor index_value;
16879 optional<int64_t> index_bdim;
16880 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16881 Tensor value_value;
16882 optional<int64_t> value_bdim;
16883 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
16884 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
16885 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16886}
16887template <typename batch_rule_t, batch_rule_t batch_rule>
16888at::Tensor & index_fill__Dimname_Scalar_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
16889 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16890 auto maybe_layer = maybeCurrentDynamicLayer();
16891 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16892 int64_t cur_level = maybe_layer->layerId();
16893 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16894 return at::_ops::index_fill__Dimname_Scalar::call(self, dim, index, value);
16895 }
16896 Tensor self_value;
16897 optional<int64_t> self_bdim;
16898 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16899 Tensor index_value;
16900 optional<int64_t> index_bdim;
16901 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16902 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
16903 return self;
16904}
16905template <typename batch_rule_t, batch_rule_t batch_rule>
16906at::Tensor & index_fill__Dimname_Tensor_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
16907 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16908 auto maybe_layer = maybeCurrentDynamicLayer();
16909 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16910 int64_t cur_level = maybe_layer->layerId();
16911 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
16912 return at::_ops::index_fill__Dimname_Tensor::call(self, dim, index, value);
16913 }
16914 Tensor self_value;
16915 optional<int64_t> self_bdim;
16916 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16917 Tensor index_value;
16918 optional<int64_t> index_bdim;
16919 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16920 Tensor value_value;
16921 optional<int64_t> value_bdim;
16922 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
16923 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
16924 return self;
16925}
16926template <typename batch_rule_t, batch_rule_t batch_rule>
16927at::Tensor index_fill_Dimname_Scalar_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
16928 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16929 auto maybe_layer = maybeCurrentDynamicLayer();
16930 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16931 int64_t cur_level = maybe_layer->layerId();
16932 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
16933 return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value);
16934 }
16935 Tensor self_value;
16936 optional<int64_t> self_bdim;
16937 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16938 Tensor index_value;
16939 optional<int64_t> index_bdim;
16940 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16941 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
16942 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16943}
16944template <typename batch_rule_t, batch_rule_t batch_rule>
16945at::Tensor index_fill_Dimname_Tensor_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
16946 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16947 auto maybe_layer = maybeCurrentDynamicLayer();
16948 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16949 int64_t cur_level = maybe_layer->layerId();
16950 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
16951 return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value);
16952 }
16953 Tensor self_value;
16954 optional<int64_t> self_bdim;
16955 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16956 Tensor index_value;
16957 optional<int64_t> index_bdim;
16958 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16959 Tensor value_value;
16960 optional<int64_t> value_bdim;
16961 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
16962 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
16963 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16964}
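// Each scatter overload (src, value, reduce, value_reduce, dimname_src) gets its own
// plumbing entry below, falling through to the matching at::_ops schema when nothing is
// batched at the current level and otherwise applying the same unwrap/re-wrap pattern.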
16965template <typename batch_rule_t, batch_rule_t batch_rule>
16966at::Tensor scatter_src_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
16967 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16968 auto maybe_layer = maybeCurrentDynamicLayer();
16969 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
16970 int64_t cur_level = maybe_layer->layerId();
16971 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
16972 return at::_ops::scatter_src::call(self, dim, index, src);
16973 }
16974 Tensor self_value;
16975 optional<int64_t> self_bdim;
16976 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16977 Tensor index_value;
16978 optional<int64_t> index_bdim;
16979 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
16980 Tensor src_value;
16981 optional<int64_t> src_bdim;
16982 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
16983 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
16984 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
16985}
16986template <typename batch_rule_t, batch_rule_t batch_rule>
16987at::Tensor & scatter__src_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
16988 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
16989 auto maybe_layer = maybeCurrentDynamicLayer();
16990 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
16991 int64_t cur_level = maybe_layer->layerId();
16992 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
16993 return at::_ops::scatter__src::call(self, dim, index, src);
16994 }
16995 Tensor self_value;
16996 optional<int64_t> self_bdim;
16997 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
16998 Tensor index_value;
16999 optional<int64_t> index_bdim;
17000 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17001 Tensor src_value;
17002 optional<int64_t> src_bdim;
17003 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
17004 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
17005 return self;
17006}
17007template <typename batch_rule_t, batch_rule_t batch_rule>
17008at::Tensor scatter_value_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
17009 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17010 auto maybe_layer = maybeCurrentDynamicLayer();
17011 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17012 int64_t cur_level = maybe_layer->layerId();
17013 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
17014 return at::_ops::scatter_value::call(self, dim, index, value);
17015 }
17016 Tensor self_value;
17017 optional<int64_t> self_bdim;
17018 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17019 Tensor index_value;
17020 optional<int64_t> index_bdim;
17021 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17022 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
17023 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17024}
17025template <typename batch_rule_t, batch_rule_t batch_rule>
17026at::Tensor & scatter__value_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
17027 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17028 auto maybe_layer = maybeCurrentDynamicLayer();
17029 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17030 int64_t cur_level = maybe_layer->layerId();
17031 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
17032 return at::_ops::scatter__value::call(self, dim, index, value);
17033 }
17034 Tensor self_value;
17035 optional<int64_t> self_bdim;
17036 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17037 Tensor index_value;
17038 optional<int64_t> index_bdim;
17039 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17040 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
17041 return self;
17042}
17043template <typename batch_rule_t, batch_rule_t batch_rule>
17044at::Tensor scatter_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
17045 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17046 auto maybe_layer = maybeCurrentDynamicLayer();
17047 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17048 int64_t cur_level = maybe_layer->layerId();
17049 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
17050 return at::_ops::scatter_reduce::call(self, dim, index, src, reduce);
17051 }
17052 Tensor self_value;
17053 optional<int64_t> self_bdim;
17054 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17055 Tensor index_value;
17056 optional<int64_t> index_bdim;
17057 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17058 Tensor src_value;
17059 optional<int64_t> src_bdim;
17060 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
17061 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
17062 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17063}
17064template <typename batch_rule_t, batch_rule_t batch_rule>
17065at::Tensor & scatter__reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
17066 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17067 auto maybe_layer = maybeCurrentDynamicLayer();
17068 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17069 int64_t cur_level = maybe_layer->layerId();
17070 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
17071 return at::_ops::scatter__reduce::call(self, dim, index, src, reduce);
17072 }
17073 Tensor self_value;
17074 optional<int64_t> self_bdim;
17075 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17076 Tensor index_value;
17077 optional<int64_t> index_bdim;
17078 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17079 Tensor src_value;
17080 optional<int64_t> src_bdim;
17081 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
17082 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
17083 return self;
17084}
17085template <typename batch_rule_t, batch_rule_t batch_rule>
17086at::Tensor scatter_value_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
17087 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17088 auto maybe_layer = maybeCurrentDynamicLayer();
17089 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17090 int64_t cur_level = maybe_layer->layerId();
17091 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
17092 return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce);
17093 }
17094 Tensor self_value;
17095 optional<int64_t> self_bdim;
17096 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17097 Tensor index_value;
17098 optional<int64_t> index_bdim;
17099 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17100 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
17101 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17102}
17103template <typename batch_rule_t, batch_rule_t batch_rule>
17104at::Tensor & scatter__value_reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
17105 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17106 auto maybe_layer = maybeCurrentDynamicLayer();
17107 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17108 int64_t cur_level = maybe_layer->layerId();
17109 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
17110 return at::_ops::scatter__value_reduce::call(self, dim, index, value, reduce);
17111 }
17112 Tensor self_value;
17113 optional<int64_t> self_bdim;
17114 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17115 Tensor index_value;
17116 optional<int64_t> index_bdim;
17117 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17118 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
17119 return self;
17120}
17121template <typename batch_rule_t, batch_rule_t batch_rule>
17122at::Tensor scatter_dimname_src_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
17123 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17124 auto maybe_layer = maybeCurrentDynamicLayer();
17125 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17126 int64_t cur_level = maybe_layer->layerId();
17127 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
17128 return at::_ops::scatter_dimname_src::call(self, dim, index, src);
17129 }
17130 Tensor self_value;
17131 optional<int64_t> self_bdim;
17132 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17133 Tensor index_value;
17134 optional<int64_t> index_bdim;
17135 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17136 Tensor src_value;
17137 optional<int64_t> src_bdim;
17138 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
17139 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
17140 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17141}
17142template <typename batch_rule_t, batch_rule_t batch_rule>
17143at::Tensor scatter_dimname_value_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
17144 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17145 auto maybe_layer = maybeCurrentDynamicLayer();
17146 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17147 int64_t cur_level = maybe_layer->layerId();
17148 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
17149 return at::_ops::scatter_dimname_value::call(self, dim, index, value);
17150 }
17151 Tensor self_value;
17152 optional<int64_t> self_bdim;
17153 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17154 Tensor index_value;
17155 optional<int64_t> index_bdim;
17156 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17157 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
17158 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17159}
17160template <typename batch_rule_t, batch_rule_t batch_rule>
17161at::Tensor scatter_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
17162 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17163 auto maybe_layer = maybeCurrentDynamicLayer();
17164 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17165 int64_t cur_level = maybe_layer->layerId();
17166 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
17167 return at::_ops::scatter_add::call(self, dim, index, src);
17168 }
17169 Tensor self_value;
17170 optional<int64_t> self_bdim;
17171 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17172 Tensor index_value;
17173 optional<int64_t> index_bdim;
17174 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17175 Tensor src_value;
17176 optional<int64_t> src_bdim;
17177 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
17178 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
17179 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17180}
17181template <typename batch_rule_t, batch_rule_t batch_rule>
17182at::Tensor & scatter_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
17183 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17184 auto maybe_layer = maybeCurrentDynamicLayer();
17185 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17186 int64_t cur_level = maybe_layer->layerId();
17187 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
17188 return at::_ops::scatter_add_::call(self, dim, index, src);
17189 }
17190 Tensor self_value;
17191 optional<int64_t> self_bdim;
17192 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17193 Tensor index_value;
17194 optional<int64_t> index_bdim;
17195 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17196 Tensor src_value;
17197 optional<int64_t> src_bdim;
17198 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
17199 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
17200 return self;
17201}
17202template <typename batch_rule_t, batch_rule_t batch_rule>
17203at::Tensor scatter_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
17204 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17205 auto maybe_layer = maybeCurrentDynamicLayer();
17206 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17207 int64_t cur_level = maybe_layer->layerId();
17208 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
17209 return at::_ops::scatter_add_dimname::call(self, dim, index, src);
17210 }
17211 Tensor self_value;
17212 optional<int64_t> self_bdim;
17213 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17214 Tensor index_value;
17215 optional<int64_t> index_bdim;
17216 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17217 Tensor src_value;
17218 optional<int64_t> src_bdim;
17219 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
17220 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
17221 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17222}
17223template <typename batch_rule_t, batch_rule_t batch_rule>
17224at::Tensor scatter_reduce_two_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
17225 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17226 auto maybe_layer = maybeCurrentDynamicLayer();
17227 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17228 int64_t cur_level = maybe_layer->layerId();
17229 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
17230 return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self);
17231 }
17232 Tensor self_value;
17233 optional<int64_t> self_bdim;
17234 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17235 Tensor index_value;
17236 optional<int64_t> index_bdim;
17237 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17238 Tensor src_value;
17239 optional<int64_t> src_bdim;
17240 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
17241 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
17242 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17243}
17244template <typename batch_rule_t, batch_rule_t batch_rule>
17245at::Tensor & scatter_reduce__two_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
17246 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17247 auto maybe_layer = maybeCurrentDynamicLayer();
17248 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17249 int64_t cur_level = maybe_layer->layerId();
17250 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
17251 return at::_ops::scatter_reduce__two::call(self, dim, index, src, reduce, include_self);
17252 }
17253 Tensor self_value;
17254 optional<int64_t> self_bdim;
17255 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17256 Tensor index_value;
17257 optional<int64_t> index_bdim;
17258 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
17259 Tensor src_value;
17260 optional<int64_t> src_bdim;
17261 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
17262 batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
17263 return self;
17264}
17265template <typename batch_rule_t, batch_rule_t batch_rule>
17266at::Tensor & eq__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17267 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17268 auto maybe_layer = maybeCurrentDynamicLayer();
17269 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17270 int64_t cur_level = maybe_layer->layerId();
17271 if (!isBatchedAtLevel(self, cur_level)) {
17272 return at::_ops::eq__Scalar::call(self, other);
17273 }
17274 Tensor self_value;
17275 optional<int64_t> self_bdim;
17276 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17277 batch_rule(self_value, self_bdim, other);
17278 return self;
17279}
17280template <typename batch_rule_t, batch_rule_t batch_rule>
17281at::Tensor & eq__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17282 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17283 auto maybe_layer = maybeCurrentDynamicLayer();
17284 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17285 int64_t cur_level = maybe_layer->layerId();
17286 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17287 return at::_ops::eq__Tensor::call(self, other);
17288 }
17289 Tensor self_value;
17290 optional<int64_t> self_bdim;
17291 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17292 Tensor other_value;
17293 optional<int64_t> other_bdim;
17294 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17295 batch_rule(self_value, self_bdim, other_value, other_bdim);
17296 return self;
17297}
17298template <typename batch_rule_t, batch_rule_t batch_rule>
17299at::Tensor bitwise_and_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17300 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17301 auto maybe_layer = maybeCurrentDynamicLayer();
17302 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17303 int64_t cur_level = maybe_layer->layerId();
17304 if (!isBatchedAtLevel(self, cur_level)) {
17305 return at::_ops::bitwise_and_Scalar::call(self, other);
17306 }
17307 Tensor self_value;
17308 optional<int64_t> self_bdim;
17309 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17310 auto results = batch_rule(self_value, self_bdim, other);
17311 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17312}
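// For the Scalar_Tensor overloads below, `self` is a plain at::Scalar rather than a Tensor,
// so only `other` is checked with isBatchedAtLevel and unwrapped; the scalar is forwarded
// to the batch rule unchanged.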
17313template <typename batch_rule_t, batch_rule_t batch_rule>
17314at::Tensor bitwise_and_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
17315 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17316 auto maybe_layer = maybeCurrentDynamicLayer();
17317 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17318 int64_t cur_level = maybe_layer->layerId();
17319 if (!isBatchedAtLevel(other, cur_level)) {
17320 return at::_ops::bitwise_and_Scalar_Tensor::call(self, other);
17321 }
17322 Tensor other_value;
17323 optional<int64_t> other_bdim;
17324 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17325 auto results = batch_rule(self, other_value, other_bdim);
17326 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17327}
17328template <typename batch_rule_t, batch_rule_t batch_rule>
17329at::Tensor bitwise_and_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17330 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17331 auto maybe_layer = maybeCurrentDynamicLayer();
17332 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17333 int64_t cur_level = maybe_layer->layerId();
17334 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17335 return at::_ops::bitwise_and_Tensor::call(self, other);
17336 }
17337 Tensor self_value;
17338 optional<int64_t> self_bdim;
17339 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17340 Tensor other_value;
17341 optional<int64_t> other_bdim;
17342 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17343 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17344 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17345}
17346template <typename batch_rule_t, batch_rule_t batch_rule>
17347at::Tensor & bitwise_and__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17348 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17349 auto maybe_layer = maybeCurrentDynamicLayer();
17350 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17351 int64_t cur_level = maybe_layer->layerId();
17352 if (!isBatchedAtLevel(self, cur_level)) {
17353 return at::_ops::bitwise_and__Scalar::call(self, other);
17354 }
17355 Tensor self_value;
17356 optional<int64_t> self_bdim;
17357 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17358 batch_rule(self_value, self_bdim, other);
17359 return self;
17360}
17361template <typename batch_rule_t, batch_rule_t batch_rule>
17362at::Tensor & bitwise_and__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17363 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17364 auto maybe_layer = maybeCurrentDynamicLayer();
17365 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17366 int64_t cur_level = maybe_layer->layerId();
17367 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17368 return at::_ops::bitwise_and__Tensor::call(self, other);
17369 }
17370 Tensor self_value;
17371 optional<int64_t> self_bdim;
17372 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17373 Tensor other_value;
17374 optional<int64_t> other_bdim;
17375 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17376 batch_rule(self_value, self_bdim, other_value, other_bdim);
17377 return self;
17378}
17379template <typename batch_rule_t, batch_rule_t batch_rule>
17380at::Tensor __and___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17381 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17382 auto maybe_layer = maybeCurrentDynamicLayer();
17383 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17384 int64_t cur_level = maybe_layer->layerId();
17385 if (!isBatchedAtLevel(self, cur_level)) {
17386 return at::_ops::__and___Scalar::call(self, other);
17387 }
17388 Tensor self_value;
17389 optional<int64_t> self_bdim;
17390 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17391 auto results = batch_rule(self_value, self_bdim, other);
17392 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17393}
17394template <typename batch_rule_t, batch_rule_t batch_rule>
17395at::Tensor __and___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17396 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17397 auto maybe_layer = maybeCurrentDynamicLayer();
17398 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17399 int64_t cur_level = maybe_layer->layerId();
17400 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17401 return at::_ops::__and___Tensor::call(self, other);
17402 }
17403 Tensor self_value;
17404 optional<int64_t> self_bdim;
17405 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17406 Tensor other_value;
17407 optional<int64_t> other_bdim;
17408 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17409 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17410 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17411}
17412template <typename batch_rule_t, batch_rule_t batch_rule>
17413at::Tensor & __iand___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17414 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17415 auto maybe_layer = maybeCurrentDynamicLayer();
17416 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17417 int64_t cur_level = maybe_layer->layerId();
17418 if (!isBatchedAtLevel(self, cur_level)) {
17419 return at::_ops::__iand___Scalar::call(self, other);
17420 }
17421 Tensor self_value;
17422 optional<int64_t> self_bdim;
17423 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17424 batch_rule(self_value, self_bdim, other);
17425 return self;
17426}
17427template <typename batch_rule_t, batch_rule_t batch_rule>
17428at::Tensor & __iand___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17429 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17430 auto maybe_layer = maybeCurrentDynamicLayer();
17431 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17432 int64_t cur_level = maybe_layer->layerId();
17433 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17434 return at::_ops::__iand___Tensor::call(self, other);
17435 }
17436 Tensor self_value;
17437 optional<int64_t> self_bdim;
17438 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17439 Tensor other_value;
17440 optional<int64_t> other_bdim;
17441 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17442 batch_rule(self_value, self_bdim, other_value, other_bdim);
17443 return self;
17444}
17445template <typename batch_rule_t, batch_rule_t batch_rule>
17446at::Tensor bitwise_or_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17447 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17448 auto maybe_layer = maybeCurrentDynamicLayer();
17449 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17450 int64_t cur_level = maybe_layer->layerId();
17451 if (!isBatchedAtLevel(self, cur_level)) {
17452 return at::_ops::bitwise_or_Scalar::call(self, other);
17453 }
17454 Tensor self_value;
17455 optional<int64_t> self_bdim;
17456 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17457 auto results = batch_rule(self_value, self_bdim, other);
17458 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17459}
17460template <typename batch_rule_t, batch_rule_t batch_rule>
17461at::Tensor bitwise_or_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
17462 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17463 auto maybe_layer = maybeCurrentDynamicLayer();
17464 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17465 int64_t cur_level = maybe_layer->layerId();
17466 if (!isBatchedAtLevel(other, cur_level)) {
17467 return at::_ops::bitwise_or_Scalar_Tensor::call(self, other);
17468 }
17469 Tensor other_value;
17470 optional<int64_t> other_bdim;
17471 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17472 auto results = batch_rule(self, other_value, other_bdim);
17473 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17474}
17475template <typename batch_rule_t, batch_rule_t batch_rule>
17476at::Tensor bitwise_or_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17477 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17478 auto maybe_layer = maybeCurrentDynamicLayer();
17479 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17480 int64_t cur_level = maybe_layer->layerId();
17481 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17482 return at::_ops::bitwise_or_Tensor::call(self, other);
17483 }
17484 Tensor self_value;
17485 optional<int64_t> self_bdim;
17486 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17487 Tensor other_value;
17488 optional<int64_t> other_bdim;
17489 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17490 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17491 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17492}
17493template <typename batch_rule_t, batch_rule_t batch_rule>
17494at::Tensor & bitwise_or__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17495 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17496 auto maybe_layer = maybeCurrentDynamicLayer();
17497 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17498 int64_t cur_level = maybe_layer->layerId();
17499 if (!isBatchedAtLevel(self, cur_level)) {
17500 return at::_ops::bitwise_or__Scalar::call(self, other);
17501 }
17502 Tensor self_value;
17503 optional<int64_t> self_bdim;
17504 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17505 batch_rule(self_value, self_bdim, other);
17506 return self;
17507}
17508template <typename batch_rule_t, batch_rule_t batch_rule>
17509at::Tensor & bitwise_or__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17510 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17511 auto maybe_layer = maybeCurrentDynamicLayer();
17512 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17513 int64_t cur_level = maybe_layer->layerId();
17514 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17515 return at::_ops::bitwise_or__Tensor::call(self, other);
17516 }
17517 Tensor self_value;
17518 optional<int64_t> self_bdim;
17519 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17520 Tensor other_value;
17521 optional<int64_t> other_bdim;
17522 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17523 batch_rule(self_value, self_bdim, other_value, other_bdim);
17524 return self;
17525}
17526template <typename batch_rule_t, batch_rule_t batch_rule>
17527at::Tensor __or___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17528 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17529 auto maybe_layer = maybeCurrentDynamicLayer();
17530 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17531 int64_t cur_level = maybe_layer->layerId();
17532 if (!isBatchedAtLevel(self, cur_level)) {
17533 return at::_ops::__or___Scalar::call(self, other);
17534 }
17535 Tensor self_value;
17536 optional<int64_t> self_bdim;
17537 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17538 auto results = batch_rule(self_value, self_bdim, other);
17539 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17540}
17541template <typename batch_rule_t, batch_rule_t batch_rule>
17542at::Tensor __or___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17543 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17544 auto maybe_layer = maybeCurrentDynamicLayer();
17545 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17546 int64_t cur_level = maybe_layer->layerId();
17547 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17548 return at::_ops::__or___Tensor::call(self, other);
17549 }
17550 Tensor self_value;
17551 optional<int64_t> self_bdim;
17552 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17553 Tensor other_value;
17554 optional<int64_t> other_bdim;
17555 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17556 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17557 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17558}
17559template <typename batch_rule_t, batch_rule_t batch_rule>
17560at::Tensor & __ior___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17561 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17562 auto maybe_layer = maybeCurrentDynamicLayer();
17563 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17564 int64_t cur_level = maybe_layer->layerId();
17565 if (!isBatchedAtLevel(self, cur_level)) {
17566 return at::_ops::__ior___Scalar::call(self, other);
17567 }
17568 Tensor self_value;
17569 optional<int64_t> self_bdim;
17570 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17571 batch_rule(self_value, self_bdim, other);
17572 return self;
17573}
17574template <typename batch_rule_t, batch_rule_t batch_rule>
17575at::Tensor & __ior___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17576 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17577 auto maybe_layer = maybeCurrentDynamicLayer();
17578 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17579 int64_t cur_level = maybe_layer->layerId();
17580 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17581 return at::_ops::__ior___Tensor::call(self, other);
17582 }
17583 Tensor self_value;
17584 optional<int64_t> self_bdim;
17585 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17586 Tensor other_value;
17587 optional<int64_t> other_bdim;
17588 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17589 batch_rule(self_value, self_bdim, other_value, other_bdim);
17590 return self;
17591}
17592template <typename batch_rule_t, batch_rule_t batch_rule>
17593at::Tensor bitwise_xor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17594 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17595 auto maybe_layer = maybeCurrentDynamicLayer();
17596 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17597 int64_t cur_level = maybe_layer->layerId();
17598 if (!isBatchedAtLevel(self, cur_level)) {
17599 return at::_ops::bitwise_xor_Scalar::call(self, other);
17600 }
17601 Tensor self_value;
17602 optional<int64_t> self_bdim;
17603 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17604 auto results = batch_rule(self_value, self_bdim, other);
17605 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17606}
17607template <typename batch_rule_t, batch_rule_t batch_rule>
17608at::Tensor bitwise_xor_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
17609 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17610 auto maybe_layer = maybeCurrentDynamicLayer();
17611 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17612 int64_t cur_level = maybe_layer->layerId();
17613 if (!isBatchedAtLevel(other, cur_level)) {
17614 return at::_ops::bitwise_xor_Scalar_Tensor::call(self, other);
17615 }
17616 Tensor other_value;
17617 optional<int64_t> other_bdim;
17618 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17619 auto results = batch_rule(self, other_value, other_bdim);
17620 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17621}
17622template <typename batch_rule_t, batch_rule_t batch_rule>
17623at::Tensor bitwise_xor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17624 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17625 auto maybe_layer = maybeCurrentDynamicLayer();
17626 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17627 int64_t cur_level = maybe_layer->layerId();
17628 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17629 return at::_ops::bitwise_xor_Tensor::call(self, other);
17630 }
17631 Tensor self_value;
17632 optional<int64_t> self_bdim;
17633 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17634 Tensor other_value;
17635 optional<int64_t> other_bdim;
17636 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17637 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17638 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17639}
17640template <typename batch_rule_t, batch_rule_t batch_rule>
17641at::Tensor & bitwise_xor__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17642 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17643 auto maybe_layer = maybeCurrentDynamicLayer();
17644 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17645 int64_t cur_level = maybe_layer->layerId();
17646 if (!isBatchedAtLevel(self, cur_level)) {
17647 return at::_ops::bitwise_xor__Scalar::call(self, other);
17648 }
17649 Tensor self_value;
17650 optional<int64_t> self_bdim;
17651 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17652 batch_rule(self_value, self_bdim, other);
17653 return self;
17654}
17655template <typename batch_rule_t, batch_rule_t batch_rule>
17656at::Tensor & bitwise_xor__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17657 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17658 auto maybe_layer = maybeCurrentDynamicLayer();
17659 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17660 int64_t cur_level = maybe_layer->layerId();
17661 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17662 return at::_ops::bitwise_xor__Tensor::call(self, other);
17663 }
17664 Tensor self_value;
17665 optional<int64_t> self_bdim;
17666 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17667 Tensor other_value;
17668 optional<int64_t> other_bdim;
17669 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17670 batch_rule(self_value, self_bdim, other_value, other_bdim);
17671 return self;
17672}
17673template <typename batch_rule_t, batch_rule_t batch_rule>
17674at::Tensor __xor___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17675 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17676 auto maybe_layer = maybeCurrentDynamicLayer();
17677 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17678 int64_t cur_level = maybe_layer->layerId();
17679 if (!isBatchedAtLevel(self, cur_level)) {
17680 return at::_ops::__xor___Scalar::call(self, other);
17681 }
17682 Tensor self_value;
17683 optional<int64_t> self_bdim;
17684 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17685 auto results = batch_rule(self_value, self_bdim, other);
17686 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17687}
17688template <typename batch_rule_t, batch_rule_t batch_rule>
17689at::Tensor __xor___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17690 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17691 auto maybe_layer = maybeCurrentDynamicLayer();
17692 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17693 int64_t cur_level = maybe_layer->layerId();
17694 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17695 return at::_ops::__xor___Tensor::call(self, other);
17696 }
17697 Tensor self_value;
17698 optional<int64_t> self_bdim;
17699 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17700 Tensor other_value;
17701 optional<int64_t> other_bdim;
17702 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17703 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17704 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17705}
17706template <typename batch_rule_t, batch_rule_t batch_rule>
17707at::Tensor & __ixor___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17708 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17709 auto maybe_layer = maybeCurrentDynamicLayer();
17710 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17711 int64_t cur_level = maybe_layer->layerId();
17712 if (!isBatchedAtLevel(self, cur_level)) {
17713 return at::_ops::__ixor___Scalar::call(self, other);
17714 }
17715 Tensor self_value;
17716 optional<int64_t> self_bdim;
17717 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17718 batch_rule(self_value, self_bdim, other);
17719 return self;
17720}
17721template <typename batch_rule_t, batch_rule_t batch_rule>
17722at::Tensor & __ixor___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17723 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17724 auto maybe_layer = maybeCurrentDynamicLayer();
17725 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17726 int64_t cur_level = maybe_layer->layerId();
17727 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17728 return at::_ops::__ixor___Tensor::call(self, other);
17729 }
17730 Tensor self_value;
17731 optional<int64_t> self_bdim;
17732 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17733 Tensor other_value;
17734 optional<int64_t> other_bdim;
17735 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17736 batch_rule(self_value, self_bdim, other_value, other_bdim);
17737 return self;
17738}
17739template <typename batch_rule_t, batch_rule_t batch_rule>
17740at::Tensor __lshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17741 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17742 auto maybe_layer = maybeCurrentDynamicLayer();
17743 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17744 int64_t cur_level = maybe_layer->layerId();
17745 if (!isBatchedAtLevel(self, cur_level)) {
17746 return at::_ops::__lshift___Scalar::call(self, other);
17747 }
17748 Tensor self_value;
17749 optional<int64_t> self_bdim;
17750 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17751 auto results = batch_rule(self_value, self_bdim, other);
17752 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17753}
17754template <typename batch_rule_t, batch_rule_t batch_rule>
17755at::Tensor __lshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17756 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17757 auto maybe_layer = maybeCurrentDynamicLayer();
17758 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17759 int64_t cur_level = maybe_layer->layerId();
17760 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17761 return at::_ops::__lshift___Tensor::call(self, other);
17762 }
17763 Tensor self_value;
17764 optional<int64_t> self_bdim;
17765 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17766 Tensor other_value;
17767 optional<int64_t> other_bdim;
17768 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17769 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17770 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17771}
17772template <typename batch_rule_t, batch_rule_t batch_rule>
17773at::Tensor & __ilshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17774 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17775 auto maybe_layer = maybeCurrentDynamicLayer();
17776 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17777 int64_t cur_level = maybe_layer->layerId();
17778 if (!isBatchedAtLevel(self, cur_level)) {
17779 return at::_ops::__ilshift___Scalar::call(self, other);
17780 }
17781 Tensor self_value;
17782 optional<int64_t> self_bdim;
17783 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17784 batch_rule(self_value, self_bdim, other);
17785 return self;
17786}
17787template <typename batch_rule_t, batch_rule_t batch_rule>
17788at::Tensor & __ilshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17789 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17790 auto maybe_layer = maybeCurrentDynamicLayer();
17791 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17792 int64_t cur_level = maybe_layer->layerId();
17793 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17794 return at::_ops::__ilshift___Tensor::call(self, other);
17795 }
17796 Tensor self_value;
17797 optional<int64_t> self_bdim;
17798 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17799 Tensor other_value;
17800 optional<int64_t> other_bdim;
17801 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17802 batch_rule(self_value, self_bdim, other_value, other_bdim);
17803 return self;
17804}
17805template <typename batch_rule_t, batch_rule_t batch_rule>
17806at::Tensor bitwise_left_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17807 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17808 auto maybe_layer = maybeCurrentDynamicLayer();
17809 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17810 int64_t cur_level = maybe_layer->layerId();
17811 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17812 return at::_ops::bitwise_left_shift_Tensor::call(self, other);
17813 }
17814 Tensor self_value;
17815 optional<int64_t> self_bdim;
17816 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17817 Tensor other_value;
17818 optional<int64_t> other_bdim;
17819 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17820 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17821 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17822}
17823template <typename batch_rule_t, batch_rule_t batch_rule>
17824at::Tensor & bitwise_left_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17825 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17826 auto maybe_layer = maybeCurrentDynamicLayer();
17827 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17828 int64_t cur_level = maybe_layer->layerId();
17829 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17830 return at::_ops::bitwise_left_shift__Tensor::call(self, other);
17831 }
17832 Tensor self_value;
17833 optional<int64_t> self_bdim;
17834 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17835 Tensor other_value;
17836 optional<int64_t> other_bdim;
17837 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17838 batch_rule(self_value, self_bdim, other_value, other_bdim);
17839 return self;
17840}
17841template <typename batch_rule_t, batch_rule_t batch_rule>
17842at::Tensor bitwise_left_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17843 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17844 auto maybe_layer = maybeCurrentDynamicLayer();
17845 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17846 int64_t cur_level = maybe_layer->layerId();
17847 if (!isBatchedAtLevel(self, cur_level)) {
17848 return at::_ops::bitwise_left_shift_Tensor_Scalar::call(self, other);
17849 }
17850 Tensor self_value;
17851 optional<int64_t> self_bdim;
17852 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17853 auto results = batch_rule(self_value, self_bdim, other);
17854 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17855}
17856template <typename batch_rule_t, batch_rule_t batch_rule>
17857at::Tensor & bitwise_left_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17858 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17859 auto maybe_layer = maybeCurrentDynamicLayer();
17860 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17861 int64_t cur_level = maybe_layer->layerId();
17862 if (!isBatchedAtLevel(self, cur_level)) {
17863 return at::_ops::bitwise_left_shift__Tensor_Scalar::call(self, other);
17864 }
17865 Tensor self_value;
17866 optional<int64_t> self_bdim;
17867 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17868 batch_rule(self_value, self_bdim, other);
17869 return self;
17870}
17871template <typename batch_rule_t, batch_rule_t batch_rule>
17872at::Tensor bitwise_left_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
17873 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17874 auto maybe_layer = maybeCurrentDynamicLayer();
17875 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17876 int64_t cur_level = maybe_layer->layerId();
17877 if (!isBatchedAtLevel(other, cur_level)) {
17878 return at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other);
17879 }
17880 Tensor other_value;
17881 optional<int64_t> other_bdim;
17882 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17883 auto results = batch_rule(self, other_value, other_bdim);
17884 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17885}
17886template <typename batch_rule_t, batch_rule_t batch_rule>
17887at::Tensor __rshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17888 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17889 auto maybe_layer = maybeCurrentDynamicLayer();
17890 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17891 int64_t cur_level = maybe_layer->layerId();
17892 if (!isBatchedAtLevel(self, cur_level)) {
17893 return at::_ops::__rshift___Scalar::call(self, other);
17894 }
17895 Tensor self_value;
17896 optional<int64_t> self_bdim;
17897 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17898 auto results = batch_rule(self_value, self_bdim, other);
17899 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17900}
17901template <typename batch_rule_t, batch_rule_t batch_rule>
17902at::Tensor __rshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17903 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17904 auto maybe_layer = maybeCurrentDynamicLayer();
17905 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17906 int64_t cur_level = maybe_layer->layerId();
17907 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17908 return at::_ops::__rshift___Tensor::call(self, other);
17909 }
17910 Tensor self_value;
17911 optional<int64_t> self_bdim;
17912 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17913 Tensor other_value;
17914 optional<int64_t> other_bdim;
17915 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17916 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17917 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17918}
17919template <typename batch_rule_t, batch_rule_t batch_rule>
17920at::Tensor & __irshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
17921 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17922 auto maybe_layer = maybeCurrentDynamicLayer();
17923 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17924 int64_t cur_level = maybe_layer->layerId();
17925 if (!isBatchedAtLevel(self, cur_level)) {
17926 return at::_ops::__irshift___Scalar::call(self, other);
17927 }
17928 Tensor self_value;
17929 optional<int64_t> self_bdim;
17930 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17931 batch_rule(self_value, self_bdim, other);
17932 return self;
17933}
17934template <typename batch_rule_t, batch_rule_t batch_rule>
17935at::Tensor & __irshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17936 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17937 auto maybe_layer = maybeCurrentDynamicLayer();
17938 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17939 int64_t cur_level = maybe_layer->layerId();
17940 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17941 return at::_ops::__irshift___Tensor::call(self, other);
17942 }
17943 Tensor self_value;
17944 optional<int64_t> self_bdim;
17945 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17946 Tensor other_value;
17947 optional<int64_t> other_bdim;
17948 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17949 batch_rule(self_value, self_bdim, other_value, other_bdim);
17950 return self;
17951}
17952template <typename batch_rule_t, batch_rule_t batch_rule>
17953at::Tensor bitwise_right_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
17954 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17955 auto maybe_layer = maybeCurrentDynamicLayer();
17956 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17957 int64_t cur_level = maybe_layer->layerId();
17958 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17959 return at::_ops::bitwise_right_shift_Tensor::call(self, other);
17960 }
17961 Tensor self_value;
17962 optional<int64_t> self_bdim;
17963 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17964 Tensor other_value;
17965 optional<int64_t> other_bdim;
17966 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17967 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
17968 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
17969}
17970template <typename batch_rule_t, batch_rule_t batch_rule>
17971at::Tensor & bitwise_right_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
17972 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17973 auto maybe_layer = maybeCurrentDynamicLayer();
17974 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
17975 int64_t cur_level = maybe_layer->layerId();
17976 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
17977 return at::_ops::bitwise_right_shift__Tensor::call(self, other);
17978 }
17979 Tensor self_value;
17980 optional<int64_t> self_bdim;
17981 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
17982 Tensor other_value;
17983 optional<int64_t> other_bdim;
17984 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
17985 batch_rule(self_value, self_bdim, other_value, other_bdim);
17986 return self;
17987}
17988template <typename batch_rule_t, batch_rule_t batch_rule>
17989at::Tensor bitwise_right_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
17990 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
17991 auto maybe_layer = maybeCurrentDynamicLayer();
17992 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
17993 int64_t cur_level = maybe_layer->layerId();
17994 if (!isBatchedAtLevel(self, cur_level)) {
17995 return at::_ops::bitwise_right_shift_Tensor_Scalar::call(self, other);
17996 }
17997 Tensor self_value;
17998 optional<int64_t> self_bdim;
17999 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18000 auto results = batch_rule(self_value, self_bdim, other);
18001 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18002}
18003template <typename batch_rule_t, batch_rule_t batch_rule>
18004at::Tensor & bitwise_right_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18005 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18006 auto maybe_layer = maybeCurrentDynamicLayer();
18007 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18008 int64_t cur_level = maybe_layer->layerId();
18009 if (!isBatchedAtLevel(self, cur_level)) {
18010 return at::_ops::bitwise_right_shift__Tensor_Scalar::call(self, other);
18011 }
18012 Tensor self_value;
18013 optional<int64_t> self_bdim;
18014 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18015 batch_rule(self_value, self_bdim, other);
18016 return self;
18017}
18018template <typename batch_rule_t, batch_rule_t batch_rule>
18019at::Tensor bitwise_right_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
18020 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18021 auto maybe_layer = maybeCurrentDynamicLayer();
18022 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18023 int64_t cur_level = maybe_layer->layerId();
18024 if (!isBatchedAtLevel(other, cur_level)) {
18025 return at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other);
18026 }
18027 Tensor other_value;
18028 optional<int64_t> other_bdim;
18029 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18030 auto results = batch_rule(self, other_value, other_bdim);
18031 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18032}
18033template <typename batch_rule_t, batch_rule_t batch_rule>
18034at::Tensor & tril__generated_plumbing(at::Tensor & self, int64_t diagonal) {
18035 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18036 auto maybe_layer = maybeCurrentDynamicLayer();
18037 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18038 int64_t cur_level = maybe_layer->layerId();
18039 if (!isBatchedAtLevel(self, cur_level)) {
18040 return at::_ops::tril_::call(self, diagonal);
18041 }
18042 Tensor self_value;
18043 optional<int64_t> self_bdim;
18044 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18045 batch_rule(self_value, self_bdim, diagonal);
18046 return self;
18047}
18048template <typename batch_rule_t, batch_rule_t batch_rule>
18049at::Tensor & triu__generated_plumbing(at::Tensor & self, int64_t diagonal) {
18050 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18051 auto maybe_layer = maybeCurrentDynamicLayer();
18052 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18053 int64_t cur_level = maybe_layer->layerId();
18054 if (!isBatchedAtLevel(self, cur_level)) {
18055 return at::_ops::triu_::call(self, diagonal);
18056 }
18057 Tensor self_value;
18058 optional<int64_t> self_bdim;
18059 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18060 batch_rule(self_value, self_bdim, diagonal);
18061 return self;
18062}
18063template <typename batch_rule_t, batch_rule_t batch_rule>
18064at::Tensor & digamma__generated_plumbing(at::Tensor & self) {
18065 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18066 auto maybe_layer = maybeCurrentDynamicLayer();
18067 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18068 int64_t cur_level = maybe_layer->layerId();
18069 if (!isBatchedAtLevel(self, cur_level)) {
18070 return at::_ops::digamma_::call(self);
18071 }
18072 Tensor self_value;
18073 optional<int64_t> self_bdim;
18074 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18075 batch_rule(self_value, self_bdim);
18076 return self;
18077}
18078template <typename batch_rule_t, batch_rule_t batch_rule>
18079at::Tensor & lerp__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
18080 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18081 auto maybe_layer = maybeCurrentDynamicLayer();
18082 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18083 int64_t cur_level = maybe_layer->layerId();
18084 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
18085 return at::_ops::lerp__Scalar::call(self, end, weight);
18086 }
18087 Tensor self_value;
18088 optional<int64_t> self_bdim;
18089 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18090 Tensor end_value;
18091 optional<int64_t> end_bdim;
18092 std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
18093 batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
18094 return self;
18095}
18096template <typename batch_rule_t, batch_rule_t batch_rule>
18097at::Tensor & lerp__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
18098 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18099 auto maybe_layer = maybeCurrentDynamicLayer();
18100 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18101 int64_t cur_level = maybe_layer->layerId();
18102 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
18103 return at::_ops::lerp__Tensor::call(self, end, weight);
18104 }
18105 Tensor self_value;
18106 optional<int64_t> self_bdim;
18107 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18108 Tensor end_value;
18109 optional<int64_t> end_bdim;
18110 std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
18111 Tensor weight_value;
18112 optional<int64_t> weight_bdim;
18113 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
18114 batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
18115 return self;
18116}
18117template <typename batch_rule_t, batch_rule_t batch_rule>
18118at::Tensor & addbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
18119 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18120 auto maybe_layer = maybeCurrentDynamicLayer();
18121 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18122 int64_t cur_level = maybe_layer->layerId();
18123 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
18124 return at::_ops::addbmm_::call(self, batch1, batch2, beta, alpha);
18125 }
18126 Tensor self_value;
18127 optional<int64_t> self_bdim;
18128 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18129 Tensor batch1_value;
18130 optional<int64_t> batch1_bdim;
18131 std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
18132 Tensor batch2_value;
18133 optional<int64_t> batch2_bdim;
18134 std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
18135 batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
18136 return self;
18137}
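// Functional plumbing pattern: wrappers that return a fresh Tensor forward the batch rule's
// (output, output_bdim) pair through makeBatched to re-wrap the result at the current level,
// rather than returning an input alias as the in-place variants above do.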
18138template <typename batch_rule_t, batch_rule_t batch_rule>
18139at::Tensor addbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
18140 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18141 auto maybe_layer = maybeCurrentDynamicLayer();
18142 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18143 int64_t cur_level = maybe_layer->layerId();
18144 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
18145 return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha);
18146 }
18147 Tensor self_value;
18148 optional<int64_t> self_bdim;
18149 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18150 Tensor batch1_value;
18151 optional<int64_t> batch1_bdim;
18152 std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
18153 Tensor batch2_value;
18154 optional<int64_t> batch2_bdim;
18155 std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
18156 auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
18157 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18158}
18159template <typename batch_rule_t, batch_rule_t batch_rule>
18160at::Tensor & random__from_generated_plumbing(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
18161 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18162 auto maybe_layer = maybeCurrentDynamicLayer();
18163 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18164 int64_t cur_level = maybe_layer->layerId();
18165 if (!isBatchedAtLevel(self, cur_level)) {
18166 return at::_ops::random__from::call(self, from, to, generator);
18167 }
18168 Tensor self_value;
18169 optional<int64_t> self_bdim;
18170 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18171 batch_rule(self_value, self_bdim, from, to, generator);
18172 return self;
18173}
18174template <typename batch_rule_t, batch_rule_t batch_rule>
18175at::Tensor & random__to_generated_plumbing(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
18176 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18177 auto maybe_layer = maybeCurrentDynamicLayer();
18178 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18179 int64_t cur_level = maybe_layer->layerId();
18180 if (!isBatchedAtLevel(self, cur_level)) {
18181 return at::_ops::random__to::call(self, to, generator);
18182 }
18183 Tensor self_value;
18184 optional<int64_t> self_bdim;
18185 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18186 batch_rule(self_value, self_bdim, to, generator);
18187 return self;
18188}
18189template <typename batch_rule_t, batch_rule_t batch_rule>
18190at::Tensor & random__generated_plumbing(at::Tensor & self, c10::optional<at::Generator> generator) {
18191 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18192 auto maybe_layer = maybeCurrentDynamicLayer();
18193 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18194 int64_t cur_level = maybe_layer->layerId();
18195 if (!isBatchedAtLevel(self, cur_level)) {
18196 return at::_ops::random_::call(self, generator);
18197 }
18198 Tensor self_value;
18199 optional<int64_t> self_bdim;
18200 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18201 batch_rule(self_value, self_bdim, generator);
18202 return self;
18203}
18204template <typename batch_rule_t, batch_rule_t batch_rule>
18205at::Tensor & uniform__generated_plumbing(at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
18206 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18207 auto maybe_layer = maybeCurrentDynamicLayer();
18208 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18209 int64_t cur_level = maybe_layer->layerId();
18210 if (!isBatchedAtLevel(self, cur_level)) {
18211 return at::_ops::uniform_::call(self, from, to, generator);
18212 }
18213 Tensor self_value;
18214 optional<int64_t> self_bdim;
18215 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18216 batch_rule(self_value, self_bdim, from, to, generator);
18217 return self;
18218}
18219template <typename batch_rule_t, batch_rule_t batch_rule>
18220at::Tensor & cauchy__generated_plumbing(at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
18221 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18222 auto maybe_layer = maybeCurrentDynamicLayer();
18223 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18224 int64_t cur_level = maybe_layer->layerId();
18225 if (!isBatchedAtLevel(self, cur_level)) {
18226 return at::_ops::cauchy_::call(self, median, sigma, generator);
18227 }
18228 Tensor self_value;
18229 optional<int64_t> self_bdim;
18230 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18231 batch_rule(self_value, self_bdim, median, sigma, generator);
18232 return self;
18233}
18234template <typename batch_rule_t, batch_rule_t batch_rule>
18235at::Tensor & log_normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
18236 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18237 auto maybe_layer = maybeCurrentDynamicLayer();
18238 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18239 int64_t cur_level = maybe_layer->layerId();
18240 if (!isBatchedAtLevel(self, cur_level)) {
18241 return at::_ops::log_normal_::call(self, mean, std, generator);
18242 }
18243 Tensor self_value;
18244 optional<int64_t> self_bdim;
18245 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18246 batch_rule(self_value, self_bdim, mean, std, generator);
18247 return self;
18248}
18249template <typename batch_rule_t, batch_rule_t batch_rule>
18250at::Tensor & exponential__generated_plumbing(at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
18251 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18252 auto maybe_layer = maybeCurrentDynamicLayer();
18253 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18254 int64_t cur_level = maybe_layer->layerId();
18255 if (!isBatchedAtLevel(self, cur_level)) {
18256 return at::_ops::exponential_::call(self, lambd, generator);
18257 }
18258 Tensor self_value;
18259 optional<int64_t> self_bdim;
18260 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18261 batch_rule(self_value, self_bdim, lambd, generator);
18262 return self;
18263}
18264template <typename batch_rule_t, batch_rule_t batch_rule>
18265at::Tensor & geometric__generated_plumbing(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
18266 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18267 auto maybe_layer = maybeCurrentDynamicLayer();
18268 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18269 int64_t cur_level = maybe_layer->layerId();
18270 if (!isBatchedAtLevel(self, cur_level)) {
18271 return at::_ops::geometric_::call(self, p, generator);
18272 }
18273 Tensor self_value;
18274 optional<int64_t> self_bdim;
18275 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18276 batch_rule(self_value, self_bdim, p, generator);
18277 return self;
18278}
18279template <typename batch_rule_t, batch_rule_t batch_rule>
18280at::Tensor diag_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
18281 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18282 auto maybe_layer = maybeCurrentDynamicLayer();
18283 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18284 int64_t cur_level = maybe_layer->layerId();
18285 if (!isBatchedAtLevel(self, cur_level)) {
18286 return at::_ops::diag::call(self, diagonal);
18287 }
18288 Tensor self_value;
18289 optional<int64_t> self_bdim;
18290 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18291 auto results = batch_rule(self_value, self_bdim, diagonal);
18292 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18293}
18294template <typename batch_rule_t, batch_rule_t batch_rule>
18295at::Tensor cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
18296 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18297 auto maybe_layer = maybeCurrentDynamicLayer();
18298 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18299 int64_t cur_level = maybe_layer->layerId();
18300 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18301 return at::_ops::cross::call(self, other, dim);
18302 }
18303 Tensor self_value;
18304 optional<int64_t> self_bdim;
18305 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18306 Tensor other_value;
18307 optional<int64_t> other_bdim;
18308 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18309 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
18310 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18311}
18312template <typename batch_rule_t, batch_rule_t batch_rule>
18313at::Tensor triu_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
18314 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18315 auto maybe_layer = maybeCurrentDynamicLayer();
18316 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18317 int64_t cur_level = maybe_layer->layerId();
18318 if (!isBatchedAtLevel(self, cur_level)) {
18319 return at::_ops::triu::call(self, diagonal);
18320 }
18321 Tensor self_value;
18322 optional<int64_t> self_bdim;
18323 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18324 auto results = batch_rule(self_value, self_bdim, diagonal);
18325 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18326}
18327template <typename batch_rule_t, batch_rule_t batch_rule>
18328at::Tensor tril_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
18329 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18330 auto maybe_layer = maybeCurrentDynamicLayer();
18331 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18332 int64_t cur_level = maybe_layer->layerId();
18333 if (!isBatchedAtLevel(self, cur_level)) {
18334 return at::_ops::tril::call(self, diagonal);
18335 }
18336 Tensor self_value;
18337 optional<int64_t> self_bdim;
18338 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18339 auto results = batch_rule(self_value, self_bdim, diagonal);
18340 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18341}
18342template <typename batch_rule_t, batch_rule_t batch_rule>
18343at::Tensor trace_generated_plumbing(const at::Tensor & self) {
18344 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18345 auto maybe_layer = maybeCurrentDynamicLayer();
18346 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18347 int64_t cur_level = maybe_layer->layerId();
18348 if (!isBatchedAtLevel(self, cur_level)) {
18349 return at::_ops::trace::call(self);
18350 }
18351 Tensor self_value;
18352 optional<int64_t> self_bdim;
18353 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18354 auto results = batch_rule(self_value, self_bdim);
18355 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18356}
18357template <typename batch_rule_t, batch_rule_t batch_rule>
18358at::Tensor trace_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
18359 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18360 auto maybe_layer = maybeCurrentDynamicLayer();
18361 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18362 int64_t cur_level = maybe_layer->layerId();
18363 if (!isBatchedAtLevel(grad, cur_level)) {
18364 return at::_ops::trace_backward::call(grad, sizes);
18365 }
18366 Tensor grad_value;
18367 optional<int64_t> grad_bdim;
18368 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
18369 auto results = batch_rule(grad_value, grad_bdim, sizes);
18370 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18371}
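// Comparison operators: ne/eq/ge/le/gt/lt and their spelled-out aliases (not_equal, greater_equal,
// less_equal, greater, less) each get Scalar and Tensor overloads, with in-place variants where the
// op defines them, all generated from the same two patterns above.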
18372template <typename batch_rule_t, batch_rule_t batch_rule>
18373at::Tensor ne_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18374 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18375 auto maybe_layer = maybeCurrentDynamicLayer();
18376 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18377 int64_t cur_level = maybe_layer->layerId();
18378 if (!isBatchedAtLevel(self, cur_level)) {
18379 return at::_ops::ne_Scalar::call(self, other);
18380 }
18381 Tensor self_value;
18382 optional<int64_t> self_bdim;
18383 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18384 auto results = batch_rule(self_value, self_bdim, other);
18385 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18386}
18387template <typename batch_rule_t, batch_rule_t batch_rule>
18388at::Tensor ne_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18389 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18390 auto maybe_layer = maybeCurrentDynamicLayer();
18391 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18392 int64_t cur_level = maybe_layer->layerId();
18393 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18394 return at::_ops::ne_Tensor::call(self, other);
18395 }
18396 Tensor self_value;
18397 optional<int64_t> self_bdim;
18398 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18399 Tensor other_value;
18400 optional<int64_t> other_bdim;
18401 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18402 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18403 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18404}
18405template <typename batch_rule_t, batch_rule_t batch_rule>
18406at::Tensor & ne__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18407 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18408 auto maybe_layer = maybeCurrentDynamicLayer();
18409 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18410 int64_t cur_level = maybe_layer->layerId();
18411 if (!isBatchedAtLevel(self, cur_level)) {
18412 return at::_ops::ne__Scalar::call(self, other);
18413 }
18414 Tensor self_value;
18415 optional<int64_t> self_bdim;
18416 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18417 batch_rule(self_value, self_bdim, other);
18418 return self;
18419}
18420template <typename batch_rule_t, batch_rule_t batch_rule>
18421at::Tensor & ne__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
18422 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18423 auto maybe_layer = maybeCurrentDynamicLayer();
18424 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18425 int64_t cur_level = maybe_layer->layerId();
18426 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18427 return at::_ops::ne__Tensor::call(self, other);
18428 }
18429 Tensor self_value;
18430 optional<int64_t> self_bdim;
18431 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18432 Tensor other_value;
18433 optional<int64_t> other_bdim;
18434 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18435 batch_rule(self_value, self_bdim, other_value, other_bdim);
18436 return self;
18437}
18438template <typename batch_rule_t, batch_rule_t batch_rule>
18439at::Tensor not_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18440 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18441 auto maybe_layer = maybeCurrentDynamicLayer();
18442 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18443 int64_t cur_level = maybe_layer->layerId();
18444 if (!isBatchedAtLevel(self, cur_level)) {
18445 return at::_ops::not_equal_Scalar::call(self, other);
18446 }
18447 Tensor self_value;
18448 optional<int64_t> self_bdim;
18449 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18450 auto results = batch_rule(self_value, self_bdim, other);
18451 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18452}
18453template <typename batch_rule_t, batch_rule_t batch_rule>
18454at::Tensor not_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18455 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18456 auto maybe_layer = maybeCurrentDynamicLayer();
18457 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18458 int64_t cur_level = maybe_layer->layerId();
18459 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18460 return at::_ops::not_equal_Tensor::call(self, other);
18461 }
18462 Tensor self_value;
18463 optional<int64_t> self_bdim;
18464 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18465 Tensor other_value;
18466 optional<int64_t> other_bdim;
18467 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18468 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18469 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18470}
18471template <typename batch_rule_t, batch_rule_t batch_rule>
18472at::Tensor & not_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18473 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18474 auto maybe_layer = maybeCurrentDynamicLayer();
18475 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18476 int64_t cur_level = maybe_layer->layerId();
18477 if (!isBatchedAtLevel(self, cur_level)) {
18478 return at::_ops::not_equal__Scalar::call(self, other);
18479 }
18480 Tensor self_value;
18481 optional<int64_t> self_bdim;
18482 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18483 batch_rule(self_value, self_bdim, other);
18484 return self;
18485}
18486template <typename batch_rule_t, batch_rule_t batch_rule>
18487at::Tensor & not_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
18488 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18489 auto maybe_layer = maybeCurrentDynamicLayer();
18490 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18491 int64_t cur_level = maybe_layer->layerId();
18492 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18493 return at::_ops::not_equal__Tensor::call(self, other);
18494 }
18495 Tensor self_value;
18496 optional<int64_t> self_bdim;
18497 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18498 Tensor other_value;
18499 optional<int64_t> other_bdim;
18500 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18501 batch_rule(self_value, self_bdim, other_value, other_bdim);
18502 return self;
18503}
18504template <typename batch_rule_t, batch_rule_t batch_rule>
18505at::Tensor eq_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18506 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18507 auto maybe_layer = maybeCurrentDynamicLayer();
18508 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18509 int64_t cur_level = maybe_layer->layerId();
18510 if (!isBatchedAtLevel(self, cur_level)) {
18511 return at::_ops::eq_Scalar::call(self, other);
18512 }
18513 Tensor self_value;
18514 optional<int64_t> self_bdim;
18515 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18516 auto results = batch_rule(self_value, self_bdim, other);
18517 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18518}
18519template <typename batch_rule_t, batch_rule_t batch_rule>
18520at::Tensor eq_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18521 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18522 auto maybe_layer = maybeCurrentDynamicLayer();
18523 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18524 int64_t cur_level = maybe_layer->layerId();
18525 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18526 return at::_ops::eq_Tensor::call(self, other);
18527 }
18528 Tensor self_value;
18529 optional<int64_t> self_bdim;
18530 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18531 Tensor other_value;
18532 optional<int64_t> other_bdim;
18533 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18534 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18535 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18536}
18537template <typename batch_rule_t, batch_rule_t batch_rule>
18538at::Tensor ge_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18539 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18540 auto maybe_layer = maybeCurrentDynamicLayer();
18541 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18542 int64_t cur_level = maybe_layer->layerId();
18543 if (!isBatchedAtLevel(self, cur_level)) {
18544 return at::_ops::ge_Scalar::call(self, other);
18545 }
18546 Tensor self_value;
18547 optional<int64_t> self_bdim;
18548 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18549 auto results = batch_rule(self_value, self_bdim, other);
18550 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18551}
18552template <typename batch_rule_t, batch_rule_t batch_rule>
18553at::Tensor ge_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18554 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18555 auto maybe_layer = maybeCurrentDynamicLayer();
18556 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18557 int64_t cur_level = maybe_layer->layerId();
18558 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18559 return at::_ops::ge_Tensor::call(self, other);
18560 }
18561 Tensor self_value;
18562 optional<int64_t> self_bdim;
18563 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18564 Tensor other_value;
18565 optional<int64_t> other_bdim;
18566 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18567 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18568 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18569}
18570template <typename batch_rule_t, batch_rule_t batch_rule>
18571at::Tensor & ge__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18572 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18573 auto maybe_layer = maybeCurrentDynamicLayer();
18574 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18575 int64_t cur_level = maybe_layer->layerId();
18576 if (!isBatchedAtLevel(self, cur_level)) {
18577 return at::_ops::ge__Scalar::call(self, other);
18578 }
18579 Tensor self_value;
18580 optional<int64_t> self_bdim;
18581 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18582 batch_rule(self_value, self_bdim, other);
18583 return self;
18584}
18585template <typename batch_rule_t, batch_rule_t batch_rule>
18586at::Tensor & ge__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
18587 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18588 auto maybe_layer = maybeCurrentDynamicLayer();
18589 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18590 int64_t cur_level = maybe_layer->layerId();
18591 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18592 return at::_ops::ge__Tensor::call(self, other);
18593 }
18594 Tensor self_value;
18595 optional<int64_t> self_bdim;
18596 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18597 Tensor other_value;
18598 optional<int64_t> other_bdim;
18599 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18600 batch_rule(self_value, self_bdim, other_value, other_bdim);
18601 return self;
18602}
18603template <typename batch_rule_t, batch_rule_t batch_rule>
18604at::Tensor greater_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18605 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18606 auto maybe_layer = maybeCurrentDynamicLayer();
18607 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18608 int64_t cur_level = maybe_layer->layerId();
18609 if (!isBatchedAtLevel(self, cur_level)) {
18610 return at::_ops::greater_equal_Scalar::call(self, other);
18611 }
18612 Tensor self_value;
18613 optional<int64_t> self_bdim;
18614 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18615 auto results = batch_rule(self_value, self_bdim, other);
18616 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18617}
18618template <typename batch_rule_t, batch_rule_t batch_rule>
18619at::Tensor greater_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18620 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18621 auto maybe_layer = maybeCurrentDynamicLayer();
18622 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18623 int64_t cur_level = maybe_layer->layerId();
18624 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18625 return at::_ops::greater_equal_Tensor::call(self, other);
18626 }
18627 Tensor self_value;
18628 optional<int64_t> self_bdim;
18629 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18630 Tensor other_value;
18631 optional<int64_t> other_bdim;
18632 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18633 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18634 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18635}
18636template <typename batch_rule_t, batch_rule_t batch_rule>
18637at::Tensor & greater_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18638 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18639 auto maybe_layer = maybeCurrentDynamicLayer();
18640 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18641 int64_t cur_level = maybe_layer->layerId();
18642 if (!isBatchedAtLevel(self, cur_level)) {
18643 return at::_ops::greater_equal__Scalar::call(self, other);
18644 }
18645 Tensor self_value;
18646 optional<int64_t> self_bdim;
18647 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18648 batch_rule(self_value, self_bdim, other);
18649 return self;
18650}
18651template <typename batch_rule_t, batch_rule_t batch_rule>
18652at::Tensor & greater_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
18653 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18654 auto maybe_layer = maybeCurrentDynamicLayer();
18655 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18656 int64_t cur_level = maybe_layer->layerId();
18657 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18658 return at::_ops::greater_equal__Tensor::call(self, other);
18659 }
18660 Tensor self_value;
18661 optional<int64_t> self_bdim;
18662 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18663 Tensor other_value;
18664 optional<int64_t> other_bdim;
18665 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18666 batch_rule(self_value, self_bdim, other_value, other_bdim);
18667 return self;
18668}
18669template <typename batch_rule_t, batch_rule_t batch_rule>
18670at::Tensor le_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18671 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18672 auto maybe_layer = maybeCurrentDynamicLayer();
18673 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18674 int64_t cur_level = maybe_layer->layerId();
18675 if (!isBatchedAtLevel(self, cur_level)) {
18676 return at::_ops::le_Scalar::call(self, other);
18677 }
18678 Tensor self_value;
18679 optional<int64_t> self_bdim;
18680 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18681 auto results = batch_rule(self_value, self_bdim, other);
18682 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18683}
18684template <typename batch_rule_t, batch_rule_t batch_rule>
18685at::Tensor le_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18686 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18687 auto maybe_layer = maybeCurrentDynamicLayer();
18688 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18689 int64_t cur_level = maybe_layer->layerId();
18690 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18691 return at::_ops::le_Tensor::call(self, other);
18692 }
18693 Tensor self_value;
18694 optional<int64_t> self_bdim;
18695 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18696 Tensor other_value;
18697 optional<int64_t> other_bdim;
18698 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18699 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18700 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18701}
18702template <typename batch_rule_t, batch_rule_t batch_rule>
18703at::Tensor & le__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18704 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18705 auto maybe_layer = maybeCurrentDynamicLayer();
18706 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18707 int64_t cur_level = maybe_layer->layerId();
18708 if (!isBatchedAtLevel(self, cur_level)) {
18709 return at::_ops::le__Scalar::call(self, other);
18710 }
18711 Tensor self_value;
18712 optional<int64_t> self_bdim;
18713 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18714 batch_rule(self_value, self_bdim, other);
18715 return self;
18716}
18717template <typename batch_rule_t, batch_rule_t batch_rule>
18718at::Tensor & le__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
18719 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18720 auto maybe_layer = maybeCurrentDynamicLayer();
18721 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18722 int64_t cur_level = maybe_layer->layerId();
18723 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18724 return at::_ops::le__Tensor::call(self, other);
18725 }
18726 Tensor self_value;
18727 optional<int64_t> self_bdim;
18728 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18729 Tensor other_value;
18730 optional<int64_t> other_bdim;
18731 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18732 batch_rule(self_value, self_bdim, other_value, other_bdim);
18733 return self;
18734}
18735template <typename batch_rule_t, batch_rule_t batch_rule>
18736at::Tensor less_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18737 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18738 auto maybe_layer = maybeCurrentDynamicLayer();
18739 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18740 int64_t cur_level = maybe_layer->layerId();
18741 if (!isBatchedAtLevel(self, cur_level)) {
18742 return at::_ops::less_equal_Scalar::call(self, other);
18743 }
18744 Tensor self_value;
18745 optional<int64_t> self_bdim;
18746 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18747 auto results = batch_rule(self_value, self_bdim, other);
18748 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18749}
18750template <typename batch_rule_t, batch_rule_t batch_rule>
18751at::Tensor less_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18752 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18753 auto maybe_layer = maybeCurrentDynamicLayer();
18754 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18755 int64_t cur_level = maybe_layer->layerId();
18756 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18757 return at::_ops::less_equal_Tensor::call(self, other);
18758 }
18759 Tensor self_value;
18760 optional<int64_t> self_bdim;
18761 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18762 Tensor other_value;
18763 optional<int64_t> other_bdim;
18764 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18765 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18766 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18767}
18768template <typename batch_rule_t, batch_rule_t batch_rule>
18769at::Tensor & less_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18770 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18771 auto maybe_layer = maybeCurrentDynamicLayer();
18772 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18773 int64_t cur_level = maybe_layer->layerId();
18774 if (!isBatchedAtLevel(self, cur_level)) {
18775 return at::_ops::less_equal__Scalar::call(self, other);
18776 }
18777 Tensor self_value;
18778 optional<int64_t> self_bdim;
18779 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18780 batch_rule(self_value, self_bdim, other);
18781 return self;
18782}
18783template <typename batch_rule_t, batch_rule_t batch_rule>
18784at::Tensor & less_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
18785 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18786 auto maybe_layer = maybeCurrentDynamicLayer();
18787 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18788 int64_t cur_level = maybe_layer->layerId();
18789 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18790 return at::_ops::less_equal__Tensor::call(self, other);
18791 }
18792 Tensor self_value;
18793 optional<int64_t> self_bdim;
18794 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18795 Tensor other_value;
18796 optional<int64_t> other_bdim;
18797 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18798 batch_rule(self_value, self_bdim, other_value, other_bdim);
18799 return self;
18800}
18801template <typename batch_rule_t, batch_rule_t batch_rule>
18802at::Tensor gt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18803 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18804 auto maybe_layer = maybeCurrentDynamicLayer();
18805 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18806 int64_t cur_level = maybe_layer->layerId();
18807 if (!isBatchedAtLevel(self, cur_level)) {
18808 return at::_ops::gt_Scalar::call(self, other);
18809 }
18810 Tensor self_value;
18811 optional<int64_t> self_bdim;
18812 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18813 auto results = batch_rule(self_value, self_bdim, other);
18814 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18815}
18816template <typename batch_rule_t, batch_rule_t batch_rule>
18817at::Tensor gt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18818 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18819 auto maybe_layer = maybeCurrentDynamicLayer();
18820 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18821 int64_t cur_level = maybe_layer->layerId();
18822 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18823 return at::_ops::gt_Tensor::call(self, other);
18824 }
18825 Tensor self_value;
18826 optional<int64_t> self_bdim;
18827 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18828 Tensor other_value;
18829 optional<int64_t> other_bdim;
18830 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18831 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18832 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18833}
18834template <typename batch_rule_t, batch_rule_t batch_rule>
18835at::Tensor & gt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18836 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18837 auto maybe_layer = maybeCurrentDynamicLayer();
18838 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18839 int64_t cur_level = maybe_layer->layerId();
18840 if (!isBatchedAtLevel(self, cur_level)) {
18841 return at::_ops::gt__Scalar::call(self, other);
18842 }
18843 Tensor self_value;
18844 optional<int64_t> self_bdim;
18845 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18846 batch_rule(self_value, self_bdim, other);
18847 return self;
18848}
18849template <typename batch_rule_t, batch_rule_t batch_rule>
18850at::Tensor & gt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
18851 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18852 auto maybe_layer = maybeCurrentDynamicLayer();
18853 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18854 int64_t cur_level = maybe_layer->layerId();
18855 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18856 return at::_ops::gt__Tensor::call(self, other);
18857 }
18858 Tensor self_value;
18859 optional<int64_t> self_bdim;
18860 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18861 Tensor other_value;
18862 optional<int64_t> other_bdim;
18863 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18864 batch_rule(self_value, self_bdim, other_value, other_bdim);
18865 return self;
18866}
18867template <typename batch_rule_t, batch_rule_t batch_rule>
18868at::Tensor greater_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18869 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18870 auto maybe_layer = maybeCurrentDynamicLayer();
18871 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18872 int64_t cur_level = maybe_layer->layerId();
18873 if (!isBatchedAtLevel(self, cur_level)) {
18874 return at::_ops::greater_Scalar::call(self, other);
18875 }
18876 Tensor self_value;
18877 optional<int64_t> self_bdim;
18878 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18879 auto results = batch_rule(self_value, self_bdim, other);
18880 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18881}
18882template <typename batch_rule_t, batch_rule_t batch_rule>
18883at::Tensor greater_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18884 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18885 auto maybe_layer = maybeCurrentDynamicLayer();
18886 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18887 int64_t cur_level = maybe_layer->layerId();
18888 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18889 return at::_ops::greater_Tensor::call(self, other);
18890 }
18891 Tensor self_value;
18892 optional<int64_t> self_bdim;
18893 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18894 Tensor other_value;
18895 optional<int64_t> other_bdim;
18896 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18897 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18898 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18899}
18900template <typename batch_rule_t, batch_rule_t batch_rule>
18901at::Tensor & greater__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18902 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18903 auto maybe_layer = maybeCurrentDynamicLayer();
18904 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18905 int64_t cur_level = maybe_layer->layerId();
18906 if (!isBatchedAtLevel(self, cur_level)) {
18907 return at::_ops::greater__Scalar::call(self, other);
18908 }
18909 Tensor self_value;
18910 optional<int64_t> self_bdim;
18911 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18912 batch_rule(self_value, self_bdim, other);
18913 return self;
18914}
18915template <typename batch_rule_t, batch_rule_t batch_rule>
18916at::Tensor & greater__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
18917 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18918 auto maybe_layer = maybeCurrentDynamicLayer();
18919 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18920 int64_t cur_level = maybe_layer->layerId();
18921 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18922 return at::_ops::greater__Tensor::call(self, other);
18923 }
18924 Tensor self_value;
18925 optional<int64_t> self_bdim;
18926 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18927 Tensor other_value;
18928 optional<int64_t> other_bdim;
18929 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18930 batch_rule(self_value, self_bdim, other_value, other_bdim);
18931 return self;
18932}
18933template <typename batch_rule_t, batch_rule_t batch_rule>
18934at::Tensor lt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
18935 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18936 auto maybe_layer = maybeCurrentDynamicLayer();
18937 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18938 int64_t cur_level = maybe_layer->layerId();
18939 if (!isBatchedAtLevel(self, cur_level)) {
18940 return at::_ops::lt_Scalar::call(self, other);
18941 }
18942 Tensor self_value;
18943 optional<int64_t> self_bdim;
18944 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18945 auto results = batch_rule(self_value, self_bdim, other);
18946 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18947}
18948template <typename batch_rule_t, batch_rule_t batch_rule>
18949at::Tensor lt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
18950 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18951 auto maybe_layer = maybeCurrentDynamicLayer();
18952 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
18953 int64_t cur_level = maybe_layer->layerId();
18954 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18955 return at::_ops::lt_Tensor::call(self, other);
18956 }
18957 Tensor self_value;
18958 optional<int64_t> self_bdim;
18959 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18960 Tensor other_value;
18961 optional<int64_t> other_bdim;
18962 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18963 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
18964 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
18965}
18966template <typename batch_rule_t, batch_rule_t batch_rule>
18967at::Tensor & lt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
18968 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18969 auto maybe_layer = maybeCurrentDynamicLayer();
18970 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18971 int64_t cur_level = maybe_layer->layerId();
18972 if (!isBatchedAtLevel(self, cur_level)) {
18973 return at::_ops::lt__Scalar::call(self, other);
18974 }
18975 Tensor self_value;
18976 optional<int64_t> self_bdim;
18977 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18978 batch_rule(self_value, self_bdim, other);
18979 return self;
18980}
18981template <typename batch_rule_t, batch_rule_t batch_rule>
18982at::Tensor & lt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
18983 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
18984 auto maybe_layer = maybeCurrentDynamicLayer();
18985 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
18986 int64_t cur_level = maybe_layer->layerId();
18987 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
18988 return at::_ops::lt__Tensor::call(self, other);
18989 }
18990 Tensor self_value;
18991 optional<int64_t> self_bdim;
18992 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
18993 Tensor other_value;
18994 optional<int64_t> other_bdim;
18995 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
18996 batch_rule(self_value, self_bdim, other_value, other_bdim);
18997 return self;
18998}
18999template <typename batch_rule_t, batch_rule_t batch_rule>
19000at::Tensor less_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
19001 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19002 auto maybe_layer = maybeCurrentDynamicLayer();
19003 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19004 int64_t cur_level = maybe_layer->layerId();
19005 if (!isBatchedAtLevel(self, cur_level)) {
19006 return at::_ops::less_Scalar::call(self, other);
19007 }
19008 Tensor self_value;
19009 optional<int64_t> self_bdim;
19010 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19011 auto results = batch_rule(self_value, self_bdim, other);
19012 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19013}
19014template <typename batch_rule_t, batch_rule_t batch_rule>
19015at::Tensor less_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
19016 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19017 auto maybe_layer = maybeCurrentDynamicLayer();
19018 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19019 int64_t cur_level = maybe_layer->layerId();
19020 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
19021 return at::_ops::less_Tensor::call(self, other);
19022 }
19023 Tensor self_value;
19024 optional<int64_t> self_bdim;
19025 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19026 Tensor other_value;
19027 optional<int64_t> other_bdim;
19028 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
19029 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
19030 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19031}
19032template <typename batch_rule_t, batch_rule_t batch_rule>
19033at::Tensor & less__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
19034 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19035 auto maybe_layer = maybeCurrentDynamicLayer();
19036 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19037 int64_t cur_level = maybe_layer->layerId();
19038 if (!isBatchedAtLevel(self, cur_level)) {
19039 return at::_ops::less__Scalar::call(self, other);
19040 }
19041 Tensor self_value;
19042 optional<int64_t> self_bdim;
19043 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19044 batch_rule(self_value, self_bdim, other);
19045 return self;
19046}
19047template <typename batch_rule_t, batch_rule_t batch_rule>
19048at::Tensor & less__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
19049 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19050 auto maybe_layer = maybeCurrentDynamicLayer();
19051 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19052 int64_t cur_level = maybe_layer->layerId();
19053 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
19054 return at::_ops::less__Tensor::call(self, other);
19055 }
19056 Tensor self_value;
19057 optional<int64_t> self_bdim;
19058 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19059 Tensor other_value;
19060 optional<int64_t> other_bdim;
19061 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
19062 batch_rule(self_value, self_bdim, other_value, other_bdim);
19063 return self;
19064}
19065template <typename batch_rule_t, batch_rule_t batch_rule>
19066at::Tensor take_generated_plumbing(const at::Tensor & self, const at::Tensor & index) {
19067 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19068 auto maybe_layer = maybeCurrentDynamicLayer();
19069 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19070 int64_t cur_level = maybe_layer->layerId();
19071 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
19072 return at::_ops::take::call(self, index);
19073 }
19074 Tensor self_value;
19075 optional<int64_t> self_bdim;
19076 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19077 Tensor index_value;
19078 optional<int64_t> index_bdim;
19079 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
19080 auto results = batch_rule(self_value, self_bdim, index_value, index_bdim);
19081 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19082}
19083template <typename batch_rule_t, batch_rule_t batch_rule>
19084at::Tensor take_along_dim_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {
19085 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19086 auto maybe_layer = maybeCurrentDynamicLayer();
19087 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19088 int64_t cur_level = maybe_layer->layerId();
19089 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
19090 return at::_ops::take_along_dim::call(self, indices, dim);
19091 }
19092 Tensor self_value;
19093 optional<int64_t> self_bdim;
19094 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19095 Tensor indices_value;
19096 optional<int64_t> indices_bdim;
19097 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
19098 auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, dim);
19099 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19100}
19101template <typename batch_rule_t, batch_rule_t batch_rule>
19102at::Tensor index_select_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
19103 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19104 auto maybe_layer = maybeCurrentDynamicLayer();
19105 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19106 int64_t cur_level = maybe_layer->layerId();
19107 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
19108 return at::_ops::index_select::call(self, dim, index);
19109 }
19110 Tensor self_value;
19111 optional<int64_t> self_bdim;
19112 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19113 Tensor index_value;
19114 optional<int64_t> index_bdim;
19115 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
19116 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
19117 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19118}
19119template <typename batch_rule_t, batch_rule_t batch_rule>
19120at::Tensor index_select_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
19121 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19122 auto maybe_layer = maybeCurrentDynamicLayer();
19123 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19124 int64_t cur_level = maybe_layer->layerId();
19125 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
19126 return at::_ops::index_select_dimname::call(self, dim, index);
19127 }
19128 Tensor self_value;
19129 optional<int64_t> self_bdim;
19130 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19131 Tensor index_value;
19132 optional<int64_t> index_bdim;
19133 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
19134 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
19135 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19136}
19137template <typename batch_rule_t, batch_rule_t batch_rule>
19138at::Tensor index_select_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
19139 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19140 auto maybe_layer = maybeCurrentDynamicLayer();
19141 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19142 int64_t cur_level = maybe_layer->layerId();
19143 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(index, cur_level)) {
19144 return at::_ops::index_select_backward::call(grad, self_sizes, dim, index);
19145 }
19146 Tensor grad_value;
19147 optional<int64_t> grad_bdim;
19148 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
19149 Tensor index_value;
19150 optional<int64_t> index_bdim;
19151 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
19152 auto results = batch_rule(grad_value, grad_bdim, self_sizes, dim, index_value, index_bdim);
19153 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19154}
19155template <typename batch_rule_t, batch_rule_t batch_rule>
19156at::Tensor masked_select_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
19157 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19158 auto maybe_layer = maybeCurrentDynamicLayer();
19159 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19160 int64_t cur_level = maybe_layer->layerId();
19161 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
19162 return at::_ops::masked_select::call(self, mask);
19163 }
19164 Tensor self_value;
19165 optional<int64_t> self_bdim;
19166 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19167 Tensor mask_value;
19168 optional<int64_t> mask_bdim;
19169 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
19170 auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
19171 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19172}
19173template <typename batch_rule_t, batch_rule_t batch_rule>
19174at::Tensor masked_select_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
19175 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19176 auto maybe_layer = maybeCurrentDynamicLayer();
19177 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19178 int64_t cur_level = maybe_layer->layerId();
19179 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
19180 return at::_ops::masked_select_backward::call(grad, input, mask);
19181 }
19182 Tensor grad_value;
19183 optional<int64_t> grad_bdim;
19184 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
19185 Tensor input_value;
19186 optional<int64_t> input_bdim;
19187 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
19188 Tensor mask_value;
19189 optional<int64_t> mask_bdim;
19190 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
19191 auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, mask_value, mask_bdim);
19192 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19193}
19194template <typename batch_rule_t, batch_rule_t batch_rule>
19195at::Tensor nonzero_generated_plumbing(const at::Tensor & self) {
19196 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19197 auto maybe_layer = maybeCurrentDynamicLayer();
19198 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19199 int64_t cur_level = maybe_layer->layerId();
19200 if (!isBatchedAtLevel(self, cur_level)) {
19201 return at::_ops::nonzero::call(self);
19202 }
19203 Tensor self_value;
19204 optional<int64_t> self_bdim;
19205 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19206 auto results = batch_rule(self_value, self_bdim);
19207 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19208}
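// Ops returning ::std::vector<at::Tensor> (e.g. nonzero_numpy below) re-wrap
// their results with makeBatchedVector rather than makeBatched, so each
// tensor of the returned vector is batched again at cur_level.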
19209template <typename batch_rule_t, batch_rule_t batch_rule>
19210::std::vector<at::Tensor> nonzero_numpy_generated_plumbing(const at::Tensor & self) {
19211 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19212 auto maybe_layer = maybeCurrentDynamicLayer();
19213 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19214 int64_t cur_level = maybe_layer->layerId();
19215 if (!isBatchedAtLevel(self, cur_level)) {
19216 return at::_ops::nonzero_numpy::call(self);
19217 }
19218 Tensor self_value;
19219 optional<int64_t> self_bdim;
19220 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19221 auto results = batch_rule(self_value, self_bdim);
19222 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
19223}
19224template <typename batch_rule_t, batch_rule_t batch_rule>
19225at::Tensor argwhere_generated_plumbing(const at::Tensor & self) {
19226 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19227 auto maybe_layer = maybeCurrentDynamicLayer();
19228 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19229 int64_t cur_level = maybe_layer->layerId();
19230 if (!isBatchedAtLevel(self, cur_level)) {
19231 return at::_ops::argwhere::call(self);
19232 }
19233 Tensor self_value;
19234 optional<int64_t> self_bdim;
19235 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19236 auto results = batch_rule(self_value, self_bdim);
19237 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19238}
19239template <typename batch_rule_t, batch_rule_t batch_rule>
19240at::Tensor gather_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
19241 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19242 auto maybe_layer = maybeCurrentDynamicLayer();
19243 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19244 int64_t cur_level = maybe_layer->layerId();
19245 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
19246 return at::_ops::gather::call(self, dim, index, sparse_grad);
19247 }
19248 Tensor self_value;
19249 optional<int64_t> self_bdim;
19250 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19251 Tensor index_value;
19252 optional<int64_t> index_bdim;
19253 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
19254 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
19255 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19256}
19257template <typename batch_rule_t, batch_rule_t batch_rule>
19258at::Tensor gather_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
19259 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19260 auto maybe_layer = maybeCurrentDynamicLayer();
19261 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19262 int64_t cur_level = maybe_layer->layerId();
19263 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
19264 return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad);
19265 }
19266 Tensor grad_value;
19267 optional<int64_t> grad_bdim;
19268 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
19269 Tensor self_value;
19270 optional<int64_t> self_bdim;
19271 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19272 Tensor index_value;
19273 optional<int64_t> index_bdim;
19274 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
19275 auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
19276 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19277}
19278template <typename batch_rule_t, batch_rule_t batch_rule>
19279at::Tensor gather_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
19280 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19281 auto maybe_layer = maybeCurrentDynamicLayer();
19282 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19283 int64_t cur_level = maybe_layer->layerId();
19284 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
19285 return at::_ops::gather_dimname::call(self, dim, index, sparse_grad);
19286 }
19287 Tensor self_value;
19288 optional<int64_t> self_bdim;
19289 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19290 Tensor index_value;
19291 optional<int64_t> index_bdim;
19292 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
19293 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
19294 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19295}
19296template <typename batch_rule_t, batch_rule_t batch_rule>
19297at::Tensor _gather_sparse_backward_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
19298 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19299 auto maybe_layer = maybeCurrentDynamicLayer();
19300 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19301 int64_t cur_level = maybe_layer->layerId();
19302 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
19303 return at::_ops::_gather_sparse_backward::call(self, dim, index, grad);
19304 }
19305 Tensor self_value;
19306 optional<int64_t> self_bdim;
19307 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19308 Tensor index_value;
19309 optional<int64_t> index_bdim;
19310 std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
19311 Tensor grad_value;
19312 optional<int64_t> grad_bdim;
19313 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
19314 auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, grad_value, grad_bdim);
19315 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19316}
19317template <typename batch_rule_t, batch_rule_t batch_rule>
19318at::Tensor addcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
19319 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19320 auto maybe_layer = maybeCurrentDynamicLayer();
19321 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19322 int64_t cur_level = maybe_layer->layerId();
19323 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
19324 return at::_ops::addcmul::call(self, tensor1, tensor2, value);
19325 }
19326 Tensor self_value;
19327 optional<int64_t> self_bdim;
19328 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19329 Tensor tensor1_value;
19330 optional<int64_t> tensor1_bdim;
19331 std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
19332 Tensor tensor2_value;
19333 optional<int64_t> tensor2_bdim;
19334 std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
19335 auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
19336 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19337}
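// In-place variants (trailing-underscore ops such as addcmul_ below) use the
// "gen_vmap_inplace_plumbing" escape check. The batch rule is called only for
// its side effect: self_value is the tensor held inside the batched `self`,
// so an in-place update of it is visible through `self`, which the wrapper
// returns unchanged.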
19338template <typename batch_rule_t, batch_rule_t batch_rule>
19339at::Tensor & addcmul__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
19340 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19341 auto maybe_layer = maybeCurrentDynamicLayer();
19342 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19343 int64_t cur_level = maybe_layer->layerId();
19344 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
19345 return at::_ops::addcmul_::call(self, tensor1, tensor2, value);
19346 }
19347 Tensor self_value;
19348 optional<int64_t> self_bdim;
19349 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19350 Tensor tensor1_value;
19351 optional<int64_t> tensor1_bdim;
19352 std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
19353 Tensor tensor2_value;
19354 optional<int64_t> tensor2_bdim;
19355 std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
19356 batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
19357 return self;
19358}
19359template <typename batch_rule_t, batch_rule_t batch_rule>
19360at::Tensor addcdiv_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
19361 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19362 auto maybe_layer = maybeCurrentDynamicLayer();
19363 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19364 int64_t cur_level = maybe_layer->layerId();
19365 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
19366 return at::_ops::addcdiv::call(self, tensor1, tensor2, value);
19367 }
19368 Tensor self_value;
19369 optional<int64_t> self_bdim;
19370 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19371 Tensor tensor1_value;
19372 optional<int64_t> tensor1_bdim;
19373 std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
19374 Tensor tensor2_value;
19375 optional<int64_t> tensor2_bdim;
19376 std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
19377 auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
19378 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19379}
19380template <typename batch_rule_t, batch_rule_t batch_rule>
19381at::Tensor & addcdiv__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
19382 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19383 auto maybe_layer = maybeCurrentDynamicLayer();
19384 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19385 int64_t cur_level = maybe_layer->layerId();
19386 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
19387 return at::_ops::addcdiv_::call(self, tensor1, tensor2, value);
19388 }
19389 Tensor self_value;
19390 optional<int64_t> self_bdim;
19391 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19392 Tensor tensor1_value;
19393 optional<int64_t> tensor1_bdim;
19394 std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
19395 Tensor tensor2_value;
19396 optional<int64_t> tensor2_bdim;
19397 std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
19398 batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
19399 return self;
19400}
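// Optional tensor arguments (e.g. `weight` of cross_entropy_loss below) are
// unwrapped only when a value is present; otherwise the batch rule receives
// empty optionals for both the value and its batch dimension.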
19401template <typename batch_rule_t, batch_rule_t batch_rule>
19402at::Tensor cross_entropy_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
19403 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19404 auto maybe_layer = maybeCurrentDynamicLayer();
19405 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19406 int64_t cur_level = maybe_layer->layerId();
19407 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
19408 return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
19409 }
19410 Tensor self_value;
19411 optional<int64_t> self_bdim;
19412 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19413 Tensor target_value;
19414 optional<int64_t> target_bdim;
19415 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
19416 optional<Tensor> weight_value;
19417 optional<int64_t> weight_bdim;
19418 if (weight) {
19419 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
19420 }
19421 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, label_smoothing);
19422 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19423}
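// Multi-output ops return a tuple: the batch rule produces an interleaved
// sequence of (tensor, optional bdim) results, and each pair is re-wrapped
// with makeBatched before being packed into the tuple the caller expects.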
19424template <typename batch_rule_t, batch_rule_t batch_rule>
19425::std::tuple<at::Tensor,at::Tensor> triangular_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
19426 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19427 auto maybe_layer = maybeCurrentDynamicLayer();
19428 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19429 int64_t cur_level = maybe_layer->layerId();
19430 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
19431 return at::_ops::triangular_solve::call(self, A, upper, transpose, unitriangular);
19432 }
19433 Tensor self_value;
19434 optional<int64_t> self_bdim;
19435 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19436 Tensor A_value;
19437 optional<int64_t> A_bdim;
19438 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
19439 auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper, transpose, unitriangular);
19440 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
19441}
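// Ops with no return value (e.g. _linalg_check_errors below) use the
// "gen_vmap_plumbing_no_returns" escape check; the batch rule is invoked for
// its side effects only and nothing is re-wrapped.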
19442template <typename batch_rule_t, batch_rule_t batch_rule>
19443void _linalg_check_errors_generated_plumbing(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
19444 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19445 auto maybe_layer = maybeCurrentDynamicLayer();
19446 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
19447 int64_t cur_level = maybe_layer->layerId();
19448 if (!isBatchedAtLevel(info, cur_level)) {
19449 return at::_ops::_linalg_check_errors::call(info, api_name, is_matrix);
19450 }
19451 Tensor info_value;
19452 optional<int64_t> info_bdim;
19453 std::tie(info_value, info_bdim) = unwrapTensorAtLevel(info, cur_level);
19454 batch_rule(info_value, info_bdim, api_name, is_matrix);
19455}
19456template <typename batch_rule_t, batch_rule_t batch_rule>
19457at::Tensor linalg_solve_triangular_generated_plumbing(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
19458 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19459 auto maybe_layer = maybeCurrentDynamicLayer();
19460 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19461 int64_t cur_level = maybe_layer->layerId();
19462 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(B, cur_level)) {
19463 return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular);
19464 }
19465 Tensor self_value;
19466 optional<int64_t> self_bdim;
19467 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19468 Tensor B_value;
19469 optional<int64_t> B_bdim;
19470 std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
19471 auto results = batch_rule(self_value, self_bdim, B_value, B_bdim, upper, left, unitriangular);
19472 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19473}
19474template <typename batch_rule_t, batch_rule_t batch_rule>
19475at::Tensor linalg_vander_generated_plumbing(const at::Tensor & x, c10::optional<int64_t> N) {
19476 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19477 auto maybe_layer = maybeCurrentDynamicLayer();
19478 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19479 int64_t cur_level = maybe_layer->layerId();
19480 if (!isBatchedAtLevel(x, cur_level)) {
19481 return at::_ops::linalg_vander::call(x, N);
19482 }
19483 Tensor x_value;
19484 optional<int64_t> x_bdim;
19485 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
19486 auto results = batch_rule(x_value, x_bdim, N);
19487 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19488}
19489template <typename batch_rule_t, batch_rule_t batch_rule>
19490::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd_generated_plumbing(const at::Tensor & self, bool some, bool compute_uv) {
19491 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19492 auto maybe_layer = maybeCurrentDynamicLayer();
19493 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19494 int64_t cur_level = maybe_layer->layerId();
19495 if (!isBatchedAtLevel(self, cur_level)) {
19496 return at::_ops::svd::call(self, some, compute_uv);
19497 }
19498 Tensor self_value;
19499 optional<int64_t> self_bdim;
19500 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19501 auto results = batch_rule(self_value, self_bdim, some, compute_uv);
19502 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
19503}
19504template <typename batch_rule_t, batch_rule_t batch_rule>
19505at::Tensor swapaxes_generated_plumbing(const at::Tensor & self, int64_t axis0, int64_t axis1) {
19506 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19507 auto maybe_layer = maybeCurrentDynamicLayer();
19508 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19509 int64_t cur_level = maybe_layer->layerId();
19510 if (!isBatchedAtLevel(self, cur_level)) {
19511 return at::_ops::swapaxes::call(self, axis0, axis1);
19512 }
19513 Tensor self_value;
19514 optional<int64_t> self_bdim;
19515 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19516 auto results = batch_rule(self_value, self_bdim, axis0, axis1);
19517 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19518}
19519template <typename batch_rule_t, batch_rule_t batch_rule>
19520at::Tensor swapdims_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
19521 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19522 auto maybe_layer = maybeCurrentDynamicLayer();
19523 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19524 int64_t cur_level = maybe_layer->layerId();
19525 if (!isBatchedAtLevel(self, cur_level)) {
19526 return at::_ops::swapdims::call(self, dim0, dim1);
19527 }
19528 Tensor self_value;
19529 optional<int64_t> self_bdim;
19530 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19531 auto results = batch_rule(self_value, self_bdim, dim0, dim1);
19532 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19533}
19534template <typename batch_rule_t, batch_rule_t batch_rule>
19535at::Tensor cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
19536 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19537 auto maybe_layer = maybeCurrentDynamicLayer();
19538 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19539 int64_t cur_level = maybe_layer->layerId();
19540 if (!isBatchedAtLevel(self, cur_level)) {
19541 return at::_ops::cholesky::call(self, upper);
19542 }
19543 Tensor self_value;
19544 optional<int64_t> self_bdim;
19545 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19546 auto results = batch_rule(self_value, self_bdim, upper);
19547 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19548}
19549template <typename batch_rule_t, batch_rule_t batch_rule>
19550at::Tensor cholesky_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, bool upper) {
19551 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19552 auto maybe_layer = maybeCurrentDynamicLayer();
19553 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19554 int64_t cur_level = maybe_layer->layerId();
19555 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
19556 return at::_ops::cholesky_solve::call(self, input2, upper);
19557 }
19558 Tensor self_value;
19559 optional<int64_t> self_bdim;
19560 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19561 Tensor input2_value;
19562 optional<int64_t> input2_bdim;
19563 std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
19564 auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, upper);
19565 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19566}
19567template <typename batch_rule_t, batch_rule_t batch_rule>
19568at::Tensor _cholesky_solve_helper_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper) {
19569 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19570 auto maybe_layer = maybeCurrentDynamicLayer();
19571 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19572 int64_t cur_level = maybe_layer->layerId();
19573 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
19574 return at::_ops::_cholesky_solve_helper::call(self, A, upper);
19575 }
19576 Tensor self_value;
19577 optional<int64_t> self_bdim;
19578 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19579 Tensor A_value;
19580 optional<int64_t> A_bdim;
19581 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
19582 auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper);
19583 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19584}
19585template <typename batch_rule_t, batch_rule_t batch_rule>
19586at::Tensor cholesky_inverse_generated_plumbing(const at::Tensor & self, bool upper) {
19587 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19588 auto maybe_layer = maybeCurrentDynamicLayer();
19589 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19590 int64_t cur_level = maybe_layer->layerId();
19591 if (!isBatchedAtLevel(self, cur_level)) {
19592 return at::_ops::cholesky_inverse::call(self, upper);
19593 }
19594 Tensor self_value;
19595 optional<int64_t> self_bdim;
19596 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19597 auto results = batch_rule(self_value, self_bdim, upper);
19598 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19599}
19600template <typename batch_rule_t, batch_rule_t batch_rule>
19601::std::tuple<at::Tensor,at::Tensor> qr_generated_plumbing(const at::Tensor & self, bool some) {
19602 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19603 auto maybe_layer = maybeCurrentDynamicLayer();
19604 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19605 int64_t cur_level = maybe_layer->layerId();
19606 if (!isBatchedAtLevel(self, cur_level)) {
19607 return at::_ops::qr::call(self, some);
19608 }
19609 Tensor self_value;
19610 optional<int64_t> self_bdim;
19611 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19612 auto results = batch_rule(self_value, self_bdim, some);
19613 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
19614}
19615template <typename batch_rule_t, batch_rule_t batch_rule>
19616::std::tuple<at::Tensor,at::Tensor> geqrf_generated_plumbing(const at::Tensor & self) {
19617 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19618 auto maybe_layer = maybeCurrentDynamicLayer();
19619 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19620 int64_t cur_level = maybe_layer->layerId();
19621 if (!isBatchedAtLevel(self, cur_level)) {
19622 return at::_ops::geqrf::call(self);
19623 }
19624 Tensor self_value;
19625 optional<int64_t> self_bdim;
19626 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19627 auto results = batch_rule(self_value, self_bdim);
19628 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
19629}
19630template <typename batch_rule_t, batch_rule_t batch_rule>
19631at::Tensor orgqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2) {
19632 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19633 auto maybe_layer = maybeCurrentDynamicLayer();
19634 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19635 int64_t cur_level = maybe_layer->layerId();
19636 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
19637 return at::_ops::orgqr::call(self, input2);
19638 }
19639 Tensor self_value;
19640 optional<int64_t> self_bdim;
19641 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19642 Tensor input2_value;
19643 optional<int64_t> input2_bdim;
19644 std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
19645 auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim);
19646 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19647}
19648template <typename batch_rule_t, batch_rule_t batch_rule>
19649at::Tensor ormqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
19650 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19651 auto maybe_layer = maybeCurrentDynamicLayer();
19652 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19653 int64_t cur_level = maybe_layer->layerId();
19654 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(input3, cur_level)) {
19655 return at::_ops::ormqr::call(self, input2, input3, left, transpose);
19656 }
19657 Tensor self_value;
19658 optional<int64_t> self_bdim;
19659 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19660 Tensor input2_value;
19661 optional<int64_t> input2_bdim;
19662 std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
19663 Tensor input3_value;
19664 optional<int64_t> input3_bdim;
19665 std::tie(input3_value, input3_bdim) = unwrapTensorAtLevel(input3, cur_level);
19666 auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, input3_value, input3_bdim, left, transpose);
19667 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19668}
19669template <typename batch_rule_t, batch_rule_t batch_rule>
19670::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info_generated_plumbing(const at::Tensor & self, bool pivot, bool check_errors) {
19671 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19672 auto maybe_layer = maybeCurrentDynamicLayer();
19673 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19674 int64_t cur_level = maybe_layer->layerId();
19675 if (!isBatchedAtLevel(self, cur_level)) {
19676 return at::_ops::_lu_with_info::call(self, pivot, check_errors);
19677 }
19678 Tensor self_value;
19679 optional<int64_t> self_bdim;
19680 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19681 auto results = batch_rule(self_value, self_bdim, pivot, check_errors);
19682 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
19683}
19684template <typename batch_rule_t, batch_rule_t batch_rule>
19685at::Tensor lu_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
19686 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19687 auto maybe_layer = maybeCurrentDynamicLayer();
19688 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19689 int64_t cur_level = maybe_layer->layerId();
19690 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
19691 return at::_ops::lu_solve::call(self, LU_data, LU_pivots);
19692 }
19693 Tensor self_value;
19694 optional<int64_t> self_bdim;
19695 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19696 Tensor LU_data_value;
19697 optional<int64_t> LU_data_bdim;
19698 std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level);
19699 Tensor LU_pivots_value;
19700 optional<int64_t> LU_pivots_bdim;
19701 std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level);
19702 auto results = batch_rule(self_value, self_bdim, LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim);
19703 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19704}
19705template <typename batch_rule_t, batch_rule_t batch_rule>
19706::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack_generated_plumbing(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
19707 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19708 auto maybe_layer = maybeCurrentDynamicLayer();
19709 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19710 int64_t cur_level = maybe_layer->layerId();
19711 if (!isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
19712 return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots);
19713 }
19714 Tensor LU_data_value;
19715 optional<int64_t> LU_data_bdim;
19716 std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level);
19717 Tensor LU_pivots_value;
19718 optional<int64_t> LU_pivots_bdim;
19719 std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level);
19720 auto results = batch_rule(LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim, unpack_data, unpack_pivots);
19721 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
19722}
19723template <typename batch_rule_t, batch_rule_t batch_rule>
19724at::Tensor multinomial_generated_plumbing(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) {
19725 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19726 auto maybe_layer = maybeCurrentDynamicLayer();
19727 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19728 int64_t cur_level = maybe_layer->layerId();
19729 if (!isBatchedAtLevel(self, cur_level)) {
19730 return at::_ops::multinomial::call(self, num_samples, replacement, generator);
19731 }
19732 Tensor self_value;
19733 optional<int64_t> self_bdim;
19734 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19735 auto results = batch_rule(self_value, self_bdim, num_samples, replacement, generator);
19736 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19737}
19738template <typename batch_rule_t, batch_rule_t batch_rule>
19739at::Tensor & lgamma__generated_plumbing(at::Tensor & self) {
19740 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19741 auto maybe_layer = maybeCurrentDynamicLayer();
19742 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19743 int64_t cur_level = maybe_layer->layerId();
19744 if (!isBatchedAtLevel(self, cur_level)) {
19745 return at::_ops::lgamma_::call(self);
19746 }
19747 Tensor self_value;
19748 optional<int64_t> self_bdim;
19749 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19750 batch_rule(self_value, self_bdim);
19751 return self;
19752}
19753template <typename batch_rule_t, batch_rule_t batch_rule>
19754at::Tensor lgamma_generated_plumbing(const at::Tensor & self) {
19755 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19756 auto maybe_layer = maybeCurrentDynamicLayer();
19757 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19758 int64_t cur_level = maybe_layer->layerId();
19759 if (!isBatchedAtLevel(self, cur_level)) {
19760 return at::_ops::lgamma::call(self);
19761 }
19762 Tensor self_value;
19763 optional<int64_t> self_bdim;
19764 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19765 auto results = batch_rule(self_value, self_bdim);
19766 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19767}
19768template <typename batch_rule_t, batch_rule_t batch_rule>
19769at::Tensor digamma_generated_plumbing(const at::Tensor & self) {
19770 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19771 auto maybe_layer = maybeCurrentDynamicLayer();
19772 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19773 int64_t cur_level = maybe_layer->layerId();
19774 if (!isBatchedAtLevel(self, cur_level)) {
19775 return at::_ops::digamma::call(self);
19776 }
19777 Tensor self_value;
19778 optional<int64_t> self_bdim;
19779 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19780 auto results = batch_rule(self_value, self_bdim);
19781 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19782}
19783template <typename batch_rule_t, batch_rule_t batch_rule>
19784at::Tensor polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
19785 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19786 auto maybe_layer = maybeCurrentDynamicLayer();
19787 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19788 int64_t cur_level = maybe_layer->layerId();
19789 if (!isBatchedAtLevel(self, cur_level)) {
19790 return at::_ops::polygamma::call(n, self);
19791 }
19792 Tensor self_value;
19793 optional<int64_t> self_bdim;
19794 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19795 auto results = batch_rule(n, self_value, self_bdim);
19796 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19797}
19798template <typename batch_rule_t, batch_rule_t batch_rule>
19799at::Tensor & polygamma__generated_plumbing(at::Tensor & self, int64_t n) {
19800 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19801 auto maybe_layer = maybeCurrentDynamicLayer();
19802 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19803 int64_t cur_level = maybe_layer->layerId();
19804 if (!isBatchedAtLevel(self, cur_level)) {
19805 return at::_ops::polygamma_::call(self, n);
19806 }
19807 Tensor self_value;
19808 optional<int64_t> self_bdim;
19809 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19810 batch_rule(self_value, self_bdim, n);
19811 return self;
19812}
19813template <typename batch_rule_t, batch_rule_t batch_rule>
19814at::Tensor erfinv_generated_plumbing(const at::Tensor & self) {
19815 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19816 auto maybe_layer = maybeCurrentDynamicLayer();
19817 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19818 int64_t cur_level = maybe_layer->layerId();
19819 if (!isBatchedAtLevel(self, cur_level)) {
19820 return at::_ops::erfinv::call(self);
19821 }
19822 Tensor self_value;
19823 optional<int64_t> self_bdim;
19824 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19825 auto results = batch_rule(self_value, self_bdim);
19826 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19827}
19828template <typename batch_rule_t, batch_rule_t batch_rule>
19829at::Tensor & erfinv__generated_plumbing(at::Tensor & self) {
19830 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19831 auto maybe_layer = maybeCurrentDynamicLayer();
19832 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19833 int64_t cur_level = maybe_layer->layerId();
19834 if (!isBatchedAtLevel(self, cur_level)) {
19835 return at::_ops::erfinv_::call(self);
19836 }
19837 Tensor self_value;
19838 optional<int64_t> self_bdim;
19839 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19840 batch_rule(self_value, self_bdim);
19841 return self;
19842}
19843template <typename batch_rule_t, batch_rule_t batch_rule>
19844at::Tensor i0_generated_plumbing(const at::Tensor & self) {
19845 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19846 auto maybe_layer = maybeCurrentDynamicLayer();
19847 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19848 int64_t cur_level = maybe_layer->layerId();
19849 if (!isBatchedAtLevel(self, cur_level)) {
19850 return at::_ops::i0::call(self);
19851 }
19852 Tensor self_value;
19853 optional<int64_t> self_bdim;
19854 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19855 auto results = batch_rule(self_value, self_bdim);
19856 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19857}
19858template <typename batch_rule_t, batch_rule_t batch_rule>
19859at::Tensor & i0__generated_plumbing(at::Tensor & self) {
19860 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19861 auto maybe_layer = maybeCurrentDynamicLayer();
19862 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19863 int64_t cur_level = maybe_layer->layerId();
19864 if (!isBatchedAtLevel(self, cur_level)) {
19865 return at::_ops::i0_::call(self);
19866 }
19867 Tensor self_value;
19868 optional<int64_t> self_bdim;
19869 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19870 batch_rule(self_value, self_bdim);
19871 return self;
19872}
19873template <typename batch_rule_t, batch_rule_t batch_rule>
19874at::Tensor sign_generated_plumbing(const at::Tensor & self) {
19875 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19876 auto maybe_layer = maybeCurrentDynamicLayer();
19877 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19878 int64_t cur_level = maybe_layer->layerId();
19879 if (!isBatchedAtLevel(self, cur_level)) {
19880 return at::_ops::sign::call(self);
19881 }
19882 Tensor self_value;
19883 optional<int64_t> self_bdim;
19884 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19885 auto results = batch_rule(self_value, self_bdim);
19886 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19887}
19888template <typename batch_rule_t, batch_rule_t batch_rule>
19889at::Tensor & sign__generated_plumbing(at::Tensor & self) {
19890 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19891 auto maybe_layer = maybeCurrentDynamicLayer();
19892 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19893 int64_t cur_level = maybe_layer->layerId();
19894 if (!isBatchedAtLevel(self, cur_level)) {
19895 return at::_ops::sign_::call(self);
19896 }
19897 Tensor self_value;
19898 optional<int64_t> self_bdim;
19899 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19900 batch_rule(self_value, self_bdim);
19901 return self;
19902}
19903template <typename batch_rule_t, batch_rule_t batch_rule>
19904at::Tensor signbit_generated_plumbing(const at::Tensor & self) {
19905 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19906 auto maybe_layer = maybeCurrentDynamicLayer();
19907 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19908 int64_t cur_level = maybe_layer->layerId();
19909 if (!isBatchedAtLevel(self, cur_level)) {
19910 return at::_ops::signbit::call(self);
19911 }
19912 Tensor self_value;
19913 optional<int64_t> self_bdim;
19914 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19915 auto results = batch_rule(self_value, self_bdim);
19916 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19917}
19918template <typename batch_rule_t, batch_rule_t batch_rule>
19919at::Tensor dist_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
19920 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19921 auto maybe_layer = maybeCurrentDynamicLayer();
19922 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19923 int64_t cur_level = maybe_layer->layerId();
19924 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
19925 return at::_ops::dist::call(self, other, p);
19926 }
19927 Tensor self_value;
19928 optional<int64_t> self_bdim;
19929 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19930 Tensor other_value;
19931 optional<int64_t> other_bdim;
19932 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
19933 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, p);
19934 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19935}
19936template <typename batch_rule_t, batch_rule_t batch_rule>
19937at::Tensor & atan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
19938 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19939 auto maybe_layer = maybeCurrentDynamicLayer();
19940 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19941 int64_t cur_level = maybe_layer->layerId();
19942 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
19943 return at::_ops::atan2_::call(self, other);
19944 }
19945 Tensor self_value;
19946 optional<int64_t> self_bdim;
19947 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19948 Tensor other_value;
19949 optional<int64_t> other_bdim;
19950 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
19951 batch_rule(self_value, self_bdim, other_value, other_bdim);
19952 return self;
19953}
19954template <typename batch_rule_t, batch_rule_t batch_rule>
19955at::Tensor atan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
19956 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19957 auto maybe_layer = maybeCurrentDynamicLayer();
19958 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19959 int64_t cur_level = maybe_layer->layerId();
19960 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
19961 return at::_ops::atan2::call(self, other);
19962 }
19963 Tensor self_value;
19964 optional<int64_t> self_bdim;
19965 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19966 Tensor other_value;
19967 optional<int64_t> other_bdim;
19968 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
19969 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
19970 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19971}
19972template <typename batch_rule_t, batch_rule_t batch_rule>
19973at::Tensor arctan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
19974 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19975 auto maybe_layer = maybeCurrentDynamicLayer();
19976 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
19977 int64_t cur_level = maybe_layer->layerId();
19978 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
19979 return at::_ops::arctan2::call(self, other);
19980 }
19981 Tensor self_value;
19982 optional<int64_t> self_bdim;
19983 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
19984 Tensor other_value;
19985 optional<int64_t> other_bdim;
19986 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
19987 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
19988 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
19989}
19990template <typename batch_rule_t, batch_rule_t batch_rule>
19991at::Tensor & arctan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
19992 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
19993 auto maybe_layer = maybeCurrentDynamicLayer();
19994 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
19995 int64_t cur_level = maybe_layer->layerId();
19996 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
19997 return at::_ops::arctan2_::call(self, other);
19998 }
19999 Tensor self_value;
20000 optional<int64_t> self_bdim;
20001 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20002 Tensor other_value;
20003 optional<int64_t> other_bdim;
20004 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20005 batch_rule(self_value, self_bdim, other_value, other_bdim);
20006 return self;
20007}
20008template <typename batch_rule_t, batch_rule_t batch_rule>
20009at::Tensor lerp_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
20010 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20011 auto maybe_layer = maybeCurrentDynamicLayer();
20012 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20013 int64_t cur_level = maybe_layer->layerId();
20014 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
20015 return at::_ops::lerp_Scalar::call(self, end, weight);
20016 }
20017 Tensor self_value;
20018 optional<int64_t> self_bdim;
20019 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20020 Tensor end_value;
20021 optional<int64_t> end_bdim;
20022 std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
20023 auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
20024 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20025}
20026template <typename batch_rule_t, batch_rule_t batch_rule>
20027at::Tensor lerp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
20028 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20029 auto maybe_layer = maybeCurrentDynamicLayer();
20030 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20031 int64_t cur_level = maybe_layer->layerId();
20032 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
20033 return at::_ops::lerp_Tensor::call(self, end, weight);
20034 }
20035 Tensor self_value;
20036 optional<int64_t> self_bdim;
20037 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20038 Tensor end_value;
20039 optional<int64_t> end_bdim;
20040 std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
20041 Tensor weight_value;
20042 optional<int64_t> weight_bdim;
20043 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
20044 auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
20045 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20046}
20047template <typename batch_rule_t, batch_rule_t batch_rule>
20048at::Tensor histc_generated_plumbing(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
20049 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20050 auto maybe_layer = maybeCurrentDynamicLayer();
20051 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20052 int64_t cur_level = maybe_layer->layerId();
20053 if (!isBatchedAtLevel(self, cur_level)) {
20054 return at::_ops::histc::call(self, bins, min, max);
20055 }
20056 Tensor self_value;
20057 optional<int64_t> self_bdim;
20058 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20059 auto results = batch_rule(self_value, self_bdim, bins, min, max);
20060 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20061}
20062template <typename batch_rule_t, batch_rule_t batch_rule>
20063::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) {
20064 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20065 auto maybe_layer = maybeCurrentDynamicLayer();
20066 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20067 int64_t cur_level = maybe_layer->layerId();
20068 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
20069 return at::_ops::histogram_bins_tensor::call(self, bins, weight, density);
20070 }
20071 Tensor self_value;
20072 optional<int64_t> self_bdim;
20073 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20074 Tensor bins_value;
20075 optional<int64_t> bins_bdim;
20076 std::tie(bins_value, bins_bdim) = unwrapTensorAtLevel(bins, cur_level);
20077 optional<Tensor> weight_value;
20078 optional<int64_t> weight_bdim;
20079 if (weight) {
20080 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
20081 }
20082 auto results = batch_rule(self_value, self_bdim, bins_value, bins_bdim, weight_value, weight_bdim, density);
20083 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
20084}
20085template <typename batch_rule_t, batch_rule_t batch_rule>
20086::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
20087 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20088 auto maybe_layer = maybeCurrentDynamicLayer();
20089 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20090 int64_t cur_level = maybe_layer->layerId();
20091 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
20092 return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density);
20093 }
20094 Tensor self_value;
20095 optional<int64_t> self_bdim;
20096 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20097 optional<Tensor> weight_value;
20098 optional<int64_t> weight_bdim;
20099 if (weight) {
20100 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
20101 }
20102 auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
20103 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
20104}
20105template <typename batch_rule_t, batch_rule_t batch_rule>
20106::std::vector<at::Tensor> _histogramdd_bin_edges_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
20107 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20108 auto maybe_layer = maybeCurrentDynamicLayer();
20109 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20110 int64_t cur_level = maybe_layer->layerId();
20111 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
20112 return at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density);
20113 }
20114 Tensor self_value;
20115 optional<int64_t> self_bdim;
20116 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20117 optional<Tensor> weight_value;
20118 optional<int64_t> weight_bdim;
20119 if (weight) {
20120 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
20121 }
20122 auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
20123 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
20124}
20125template <typename batch_rule_t, batch_rule_t batch_rule>
20126at::Tensor _histogramdd_from_bin_cts_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
20127 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20128 auto maybe_layer = maybeCurrentDynamicLayer();
20129 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20130 int64_t cur_level = maybe_layer->layerId();
20131 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
20132 return at::_ops::_histogramdd_from_bin_cts::call(self, bins, range, weight, density);
20133 }
20134 Tensor self_value;
20135 optional<int64_t> self_bdim;
20136 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20137 optional<Tensor> weight_value;
20138 optional<int64_t> weight_bdim;
20139 if (weight) {
20140 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
20141 }
20142 auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
20143 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20144}
20145template <typename batch_rule_t, batch_rule_t batch_rule>
20146at::Tensor _histogramdd_from_bin_tensors_generated_plumbing(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density) {
20147 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20148 auto maybe_layer = maybeCurrentDynamicLayer();
20149 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20150 int64_t cur_level = maybe_layer->layerId();
20151 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
20152 return at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density);
20153 }
20154 Tensor self_value;
20155 optional<int64_t> self_bdim;
20156 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20157 optional<Tensor> weight_value;
20158 optional<int64_t> weight_bdim;
20159 if (weight) {
20160 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
20161 }
20162 auto results = batch_rule(self_value, self_bdim, bins, weight_value, weight_bdim, density);
20163 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20164}
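// histogramdd and its variants mix the two return conventions: the histogram
// tensor is re-wrapped with makeBatched while the trailing vector of tensors
// is re-wrapped with makeBatchedVector.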
20165template <typename batch_rule_t, batch_rule_t batch_rule>
20166::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
20167 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20168 auto maybe_layer = maybeCurrentDynamicLayer();
20169 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20170 int64_t cur_level = maybe_layer->layerId();
20171 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
20172 return at::_ops::histogramdd::call(self, bins, range, weight, density);
20173 }
20174 Tensor self_value;
20175 optional<int64_t> self_bdim;
20176 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20177 optional<Tensor> weight_value;
20178 optional<int64_t> weight_bdim;
20179 if (weight) {
20180 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
20181 }
20182 auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
20183 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
20184}
20185template <typename batch_rule_t, batch_rule_t batch_rule>
20186::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
20187 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20188 auto maybe_layer = maybeCurrentDynamicLayer();
20189 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20190 int64_t cur_level = maybe_layer->layerId();
20191 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
20192 return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density);
20193 }
20194 Tensor self_value;
20195 optional<int64_t> self_bdim;
20196 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20197 optional<Tensor> weight_value;
20198 optional<int64_t> weight_bdim;
20199 if (weight) {
20200 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
20201 }
20202 auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
20203 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
20204}
20205template <typename batch_rule_t, batch_rule_t batch_rule>
20206::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins_generated_plumbing(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
20207 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20208 auto maybe_layer = maybeCurrentDynamicLayer();
20209 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20210 int64_t cur_level = maybe_layer->layerId();
20211 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
20212 return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density);
20213 }
20214 Tensor self_value;
20215 optional<int64_t> self_bdim;
20216 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20217 optional<Tensor> weight_value;
20218 optional<int64_t> weight_bdim;
20219 if (weight) {
20220 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
20221 }
20222 auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
20223 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
20224}
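// Note: the operator plumbings below all follow the same generated pattern —
// exclude the FuncTorchBatched dispatch key, unwrap each batched Tensor
// argument at the current dynamic-layer level, invoke the registered
// batch_rule with the unwrapped values and their batch dims, then re-wrap the
// results at that level with makeBatched / makeBatchedVector.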
20225template <typename batch_rule_t, batch_rule_t batch_rule>
20226at::Tensor fmod_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
20227 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20228 auto maybe_layer = maybeCurrentDynamicLayer();
20229 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20230 int64_t cur_level = maybe_layer->layerId();
20231 if (!isBatchedAtLevel(self, cur_level)) {
20232 return at::_ops::fmod_Scalar::call(self, other);
20233 }
20234 Tensor self_value;
20235 optional<int64_t> self_bdim;
20236 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20237 auto results = batch_rule(self_value, self_bdim, other);
20238 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20239}
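// In-place variants (checked via "gen_vmap_inplace_plumbing") call the
// batch_rule only for its side effect on the unwrapped value and return
// `self` unchanged.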
20240template <typename batch_rule_t, batch_rule_t batch_rule>
20241at::Tensor & fmod__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
20242 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20243 auto maybe_layer = maybeCurrentDynamicLayer();
20244 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20245 int64_t cur_level = maybe_layer->layerId();
20246 if (!isBatchedAtLevel(self, cur_level)) {
20247 return at::_ops::fmod__Scalar::call(self, other);
20248 }
20249 Tensor self_value;
20250 optional<int64_t> self_bdim;
20251 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20252 batch_rule(self_value, self_bdim, other);
20253 return self;
20254}
20255template <typename batch_rule_t, batch_rule_t batch_rule>
20256at::Tensor fmod_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20257 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20258 auto maybe_layer = maybeCurrentDynamicLayer();
20259 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20260 int64_t cur_level = maybe_layer->layerId();
20261 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20262 return at::_ops::fmod_Tensor::call(self, other);
20263 }
20264 Tensor self_value;
20265 optional<int64_t> self_bdim;
20266 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20267 Tensor other_value;
20268 optional<int64_t> other_bdim;
20269 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20270 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20271 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20272}
20273template <typename batch_rule_t, batch_rule_t batch_rule>
20274at::Tensor & fmod__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
20275 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20276 auto maybe_layer = maybeCurrentDynamicLayer();
20277 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20278 int64_t cur_level = maybe_layer->layerId();
20279 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20280 return at::_ops::fmod__Tensor::call(self, other);
20281 }
20282 Tensor self_value;
20283 optional<int64_t> self_bdim;
20284 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20285 Tensor other_value;
20286 optional<int64_t> other_bdim;
20287 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20288 batch_rule(self_value, self_bdim, other_value, other_bdim);
20289 return self;
20290}
20291template <typename batch_rule_t, batch_rule_t batch_rule>
20292at::Tensor hypot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20293 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20294 auto maybe_layer = maybeCurrentDynamicLayer();
20295 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20296 int64_t cur_level = maybe_layer->layerId();
20297 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20298 return at::_ops::hypot::call(self, other);
20299 }
20300 Tensor self_value;
20301 optional<int64_t> self_bdim;
20302 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20303 Tensor other_value;
20304 optional<int64_t> other_bdim;
20305 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20306 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20307 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20308}
20309template <typename batch_rule_t, batch_rule_t batch_rule>
20310at::Tensor & hypot__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
20311 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20312 auto maybe_layer = maybeCurrentDynamicLayer();
20313 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20314 int64_t cur_level = maybe_layer->layerId();
20315 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20316 return at::_ops::hypot_::call(self, other);
20317 }
20318 Tensor self_value;
20319 optional<int64_t> self_bdim;
20320 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20321 Tensor other_value;
20322 optional<int64_t> other_bdim;
20323 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20324 batch_rule(self_value, self_bdim, other_value, other_bdim);
20325 return self;
20326}
20327template <typename batch_rule_t, batch_rule_t batch_rule>
20328at::Tensor igamma_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20329 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20330 auto maybe_layer = maybeCurrentDynamicLayer();
20331 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20332 int64_t cur_level = maybe_layer->layerId();
20333 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20334 return at::_ops::igamma::call(self, other);
20335 }
20336 Tensor self_value;
20337 optional<int64_t> self_bdim;
20338 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20339 Tensor other_value;
20340 optional<int64_t> other_bdim;
20341 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20342 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20343 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20344}
20345template <typename batch_rule_t, batch_rule_t batch_rule>
20346at::Tensor & igamma__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
20347 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20348 auto maybe_layer = maybeCurrentDynamicLayer();
20349 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20350 int64_t cur_level = maybe_layer->layerId();
20351 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20352 return at::_ops::igamma_::call(self, other);
20353 }
20354 Tensor self_value;
20355 optional<int64_t> self_bdim;
20356 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20357 Tensor other_value;
20358 optional<int64_t> other_bdim;
20359 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20360 batch_rule(self_value, self_bdim, other_value, other_bdim);
20361 return self;
20362}
20363template <typename batch_rule_t, batch_rule_t batch_rule>
20364at::Tensor igammac_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20365 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20366 auto maybe_layer = maybeCurrentDynamicLayer();
20367 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20368 int64_t cur_level = maybe_layer->layerId();
20369 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20370 return at::_ops::igammac::call(self, other);
20371 }
20372 Tensor self_value;
20373 optional<int64_t> self_bdim;
20374 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20375 Tensor other_value;
20376 optional<int64_t> other_bdim;
20377 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20378 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20379 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20380}
20381template <typename batch_rule_t, batch_rule_t batch_rule>
20382at::Tensor & igammac__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
20383 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20384 auto maybe_layer = maybeCurrentDynamicLayer();
20385 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20386 int64_t cur_level = maybe_layer->layerId();
20387 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20388 return at::_ops::igammac_::call(self, other);
20389 }
20390 Tensor self_value;
20391 optional<int64_t> self_bdim;
20392 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20393 Tensor other_value;
20394 optional<int64_t> other_bdim;
20395 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20396 batch_rule(self_value, self_bdim, other_value, other_bdim);
20397 return self;
20398}
20399template <typename batch_rule_t, batch_rule_t batch_rule>
20400at::Tensor nextafter_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20401 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20402 auto maybe_layer = maybeCurrentDynamicLayer();
20403 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20404 int64_t cur_level = maybe_layer->layerId();
20405 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20406 return at::_ops::nextafter::call(self, other);
20407 }
20408 Tensor self_value;
20409 optional<int64_t> self_bdim;
20410 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20411 Tensor other_value;
20412 optional<int64_t> other_bdim;
20413 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20414 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20415 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20416}
20417template <typename batch_rule_t, batch_rule_t batch_rule>
20418at::Tensor & nextafter__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
20419 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20420 auto maybe_layer = maybeCurrentDynamicLayer();
20421 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20422 int64_t cur_level = maybe_layer->layerId();
20423 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20424 return at::_ops::nextafter_::call(self, other);
20425 }
20426 Tensor self_value;
20427 optional<int64_t> self_bdim;
20428 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20429 Tensor other_value;
20430 optional<int64_t> other_bdim;
20431 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20432 batch_rule(self_value, self_bdim, other_value, other_bdim);
20433 return self;
20434}
20435template <typename batch_rule_t, batch_rule_t batch_rule>
20436at::Tensor remainder_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
20437 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20438 auto maybe_layer = maybeCurrentDynamicLayer();
20439 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20440 int64_t cur_level = maybe_layer->layerId();
20441 if (!isBatchedAtLevel(self, cur_level)) {
20442 return at::_ops::remainder_Scalar::call(self, other);
20443 }
20444 Tensor self_value;
20445 optional<int64_t> self_bdim;
20446 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20447 auto results = batch_rule(self_value, self_bdim, other);
20448 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20449}
20450template <typename batch_rule_t, batch_rule_t batch_rule>
20451at::Tensor & remainder__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
20452 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20453 auto maybe_layer = maybeCurrentDynamicLayer();
20454 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20455 int64_t cur_level = maybe_layer->layerId();
20456 if (!isBatchedAtLevel(self, cur_level)) {
20457 return at::_ops::remainder__Scalar::call(self, other);
20458 }
20459 Tensor self_value;
20460 optional<int64_t> self_bdim;
20461 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20462 batch_rule(self_value, self_bdim, other);
20463 return self;
20464}
20465template <typename batch_rule_t, batch_rule_t batch_rule>
20466at::Tensor remainder_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20467 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20468 auto maybe_layer = maybeCurrentDynamicLayer();
20469 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20470 int64_t cur_level = maybe_layer->layerId();
20471 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20472 return at::_ops::remainder_Tensor::call(self, other);
20473 }
20474 Tensor self_value;
20475 optional<int64_t> self_bdim;
20476 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20477 Tensor other_value;
20478 optional<int64_t> other_bdim;
20479 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20480 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20481 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20482}
20483template <typename batch_rule_t, batch_rule_t batch_rule>
20484at::Tensor & remainder__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
20485 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20486 auto maybe_layer = maybeCurrentDynamicLayer();
20487 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20488 int64_t cur_level = maybe_layer->layerId();
20489 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20490 return at::_ops::remainder__Tensor::call(self, other);
20491 }
20492 Tensor self_value;
20493 optional<int64_t> self_bdim;
20494 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20495 Tensor other_value;
20496 optional<int64_t> other_bdim;
20497 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20498 batch_rule(self_value, self_bdim, other_value, other_bdim);
20499 return self;
20500}
20501template <typename batch_rule_t, batch_rule_t batch_rule>
20502at::Tensor remainder_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
20503 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20504 auto maybe_layer = maybeCurrentDynamicLayer();
20505 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20506 int64_t cur_level = maybe_layer->layerId();
20507 if (!isBatchedAtLevel(other, cur_level)) {
20508 return at::_ops::remainder_Scalar_Tensor::call(self, other);
20509 }
20510 Tensor other_value;
20511 optional<int64_t> other_bdim;
20512 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20513 auto results = batch_rule(self, other_value, other_bdim);
20514 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20515}
20516template <typename batch_rule_t, batch_rule_t batch_rule>
20517at::Tensor min_generated_plumbing(const at::Tensor & self) {
20518 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20519 auto maybe_layer = maybeCurrentDynamicLayer();
20520 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20521 int64_t cur_level = maybe_layer->layerId();
20522 if (!isBatchedAtLevel(self, cur_level)) {
20523 return at::_ops::min::call(self);
20524 }
20525 Tensor self_value;
20526 optional<int64_t> self_bdim;
20527 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20528 auto results = batch_rule(self_value, self_bdim);
20529 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20530}
20531template <typename batch_rule_t, batch_rule_t batch_rule>
20532at::Tensor fmin_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20533 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20534 auto maybe_layer = maybeCurrentDynamicLayer();
20535 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20536 int64_t cur_level = maybe_layer->layerId();
20537 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20538 return at::_ops::fmin::call(self, other);
20539 }
20540 Tensor self_value;
20541 optional<int64_t> self_bdim;
20542 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20543 Tensor other_value;
20544 optional<int64_t> other_bdim;
20545 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20546 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20547 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20548}
20549template <typename batch_rule_t, batch_rule_t batch_rule>
20550at::Tensor max_generated_plumbing(const at::Tensor & self) {
20551 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20552 auto maybe_layer = maybeCurrentDynamicLayer();
20553 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20554 int64_t cur_level = maybe_layer->layerId();
20555 if (!isBatchedAtLevel(self, cur_level)) {
20556 return at::_ops::max::call(self);
20557 }
20558 Tensor self_value;
20559 optional<int64_t> self_bdim;
20560 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20561 auto results = batch_rule(self_value, self_bdim);
20562 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20563}
20564template <typename batch_rule_t, batch_rule_t batch_rule>
20565at::Tensor fmax_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20566 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20567 auto maybe_layer = maybeCurrentDynamicLayer();
20568 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20569 int64_t cur_level = maybe_layer->layerId();
20570 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20571 return at::_ops::fmax::call(self, other);
20572 }
20573 Tensor self_value;
20574 optional<int64_t> self_bdim;
20575 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20576 Tensor other_value;
20577 optional<int64_t> other_bdim;
20578 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20579 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20580 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20581}
20582template <typename batch_rule_t, batch_rule_t batch_rule>
20583at::Tensor maximum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20584 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20585 auto maybe_layer = maybeCurrentDynamicLayer();
20586 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20587 int64_t cur_level = maybe_layer->layerId();
20588 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20589 return at::_ops::maximum::call(self, other);
20590 }
20591 Tensor self_value;
20592 optional<int64_t> self_bdim;
20593 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20594 Tensor other_value;
20595 optional<int64_t> other_bdim;
20596 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20597 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20598 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20599}
20600template <typename batch_rule_t, batch_rule_t batch_rule>
20601at::Tensor max_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20602 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20603 auto maybe_layer = maybeCurrentDynamicLayer();
20604 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20605 int64_t cur_level = maybe_layer->layerId();
20606 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20607 return at::_ops::max_other::call(self, other);
20608 }
20609 Tensor self_value;
20610 optional<int64_t> self_bdim;
20611 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20612 Tensor other_value;
20613 optional<int64_t> other_bdim;
20614 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20615 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20616 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20617}
20618template <typename batch_rule_t, batch_rule_t batch_rule>
20619at::Tensor minimum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20620 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20621 auto maybe_layer = maybeCurrentDynamicLayer();
20622 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20623 int64_t cur_level = maybe_layer->layerId();
20624 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20625 return at::_ops::minimum::call(self, other);
20626 }
20627 Tensor self_value;
20628 optional<int64_t> self_bdim;
20629 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20630 Tensor other_value;
20631 optional<int64_t> other_bdim;
20632 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20633 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20634 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20635}
20636template <typename batch_rule_t, batch_rule_t batch_rule>
20637at::Tensor min_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
20638 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20639 auto maybe_layer = maybeCurrentDynamicLayer();
20640 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20641 int64_t cur_level = maybe_layer->layerId();
20642 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
20643 return at::_ops::min_other::call(self, other);
20644 }
20645 Tensor self_value;
20646 optional<int64_t> self_bdim;
20647 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20648 Tensor other_value;
20649 optional<int64_t> other_bdim;
20650 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
20651 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
20652 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20653}
20654template <typename batch_rule_t, batch_rule_t batch_rule>
20655at::Tensor quantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
20656 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20657 auto maybe_layer = maybeCurrentDynamicLayer();
20658 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20659 int64_t cur_level = maybe_layer->layerId();
20660 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
20661 return at::_ops::quantile::call(self, q, dim, keepdim, interpolation);
20662 }
20663 Tensor self_value;
20664 optional<int64_t> self_bdim;
20665 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20666 Tensor q_value;
20667 optional<int64_t> q_bdim;
20668 std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
20669 auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
20670 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20671}
20672template <typename batch_rule_t, batch_rule_t batch_rule>
20673at::Tensor quantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
20674 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20675 auto maybe_layer = maybeCurrentDynamicLayer();
20676 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20677 int64_t cur_level = maybe_layer->layerId();
20678 if (!isBatchedAtLevel(self, cur_level)) {
20679 return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation);
20680 }
20681 Tensor self_value;
20682 optional<int64_t> self_bdim;
20683 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20684 auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
20685 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20686}
20687template <typename batch_rule_t, batch_rule_t batch_rule>
20688at::Tensor nanquantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
20689 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20690 auto maybe_layer = maybeCurrentDynamicLayer();
20691 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20692 int64_t cur_level = maybe_layer->layerId();
20693 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
20694 return at::_ops::nanquantile::call(self, q, dim, keepdim, interpolation);
20695 }
20696 Tensor self_value;
20697 optional<int64_t> self_bdim;
20698 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20699 Tensor q_value;
20700 optional<int64_t> q_bdim;
20701 std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
20702 auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
20703 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20704}
20705template <typename batch_rule_t, batch_rule_t batch_rule>
20706at::Tensor nanquantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
20707 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20708 auto maybe_layer = maybeCurrentDynamicLayer();
20709 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20710 int64_t cur_level = maybe_layer->layerId();
20711 if (!isBatchedAtLevel(self, cur_level)) {
20712 return at::_ops::nanquantile_scalar::call(self, q, dim, keepdim, interpolation);
20713 }
20714 Tensor self_value;
20715 optional<int64_t> self_bdim;
20716 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20717 auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
20718 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20719}
20720template <typename batch_rule_t, batch_rule_t batch_rule>
20721::std::tuple<at::Tensor,at::Tensor> sort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
20722 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20723 auto maybe_layer = maybeCurrentDynamicLayer();
20724 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20725 int64_t cur_level = maybe_layer->layerId();
20726 if (!isBatchedAtLevel(self, cur_level)) {
20727 return at::_ops::sort::call(self, dim, descending);
20728 }
20729 Tensor self_value;
20730 optional<int64_t> self_bdim;
20731 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20732 auto results = batch_rule(self_value, self_bdim, dim, descending);
20733 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
20734}
20735template <typename batch_rule_t, batch_rule_t batch_rule>
20736::std::tuple<at::Tensor,at::Tensor> sort_stable_generated_plumbing(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
20737 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20738 auto maybe_layer = maybeCurrentDynamicLayer();
20739 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20740 int64_t cur_level = maybe_layer->layerId();
20741 if (!isBatchedAtLevel(self, cur_level)) {
20742 return at::_ops::sort_stable::call(self, stable, dim, descending);
20743 }
20744 Tensor self_value;
20745 optional<int64_t> self_bdim;
20746 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20747 auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
20748 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
20749}
20750template <typename batch_rule_t, batch_rule_t batch_rule>
20751::std::tuple<at::Tensor,at::Tensor> sort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
20752 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20753 auto maybe_layer = maybeCurrentDynamicLayer();
20754 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20755 int64_t cur_level = maybe_layer->layerId();
20756 if (!isBatchedAtLevel(self, cur_level)) {
20757 return at::_ops::sort_dimname::call(self, dim, descending);
20758 }
20759 Tensor self_value;
20760 optional<int64_t> self_bdim;
20761 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20762 auto results = batch_rule(self_value, self_bdim, dim, descending);
20763 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
20764}
20765template <typename batch_rule_t, batch_rule_t batch_rule>
20766::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable_generated_plumbing(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
20767 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20768 auto maybe_layer = maybeCurrentDynamicLayer();
20769 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20770 int64_t cur_level = maybe_layer->layerId();
20771 if (!isBatchedAtLevel(self, cur_level)) {
20772 return at::_ops::sort_dimname_stable::call(self, stable, dim, descending);
20773 }
20774 Tensor self_value;
20775 optional<int64_t> self_bdim;
20776 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20777 auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
20778 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
20779}
20780template <typename batch_rule_t, batch_rule_t batch_rule>
20781at::Tensor msort_generated_plumbing(const at::Tensor & self) {
20782 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20783 auto maybe_layer = maybeCurrentDynamicLayer();
20784 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20785 int64_t cur_level = maybe_layer->layerId();
20786 if (!isBatchedAtLevel(self, cur_level)) {
20787 return at::_ops::msort::call(self);
20788 }
20789 Tensor self_value;
20790 optional<int64_t> self_bdim;
20791 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20792 auto results = batch_rule(self_value, self_bdim);
20793 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20794}
20795template <typename batch_rule_t, batch_rule_t batch_rule>
20796at::Tensor argsort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
20797 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20798 auto maybe_layer = maybeCurrentDynamicLayer();
20799 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20800 int64_t cur_level = maybe_layer->layerId();
20801 if (!isBatchedAtLevel(self, cur_level)) {
20802 return at::_ops::argsort::call(self, dim, descending);
20803 }
20804 Tensor self_value;
20805 optional<int64_t> self_bdim;
20806 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20807 auto results = batch_rule(self_value, self_bdim, dim, descending);
20808 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20809}
20810template <typename batch_rule_t, batch_rule_t batch_rule>
20811at::Tensor argsort_stable_generated_plumbing(const at::Tensor & self, bool stable, int64_t dim, bool descending) {
20812 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20813 auto maybe_layer = maybeCurrentDynamicLayer();
20814 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20815 int64_t cur_level = maybe_layer->layerId();
20816 if (!isBatchedAtLevel(self, cur_level)) {
20817 return at::_ops::argsort_stable::call(self, stable, dim, descending);
20818 }
20819 Tensor self_value;
20820 optional<int64_t> self_bdim;
20821 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20822 auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
20823 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20824}
20825template <typename batch_rule_t, batch_rule_t batch_rule>
20826at::Tensor argsort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
20827 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20828 auto maybe_layer = maybeCurrentDynamicLayer();
20829 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20830 int64_t cur_level = maybe_layer->layerId();
20831 if (!isBatchedAtLevel(self, cur_level)) {
20832 return at::_ops::argsort_dimname::call(self, dim, descending);
20833 }
20834 Tensor self_value;
20835 optional<int64_t> self_bdim;
20836 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20837 auto results = batch_rule(self_value, self_bdim, dim, descending);
20838 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20839}
20840template <typename batch_rule_t, batch_rule_t batch_rule>
20841::std::tuple<at::Tensor,at::Tensor> topk_generated_plumbing(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
20842 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20843 auto maybe_layer = maybeCurrentDynamicLayer();
20844 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20845 int64_t cur_level = maybe_layer->layerId();
20846 if (!isBatchedAtLevel(self, cur_level)) {
20847 return at::_ops::topk::call(self, k, dim, largest, sorted);
20848 }
20849 Tensor self_value;
20850 optional<int64_t> self_bdim;
20851 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20852 auto results = batch_rule(self_value, self_bdim, k, dim, largest, sorted);
20853 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
20854}
20855template <typename batch_rule_t, batch_rule_t batch_rule>
20856at::Tensor all_generated_plumbing(const at::Tensor & self) {
20857 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20858 auto maybe_layer = maybeCurrentDynamicLayer();
20859 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20860 int64_t cur_level = maybe_layer->layerId();
20861 if (!isBatchedAtLevel(self, cur_level)) {
20862 return at::_ops::all::call(self);
20863 }
20864 Tensor self_value;
20865 optional<int64_t> self_bdim;
20866 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20867 auto results = batch_rule(self_value, self_bdim);
20868 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20869}
20870template <typename batch_rule_t, batch_rule_t batch_rule>
20871at::Tensor any_generated_plumbing(const at::Tensor & self) {
20872 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20873 auto maybe_layer = maybeCurrentDynamicLayer();
20874 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20875 int64_t cur_level = maybe_layer->layerId();
20876 if (!isBatchedAtLevel(self, cur_level)) {
20877 return at::_ops::any::call(self);
20878 }
20879 Tensor self_value;
20880 optional<int64_t> self_bdim;
20881 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20882 auto results = batch_rule(self_value, self_bdim);
20883 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20884}
20885template <typename batch_rule_t, batch_rule_t batch_rule>
20886at::Tensor renorm_generated_plumbing(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
20887 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20888 auto maybe_layer = maybeCurrentDynamicLayer();
20889 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20890 int64_t cur_level = maybe_layer->layerId();
20891 if (!isBatchedAtLevel(self, cur_level)) {
20892 return at::_ops::renorm::call(self, p, dim, maxnorm);
20893 }
20894 Tensor self_value;
20895 optional<int64_t> self_bdim;
20896 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20897 auto results = batch_rule(self_value, self_bdim, p, dim, maxnorm);
20898 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20899}
20900template <typename batch_rule_t, batch_rule_t batch_rule>
20901at::Tensor & renorm__generated_plumbing(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
20902 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20903 auto maybe_layer = maybeCurrentDynamicLayer();
20904 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20905 int64_t cur_level = maybe_layer->layerId();
20906 if (!isBatchedAtLevel(self, cur_level)) {
20907 return at::_ops::renorm_::call(self, p, dim, maxnorm);
20908 }
20909 Tensor self_value;
20910 optional<int64_t> self_bdim;
20911 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20912 batch_rule(self_value, self_bdim, p, dim, maxnorm);
20913 return self;
20914}
20915template <typename batch_rule_t, batch_rule_t batch_rule>
20916at::Tensor unfold_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
20917 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20918 auto maybe_layer = maybeCurrentDynamicLayer();
20919 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20920 int64_t cur_level = maybe_layer->layerId();
20921 if (!isBatchedAtLevel(self, cur_level)) {
20922 return at::_ops::unfold::call(self, dimension, size, step);
20923 }
20924 Tensor self_value;
20925 optional<int64_t> self_bdim;
20926 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20927 auto results = batch_rule(self_value, self_bdim, dimension, size, step);
20928 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20929}
20930template <typename batch_rule_t, batch_rule_t batch_rule>
20931at::Tensor unfold_backward_generated_plumbing(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
20932 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20933 auto maybe_layer = maybeCurrentDynamicLayer();
20934 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20935 int64_t cur_level = maybe_layer->layerId();
20936 if (!isBatchedAtLevel(grad_in, cur_level)) {
20937 return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
20938 }
20939 Tensor grad_in_value;
20940 optional<int64_t> grad_in_bdim;
20941 std::tie(grad_in_value, grad_in_bdim) = unwrapTensorAtLevel(grad_in, cur_level);
20942 auto results = batch_rule(grad_in_value, grad_in_bdim, input_sizes, dim, size, step);
20943 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20944}
20945template <typename batch_rule_t, batch_rule_t batch_rule>
20946at::Tensor pow_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
20947 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20948 auto maybe_layer = maybeCurrentDynamicLayer();
20949 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20950 int64_t cur_level = maybe_layer->layerId();
20951 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
20952 return at::_ops::pow_Tensor_Tensor::call(self, exponent);
20953 }
20954 Tensor self_value;
20955 optional<int64_t> self_bdim;
20956 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20957 Tensor exponent_value;
20958 optional<int64_t> exponent_bdim;
20959 std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
20960 auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
20961 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20962}
20963template <typename batch_rule_t, batch_rule_t batch_rule>
20964at::Tensor pow_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
20965 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20966 auto maybe_layer = maybeCurrentDynamicLayer();
20967 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20968 int64_t cur_level = maybe_layer->layerId();
20969 if (!isBatchedAtLevel(exponent, cur_level)) {
20970 return at::_ops::pow_Scalar::call(self, exponent);
20971 }
20972 Tensor exponent_value;
20973 optional<int64_t> exponent_bdim;
20974 std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
20975 auto results = batch_rule(self, exponent_value, exponent_bdim);
20976 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20977}
20978template <typename batch_rule_t, batch_rule_t batch_rule>
20979at::Tensor pow_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
20980 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20981 auto maybe_layer = maybeCurrentDynamicLayer();
20982 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
20983 int64_t cur_level = maybe_layer->layerId();
20984 if (!isBatchedAtLevel(self, cur_level)) {
20985 return at::_ops::pow_Tensor_Scalar::call(self, exponent);
20986 }
20987 Tensor self_value;
20988 optional<int64_t> self_bdim;
20989 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
20990 auto results = batch_rule(self_value, self_bdim, exponent);
20991 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
20992}
20993template <typename batch_rule_t, batch_rule_t batch_rule>
20994at::Tensor & pow__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
20995 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
20996 auto maybe_layer = maybeCurrentDynamicLayer();
20997 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
20998 int64_t cur_level = maybe_layer->layerId();
20999 if (!isBatchedAtLevel(self, cur_level)) {
21000 return at::_ops::pow__Scalar::call(self, exponent);
21001 }
21002 Tensor self_value;
21003 optional<int64_t> self_bdim;
21004 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
21005 batch_rule(self_value, self_bdim, exponent);
21006 return self;
21007}
21008template <typename batch_rule_t, batch_rule_t batch_rule>
21009at::Tensor & pow__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
21010 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21011 auto maybe_layer = maybeCurrentDynamicLayer();
21012 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
21013 int64_t cur_level = maybe_layer->layerId();
21014 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
21015 return at::_ops::pow__Tensor::call(self, exponent);
21016 }
21017 Tensor self_value;
21018 optional<int64_t> self_bdim;
21019 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
21020 Tensor exponent_value;
21021 optional<int64_t> exponent_bdim;
21022 std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
21023 batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
21024 return self;
21025}
21026template <typename batch_rule_t, batch_rule_t batch_rule>
21027at::Tensor float_power_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
21028 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21029 auto maybe_layer = maybeCurrentDynamicLayer();
21030 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21031 int64_t cur_level = maybe_layer->layerId();
21032 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
21033 return at::_ops::float_power_Tensor_Tensor::call(self, exponent);
21034 }
21035 Tensor self_value;
21036 optional<int64_t> self_bdim;
21037 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
21038 Tensor exponent_value;
21039 optional<int64_t> exponent_bdim;
21040 std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
21041 auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
21042 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21043}
21044template <typename batch_rule_t, batch_rule_t batch_rule>
21045at::Tensor float_power_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
21046 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21047 auto maybe_layer = maybeCurrentDynamicLayer();
21048 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21049 int64_t cur_level = maybe_layer->layerId();
21050 if (!isBatchedAtLevel(exponent, cur_level)) {
21051 return at::_ops::float_power_Scalar::call(self, exponent);
21052 }
21053 Tensor exponent_value;
21054 optional<int64_t> exponent_bdim;
21055 std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
21056 auto results = batch_rule(self, exponent_value, exponent_bdim);
21057 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21058}
21059template <typename batch_rule_t, batch_rule_t batch_rule>
21060at::Tensor float_power_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
21061 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21062 auto maybe_layer = maybeCurrentDynamicLayer();
21063 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21064 int64_t cur_level = maybe_layer->layerId();
21065 if (!isBatchedAtLevel(self, cur_level)) {
21066 return at::_ops::float_power_Tensor_Scalar::call(self, exponent);
21067 }
21068 Tensor self_value;
21069 optional<int64_t> self_bdim;
21070 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
21071 auto results = batch_rule(self_value, self_bdim, exponent);
21072 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21073}
21074template <typename batch_rule_t, batch_rule_t batch_rule>
21075at::Tensor & float_power__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
21076 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21077 auto maybe_layer = maybeCurrentDynamicLayer();
21078 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
21079 int64_t cur_level = maybe_layer->layerId();
21080 if (!isBatchedAtLevel(self, cur_level)) {
21081 return at::_ops::float_power__Scalar::call(self, exponent);
21082 }
21083 Tensor self_value;
21084 optional<int64_t> self_bdim;
21085 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
21086 batch_rule(self_value, self_bdim, exponent);
21087 return self;
21088}
21089template <typename batch_rule_t, batch_rule_t batch_rule>
21090at::Tensor & float_power__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
21091 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21092 auto maybe_layer = maybeCurrentDynamicLayer();
21093 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
21094 int64_t cur_level = maybe_layer->layerId();
21095 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
21096 return at::_ops::float_power__Tensor::call(self, exponent);
21097 }
21098 Tensor self_value;
21099 optional<int64_t> self_bdim;
21100 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
21101 Tensor exponent_value;
21102 optional<int64_t> exponent_bdim;
21103 std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
21104 batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
21105 return self;
21106}
21107template <typename batch_rule_t, batch_rule_t batch_rule>
21108at::Tensor & normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
21109 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21110 auto maybe_layer = maybeCurrentDynamicLayer();
21111 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
21112 int64_t cur_level = maybe_layer->layerId();
21113 if (!isBatchedAtLevel(self, cur_level)) {
21114 return at::_ops::normal_::call(self, mean, std, generator);
21115 }
21116 Tensor self_value;
21117 optional<int64_t> self_bdim;
21118 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
21119 batch_rule(self_value, self_bdim, mean, std, generator);
21120 return self;
21121}
21122template <typename batch_rule_t, batch_rule_t batch_rule>
21123at::Tensor normal_functional_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
21124 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21125 auto maybe_layer = maybeCurrentDynamicLayer();
21126 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21127 int64_t cur_level = maybe_layer->layerId();
21128 if (!isBatchedAtLevel(self, cur_level)) {
21129 return at::_ops::normal_functional::call(self, mean, std, generator);
21130 }
21131 Tensor self_value;
21132 optional<int64_t> self_bdim;
21133 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
21134 auto results = batch_rule(self_value, self_bdim, mean, std, generator);
21135 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21136}
21137template <typename batch_rule_t, batch_rule_t batch_rule>
21138at::Tensor normal_Tensor_float_generated_plumbing(const at::Tensor & mean, double std, c10::optional<at::Generator> generator) {
21139 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21140 auto maybe_layer = maybeCurrentDynamicLayer();
21141 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21142 int64_t cur_level = maybe_layer->layerId();
21143 if (!isBatchedAtLevel(mean, cur_level)) {
21144 return at::_ops::normal_Tensor_float::call(mean, std, generator);
21145 }
21146 Tensor mean_value;
21147 optional<int64_t> mean_bdim;
21148 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
21149 auto results = batch_rule(mean_value, mean_bdim, std, generator);
21150 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21151}
21152template <typename batch_rule_t, batch_rule_t batch_rule>
21153at::Tensor normal_float_Tensor_generated_plumbing(double mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
21154 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21155 auto maybe_layer = maybeCurrentDynamicLayer();
21156 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21157 int64_t cur_level = maybe_layer->layerId();
21158 if (!isBatchedAtLevel(std, cur_level)) {
21159 return at::_ops::normal_float_Tensor::call(mean, std, generator);
21160 }
21161 Tensor std_value;
21162 optional<int64_t> std_bdim;
21163 std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level);
21164 auto results = batch_rule(mean, std_value, std_bdim, generator);
21165 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21166}
21167template <typename batch_rule_t, batch_rule_t batch_rule>
21168at::Tensor normal_Tensor_Tensor_generated_plumbing(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
21169 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21170 auto maybe_layer = maybeCurrentDynamicLayer();
21171 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21172 int64_t cur_level = maybe_layer->layerId();
21173 if (!isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(std, cur_level)) {
21174 return at::_ops::normal_Tensor_Tensor::call(mean, std, generator);
21175 }
21176 Tensor mean_value;
21177 optional<int64_t> mean_bdim;
21178 std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
21179 Tensor std_value;
21180 optional<int64_t> std_bdim;
21181 std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level);
21182 auto results = batch_rule(mean_value, mean_bdim, std_value, std_bdim, generator);
21183 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21184}
21185template <typename batch_rule_t, batch_rule_t batch_rule>
21186at::Tensor alias_generated_plumbing(const at::Tensor & self) {
21187 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21188 auto maybe_layer = maybeCurrentDynamicLayer();
21189 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21190 int64_t cur_level = maybe_layer->layerId();
21191 if (!isBatchedAtLevel(self, cur_level)) {
21192 return at::_ops::alias::call(self);
21193 }
21194 Tensor self_value;
21195 optional<int64_t> self_bdim;
21196 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
21197 auto results = batch_rule(self_value, self_bdim);
21198 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
21199}
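// NOTE (added comment): the _amp_foreach_non_finite_check_and_unscale_ plumbing
// below is the void-returning, in-place shape of the pattern: the escape check
// uses "gen_vmap_plumbing_no_returns", the Tensor arguments (found_inf, inv_scale)
// are unwrapped at the current level, and the batch rule is invoked purely for
// its side effects -- there is no result to re-wrap with makeBatched.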
21200template <typename batch_rule_t, batch_rule_t batch_rule>
21201void _amp_foreach_non_finite_check_and_unscale__generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
21202 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21203 auto maybe_layer = maybeCurrentDynamicLayer();
21204 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21205 int64_t cur_level = maybe_layer->layerId();
21206 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
21207 return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
21208 }
21209 Tensor found_inf_value;
21210 optional<int64_t> found_inf_bdim;
21211 std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
21212 Tensor inv_scale_value;
21213 optional<int64_t> inv_scale_bdim;
21214 std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
21215 batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
21216}
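// NOTE (added comment): for the _foreach_* plumbing below, TensorList arguments
// are forwarded to the batch rule unchanged (no per-tensor unwrapping is done at
// this level). Out-of-place variants re-wrap the returned vector of tensors via
// makeBatchedVector at the current level; the in-place (trailing-underscore)
// variants return void and call the batch rule only for its side effects.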
21217template <typename batch_rule_t, batch_rule_t batch_rule>
21218::std::vector<at::Tensor> _foreach_add_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21219 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21220 auto maybe_layer = maybeCurrentDynamicLayer();
21221 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21222 int64_t cur_level = maybe_layer->layerId();
21223 if (!isBatchedAtLevel(self, cur_level)) {
21224 return at::_ops::_foreach_add_Scalar::call(self, scalar);
21225 }
21226
21227 auto results = batch_rule(self, scalar);
21228 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21229}
21230template <typename batch_rule_t, batch_rule_t batch_rule>
21231void _foreach_add__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21232 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21233 auto maybe_layer = maybeCurrentDynamicLayer();
21234 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21235 int64_t cur_level = maybe_layer->layerId();
21236 if (!isBatchedAtLevel(self, cur_level)) {
21237 return at::_ops::_foreach_add__Scalar::call(self, scalar);
21238 }
21239
21240 batch_rule(self, scalar);
21241}
21242template <typename batch_rule_t, batch_rule_t batch_rule>
21243::std::vector<at::Tensor> _foreach_sub_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21244 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21245 auto maybe_layer = maybeCurrentDynamicLayer();
21246 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21247 int64_t cur_level = maybe_layer->layerId();
21248 if (!isBatchedAtLevel(self, cur_level)) {
21249 return at::_ops::_foreach_sub_Scalar::call(self, scalar);
21250 }
21251
21252 auto results = batch_rule(self, scalar);
21253 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21254}
21255template <typename batch_rule_t, batch_rule_t batch_rule>
21256void _foreach_sub__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21257 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21258 auto maybe_layer = maybeCurrentDynamicLayer();
21259 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21260 int64_t cur_level = maybe_layer->layerId();
21261 if (!isBatchedAtLevel(self, cur_level)) {
21262 return at::_ops::_foreach_sub__Scalar::call(self, scalar);
21263 }
21264
21265 batch_rule(self, scalar);
21266}
21267template <typename batch_rule_t, batch_rule_t batch_rule>
21268::std::vector<at::Tensor> _foreach_mul_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21269 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21270 auto maybe_layer = maybeCurrentDynamicLayer();
21271 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21272 int64_t cur_level = maybe_layer->layerId();
21273 if (!isBatchedAtLevel(self, cur_level)) {
21274 return at::_ops::_foreach_mul_Scalar::call(self, scalar);
21275 }
21276
21277 auto results = batch_rule(self, scalar);
21278 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21279}
21280template <typename batch_rule_t, batch_rule_t batch_rule>
21281void _foreach_mul__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21282 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21283 auto maybe_layer = maybeCurrentDynamicLayer();
21284 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21285 int64_t cur_level = maybe_layer->layerId();
21286 if (!isBatchedAtLevel(self, cur_level)) {
21287 return at::_ops::_foreach_mul__Scalar::call(self, scalar);
21288 }
21289
21290 batch_rule(self, scalar);
21291}
21292template <typename batch_rule_t, batch_rule_t batch_rule>
21293::std::vector<at::Tensor> _foreach_div_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21294 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21295 auto maybe_layer = maybeCurrentDynamicLayer();
21296 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21297 int64_t cur_level = maybe_layer->layerId();
21298 if (!isBatchedAtLevel(self, cur_level)) {
21299 return at::_ops::_foreach_div_Scalar::call(self, scalar);
21300 }
21301
21302 auto results = batch_rule(self, scalar);
21303 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21304}
21305template <typename batch_rule_t, batch_rule_t batch_rule>
21306void _foreach_div__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21307 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21308 auto maybe_layer = maybeCurrentDynamicLayer();
21309 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21310 int64_t cur_level = maybe_layer->layerId();
21311 if (!isBatchedAtLevel(self, cur_level)) {
21312 return at::_ops::_foreach_div__Scalar::call(self, scalar);
21313 }
21314
21315 batch_rule(self, scalar);
21316}
21317template <typename batch_rule_t, batch_rule_t batch_rule>
21318::std::vector<at::Tensor> _foreach_clamp_min_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21319 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21320 auto maybe_layer = maybeCurrentDynamicLayer();
21321 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21322 int64_t cur_level = maybe_layer->layerId();
21323 if (!isBatchedAtLevel(self, cur_level)) {
21324 return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar);
21325 }
21326
21327 auto results = batch_rule(self, scalar);
21328 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21329}
21330template <typename batch_rule_t, batch_rule_t batch_rule>
21331void _foreach_clamp_min__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21332 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21333 auto maybe_layer = maybeCurrentDynamicLayer();
21334 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21335 int64_t cur_level = maybe_layer->layerId();
21336 if (!isBatchedAtLevel(self, cur_level)) {
21337 return at::_ops::_foreach_clamp_min__Scalar::call(self, scalar);
21338 }
21339
21340 batch_rule(self, scalar);
21341}
21342template <typename batch_rule_t, batch_rule_t batch_rule>
21343::std::vector<at::Tensor> _foreach_clamp_max_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21344 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21345 auto maybe_layer = maybeCurrentDynamicLayer();
21346 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21347 int64_t cur_level = maybe_layer->layerId();
21348 if (!isBatchedAtLevel(self, cur_level)) {
21349 return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar);
21350 }
21351
21352 auto results = batch_rule(self, scalar);
21353 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21354}
21355template <typename batch_rule_t, batch_rule_t batch_rule>
21356void _foreach_clamp_max__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21357 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21358 auto maybe_layer = maybeCurrentDynamicLayer();
21359 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21360 int64_t cur_level = maybe_layer->layerId();
21361 if (!isBatchedAtLevel(self, cur_level)) {
21362 return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar);
21363 }
21364
21365 batch_rule(self, scalar);
21366}
21367template <typename batch_rule_t, batch_rule_t batch_rule>
21368::std::vector<at::Tensor> _foreach_maximum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21369 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21370 auto maybe_layer = maybeCurrentDynamicLayer();
21371 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21372 int64_t cur_level = maybe_layer->layerId();
21373 if (!isBatchedAtLevel(self, cur_level)) {
21374 return at::_ops::_foreach_maximum_Scalar::call(self, scalar);
21375 }
21376
21377 auto results = batch_rule(self, scalar);
21378 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21379}
21380template <typename batch_rule_t, batch_rule_t batch_rule>
21381void _foreach_maximum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21382 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21383 auto maybe_layer = maybeCurrentDynamicLayer();
21384 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21385 int64_t cur_level = maybe_layer->layerId();
21386 if (!isBatchedAtLevel(self, cur_level)) {
21387 return at::_ops::_foreach_maximum__Scalar::call(self, scalar);
21388 }
21389
21390 batch_rule(self, scalar);
21391}
21392template <typename batch_rule_t, batch_rule_t batch_rule>
21393::std::vector<at::Tensor> _foreach_minimum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21394 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21395 auto maybe_layer = maybeCurrentDynamicLayer();
21396 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21397 int64_t cur_level = maybe_layer->layerId();
21398 if (!isBatchedAtLevel(self, cur_level)) {
21399 return at::_ops::_foreach_minimum_Scalar::call(self, scalar);
21400 }
21401
21402 auto results = batch_rule(self, scalar);
21403 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21404}
21405template <typename batch_rule_t, batch_rule_t batch_rule>
21406void _foreach_minimum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
21407 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21408 auto maybe_layer = maybeCurrentDynamicLayer();
21409 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21410 int64_t cur_level = maybe_layer->layerId();
21411 if (!isBatchedAtLevel(self, cur_level)) {
21412 return at::_ops::_foreach_minimum__Scalar::call(self, scalar);
21413 }
21414
21415 batch_rule(self, scalar);
21416}
21417template <typename batch_rule_t, batch_rule_t batch_rule>
21418::std::vector<at::Tensor> _foreach_add_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
21419 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21420 auto maybe_layer = maybeCurrentDynamicLayer();
21421 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21422 int64_t cur_level = maybe_layer->layerId();
21423 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21424 return at::_ops::_foreach_add_List::call(self, other, alpha);
21425 }
21426
21427 auto results = batch_rule(self, other, alpha);
21428 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21429}
21430template <typename batch_rule_t, batch_rule_t batch_rule>
21431void _foreach_add__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
21432 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21433 auto maybe_layer = maybeCurrentDynamicLayer();
21434 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21435 int64_t cur_level = maybe_layer->layerId();
21436 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21437 return at::_ops::_foreach_add__List::call(self, other, alpha);
21438 }
21439
21440 batch_rule(self, other, alpha);
21441}
21442template <typename batch_rule_t, batch_rule_t batch_rule>
21443::std::vector<at::Tensor> _foreach_sub_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
21444 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21445 auto maybe_layer = maybeCurrentDynamicLayer();
21446 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21447 int64_t cur_level = maybe_layer->layerId();
21448 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21449 return at::_ops::_foreach_sub_List::call(self, other, alpha);
21450 }
21451
21452 auto results = batch_rule(self, other, alpha);
21453 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21454}
21455template <typename batch_rule_t, batch_rule_t batch_rule>
21456void _foreach_sub__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
21457 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21458 auto maybe_layer = maybeCurrentDynamicLayer();
21459 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21460 int64_t cur_level = maybe_layer->layerId();
21461 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21462 return at::_ops::_foreach_sub__List::call(self, other, alpha);
21463 }
21464
21465 batch_rule(self, other, alpha);
21466}
21467template <typename batch_rule_t, batch_rule_t batch_rule>
21468::std::vector<at::Tensor> _foreach_mul_List_generated_plumbing(at::TensorList self, at::TensorList other) {
21469 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21470 auto maybe_layer = maybeCurrentDynamicLayer();
21471 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21472 int64_t cur_level = maybe_layer->layerId();
21473 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21474 return at::_ops::_foreach_mul_List::call(self, other);
21475 }
21476
21477 auto results = batch_rule(self, other);
21478 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21479}
21480template <typename batch_rule_t, batch_rule_t batch_rule>
21481void _foreach_mul__List_generated_plumbing(at::TensorList self, at::TensorList other) {
21482 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21483 auto maybe_layer = maybeCurrentDynamicLayer();
21484 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21485 int64_t cur_level = maybe_layer->layerId();
21486 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21487 return at::_ops::_foreach_mul__List::call(self, other);
21488 }
21489
21490 batch_rule(self, other);
21491}
21492template <typename batch_rule_t, batch_rule_t batch_rule>
21493::std::vector<at::Tensor> _foreach_div_List_generated_plumbing(at::TensorList self, at::TensorList other) {
21494 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21495 auto maybe_layer = maybeCurrentDynamicLayer();
21496 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21497 int64_t cur_level = maybe_layer->layerId();
21498 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21499 return at::_ops::_foreach_div_List::call(self, other);
21500 }
21501
21502 auto results = batch_rule(self, other);
21503 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21504}
21505template <typename batch_rule_t, batch_rule_t batch_rule>
21506void _foreach_div__List_generated_plumbing(at::TensorList self, at::TensorList other) {
21507 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21508 auto maybe_layer = maybeCurrentDynamicLayer();
21509 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21510 int64_t cur_level = maybe_layer->layerId();
21511 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21512 return at::_ops::_foreach_div__List::call(self, other);
21513 }
21514
21515 batch_rule(self, other);
21516}
21517template <typename batch_rule_t, batch_rule_t batch_rule>
21518::std::vector<at::Tensor> _foreach_clamp_min_List_generated_plumbing(at::TensorList self, at::TensorList other) {
21519 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21520 auto maybe_layer = maybeCurrentDynamicLayer();
21521 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21522 int64_t cur_level = maybe_layer->layerId();
21523 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21524 return at::_ops::_foreach_clamp_min_List::call(self, other);
21525 }
21526
21527 auto results = batch_rule(self, other);
21528 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21529}
21530template <typename batch_rule_t, batch_rule_t batch_rule>
21531void _foreach_clamp_min__List_generated_plumbing(at::TensorList self, at::TensorList other) {
21532 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21533 auto maybe_layer = maybeCurrentDynamicLayer();
21534 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21535 int64_t cur_level = maybe_layer->layerId();
21536 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21537 return at::_ops::_foreach_clamp_min__List::call(self, other);
21538 }
21539
21540 batch_rule(self, other);
21541}
21542template <typename batch_rule_t, batch_rule_t batch_rule>
21543::std::vector<at::Tensor> _foreach_clamp_max_List_generated_plumbing(at::TensorList self, at::TensorList other) {
21544 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21545 auto maybe_layer = maybeCurrentDynamicLayer();
21546 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21547 int64_t cur_level = maybe_layer->layerId();
21548 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21549 return at::_ops::_foreach_clamp_max_List::call(self, other);
21550 }
21551
21552 auto results = batch_rule(self, other);
21553 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21554}
21555template <typename batch_rule_t, batch_rule_t batch_rule>
21556void _foreach_clamp_max__List_generated_plumbing(at::TensorList self, at::TensorList other) {
21557 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21558 auto maybe_layer = maybeCurrentDynamicLayer();
21559 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21560 int64_t cur_level = maybe_layer->layerId();
21561 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21562 return at::_ops::_foreach_clamp_max__List::call(self, other);
21563 }
21564
21565 batch_rule(self, other);
21566}
21567template <typename batch_rule_t, batch_rule_t batch_rule>
21568::std::vector<at::Tensor> _foreach_maximum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
21569 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21570 auto maybe_layer = maybeCurrentDynamicLayer();
21571 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21572 int64_t cur_level = maybe_layer->layerId();
21573 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21574 return at::_ops::_foreach_maximum_List::call(self, other);
21575 }
21576
21577 auto results = batch_rule(self, other);
21578 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21579}
21580template <typename batch_rule_t, batch_rule_t batch_rule>
21581void _foreach_maximum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
21582 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21583 auto maybe_layer = maybeCurrentDynamicLayer();
21584 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21585 int64_t cur_level = maybe_layer->layerId();
21586 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21587 return at::_ops::_foreach_maximum__List::call(self, other);
21588 }
21589
21590 batch_rule(self, other);
21591}
21592template <typename batch_rule_t, batch_rule_t batch_rule>
21593::std::vector<at::Tensor> _foreach_minimum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
21594 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21595 auto maybe_layer = maybeCurrentDynamicLayer();
21596 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21597 int64_t cur_level = maybe_layer->layerId();
21598 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21599 return at::_ops::_foreach_minimum_List::call(self, other);
21600 }
21601
21602 auto results = batch_rule(self, other);
21603 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21604}
21605template <typename batch_rule_t, batch_rule_t batch_rule>
21606void _foreach_minimum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
21607 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21608 auto maybe_layer = maybeCurrentDynamicLayer();
21609 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21610 int64_t cur_level = maybe_layer->layerId();
21611 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
21612 return at::_ops::_foreach_minimum__List::call(self, other);
21613 }
21614
21615 batch_rule(self, other);
21616}
21617template <typename batch_rule_t, batch_rule_t batch_rule>
21618::std::vector<at::Tensor> _foreach_add_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21619 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21620 auto maybe_layer = maybeCurrentDynamicLayer();
21621 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21622 int64_t cur_level = maybe_layer->layerId();
21623 if (!isBatchedAtLevel(self, cur_level)) {
21624 return at::_ops::_foreach_add_ScalarList::call(self, scalars);
21625 }
21626
21627 auto results = batch_rule(self, scalars);
21628 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21629}
21630template <typename batch_rule_t, batch_rule_t batch_rule>
21631void _foreach_add__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21632 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21633 auto maybe_layer = maybeCurrentDynamicLayer();
21634 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21635 int64_t cur_level = maybe_layer->layerId();
21636 if (!isBatchedAtLevel(self, cur_level)) {
21637 return at::_ops::_foreach_add__ScalarList::call(self, scalars);
21638 }
21639
21640 batch_rule(self, scalars);
21641}
21642template <typename batch_rule_t, batch_rule_t batch_rule>
21643::std::vector<at::Tensor> _foreach_sub_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21644 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21645 auto maybe_layer = maybeCurrentDynamicLayer();
21646 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21647 int64_t cur_level = maybe_layer->layerId();
21648 if (!isBatchedAtLevel(self, cur_level)) {
21649 return at::_ops::_foreach_sub_ScalarList::call(self, scalars);
21650 }
21651
21652 auto results = batch_rule(self, scalars);
21653 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21654}
21655template <typename batch_rule_t, batch_rule_t batch_rule>
21656void _foreach_sub__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21657 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21658 auto maybe_layer = maybeCurrentDynamicLayer();
21659 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21660 int64_t cur_level = maybe_layer->layerId();
21661 if (!isBatchedAtLevel(self, cur_level)) {
21662 return at::_ops::_foreach_sub__ScalarList::call(self, scalars);
21663 }
21664
21665 batch_rule(self, scalars);
21666}
21667template <typename batch_rule_t, batch_rule_t batch_rule>
21668::std::vector<at::Tensor> _foreach_div_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21669 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21670 auto maybe_layer = maybeCurrentDynamicLayer();
21671 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21672 int64_t cur_level = maybe_layer->layerId();
21673 if (!isBatchedAtLevel(self, cur_level)) {
21674 return at::_ops::_foreach_div_ScalarList::call(self, scalars);
21675 }
21676
21677 auto results = batch_rule(self, scalars);
21678 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21679}
21680template <typename batch_rule_t, batch_rule_t batch_rule>
21681void _foreach_div__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21682 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21683 auto maybe_layer = maybeCurrentDynamicLayer();
21684 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21685 int64_t cur_level = maybe_layer->layerId();
21686 if (!isBatchedAtLevel(self, cur_level)) {
21687 return at::_ops::_foreach_div__ScalarList::call(self, scalars);
21688 }
21689
21690 batch_rule(self, scalars);
21691}
21692template <typename batch_rule_t, batch_rule_t batch_rule>
21693::std::vector<at::Tensor> _foreach_mul_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21694 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21695 auto maybe_layer = maybeCurrentDynamicLayer();
21696 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21697 int64_t cur_level = maybe_layer->layerId();
21698 if (!isBatchedAtLevel(self, cur_level)) {
21699 return at::_ops::_foreach_mul_ScalarList::call(self, scalars);
21700 }
21701
21702 auto results = batch_rule(self, scalars);
21703 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21704}
21705template <typename batch_rule_t, batch_rule_t batch_rule>
21706void _foreach_mul__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21707 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21708 auto maybe_layer = maybeCurrentDynamicLayer();
21709 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21710 int64_t cur_level = maybe_layer->layerId();
21711 if (!isBatchedAtLevel(self, cur_level)) {
21712 return at::_ops::_foreach_mul__ScalarList::call(self, scalars);
21713 }
21714
21715 batch_rule(self, scalars);
21716}
21717template <typename batch_rule_t, batch_rule_t batch_rule>
21718::std::vector<at::Tensor> _foreach_clamp_min_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21719 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21720 auto maybe_layer = maybeCurrentDynamicLayer();
21721 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21722 int64_t cur_level = maybe_layer->layerId();
21723 if (!isBatchedAtLevel(self, cur_level)) {
21724 return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars);
21725 }
21726
21727 auto results = batch_rule(self, scalars);
21728 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21729}
21730template <typename batch_rule_t, batch_rule_t batch_rule>
21731void _foreach_clamp_min__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21732 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21733 auto maybe_layer = maybeCurrentDynamicLayer();
21734 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21735 int64_t cur_level = maybe_layer->layerId();
21736 if (!isBatchedAtLevel(self, cur_level)) {
21737 return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars);
21738 }
21739
21740 batch_rule(self, scalars);
21741}
21742template <typename batch_rule_t, batch_rule_t batch_rule>
21743::std::vector<at::Tensor> _foreach_clamp_max_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21744 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21745 auto maybe_layer = maybeCurrentDynamicLayer();
21746 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21747 int64_t cur_level = maybe_layer->layerId();
21748 if (!isBatchedAtLevel(self, cur_level)) {
21749 return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars);
21750 }
21751
21752 auto results = batch_rule(self, scalars);
21753 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21754}
21755template <typename batch_rule_t, batch_rule_t batch_rule>
21756void _foreach_clamp_max__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21757 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21758 auto maybe_layer = maybeCurrentDynamicLayer();
21759 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21760 int64_t cur_level = maybe_layer->layerId();
21761 if (!isBatchedAtLevel(self, cur_level)) {
21762 return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars);
21763 }
21764
21765 batch_rule(self, scalars);
21766}
21767template <typename batch_rule_t, batch_rule_t batch_rule>
21768::std::vector<at::Tensor> _foreach_maximum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21769 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21770 auto maybe_layer = maybeCurrentDynamicLayer();
21771 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21772 int64_t cur_level = maybe_layer->layerId();
21773 if (!isBatchedAtLevel(self, cur_level)) {
21774 return at::_ops::_foreach_maximum_ScalarList::call(self, scalars);
21775 }
21776
21777 auto results = batch_rule(self, scalars);
21778 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21779}
21780template <typename batch_rule_t, batch_rule_t batch_rule>
21781void _foreach_maximum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21782 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21783 auto maybe_layer = maybeCurrentDynamicLayer();
21784 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21785 int64_t cur_level = maybe_layer->layerId();
21786 if (!isBatchedAtLevel(self, cur_level)) {
21787 return at::_ops::_foreach_maximum__ScalarList::call(self, scalars);
21788 }
21789
21790 batch_rule(self, scalars);
21791}
21792template <typename batch_rule_t, batch_rule_t batch_rule>
21793::std::vector<at::Tensor> _foreach_minimum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21794 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21795 auto maybe_layer = maybeCurrentDynamicLayer();
21796 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21797 int64_t cur_level = maybe_layer->layerId();
21798 if (!isBatchedAtLevel(self, cur_level)) {
21799 return at::_ops::_foreach_minimum_ScalarList::call(self, scalars);
21800 }
21801
21802 auto results = batch_rule(self, scalars);
21803 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21804}
21805template <typename batch_rule_t, batch_rule_t batch_rule>
21806void _foreach_minimum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
21807 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21808 auto maybe_layer = maybeCurrentDynamicLayer();
21809 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21810 int64_t cur_level = maybe_layer->layerId();
21811 if (!isBatchedAtLevel(self, cur_level)) {
21812 return at::_ops::_foreach_minimum__ScalarList::call(self, scalars);
21813 }
21814
21815 batch_rule(self, scalars);
21816}
21817template <typename batch_rule_t, batch_rule_t batch_rule>
21818::std::vector<at::Tensor> _foreach_exp_generated_plumbing(at::TensorList self) {
21819 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21820 auto maybe_layer = maybeCurrentDynamicLayer();
21821 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21822 int64_t cur_level = maybe_layer->layerId();
21823 if (!isBatchedAtLevel(self, cur_level)) {
21824 return at::_ops::_foreach_exp::call(self);
21825 }
21826
21827 auto results = batch_rule(self);
21828 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21829}
21830template <typename batch_rule_t, batch_rule_t batch_rule>
21831void _foreach_zero__generated_plumbing(at::TensorList self) {
21832 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21833 auto maybe_layer = maybeCurrentDynamicLayer();
21834 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21835 int64_t cur_level = maybe_layer->layerId();
21836 if (!isBatchedAtLevel(self, cur_level)) {
21837 return at::_ops::_foreach_zero_::call(self);
21838 }
21839
21840 batch_rule(self);
21841}
21842template <typename batch_rule_t, batch_rule_t batch_rule>
21843void _foreach_exp__generated_plumbing(at::TensorList self) {
21844 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21845 auto maybe_layer = maybeCurrentDynamicLayer();
21846 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21847 int64_t cur_level = maybe_layer->layerId();
21848 if (!isBatchedAtLevel(self, cur_level)) {
21849 return at::_ops::_foreach_exp_::call(self);
21850 }
21851
21852 batch_rule(self);
21853}
21854template <typename batch_rule_t, batch_rule_t batch_rule>
21855::std::vector<at::Tensor> _foreach_sqrt_generated_plumbing(at::TensorList self) {
21856 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21857 auto maybe_layer = maybeCurrentDynamicLayer();
21858 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21859 int64_t cur_level = maybe_layer->layerId();
21860 if (!isBatchedAtLevel(self, cur_level)) {
21861 return at::_ops::_foreach_sqrt::call(self);
21862 }
21863
21864 auto results = batch_rule(self);
21865 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21866}
21867template <typename batch_rule_t, batch_rule_t batch_rule>
21868void _foreach_sqrt__generated_plumbing(at::TensorList self) {
21869 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21870 auto maybe_layer = maybeCurrentDynamicLayer();
21871 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21872 int64_t cur_level = maybe_layer->layerId();
21873 if (!isBatchedAtLevel(self, cur_level)) {
21874 return at::_ops::_foreach_sqrt_::call(self);
21875 }
21876
21877 batch_rule(self);
21878}
21879template <typename batch_rule_t, batch_rule_t batch_rule>
21880::std::vector<at::Tensor> _foreach_abs_generated_plumbing(at::TensorList self) {
21881 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21882 auto maybe_layer = maybeCurrentDynamicLayer();
21883 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21884 int64_t cur_level = maybe_layer->layerId();
21885 if (!isBatchedAtLevel(self, cur_level)) {
21886 return at::_ops::_foreach_abs::call(self);
21887 }
21888
21889 auto results = batch_rule(self);
21890 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21891}
21892template <typename batch_rule_t, batch_rule_t batch_rule>
21893void _foreach_abs__generated_plumbing(at::TensorList self) {
21894 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21895 auto maybe_layer = maybeCurrentDynamicLayer();
21896 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21897 int64_t cur_level = maybe_layer->layerId();
21898 if (!isBatchedAtLevel(self, cur_level)) {
21899 return at::_ops::_foreach_abs_::call(self);
21900 }
21901
21902 batch_rule(self);
21903}
21904template <typename batch_rule_t, batch_rule_t batch_rule>
21905::std::vector<at::Tensor> _foreach_acos_generated_plumbing(at::TensorList self) {
21906 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21907 auto maybe_layer = maybeCurrentDynamicLayer();
21908 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21909 int64_t cur_level = maybe_layer->layerId();
21910 if (!isBatchedAtLevel(self, cur_level)) {
21911 return at::_ops::_foreach_acos::call(self);
21912 }
21913
21914 auto results = batch_rule(self);
21915 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21916}
21917template <typename batch_rule_t, batch_rule_t batch_rule>
21918void _foreach_acos__generated_plumbing(at::TensorList self) {
21919 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21920 auto maybe_layer = maybeCurrentDynamicLayer();
21921 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21922 int64_t cur_level = maybe_layer->layerId();
21923 if (!isBatchedAtLevel(self, cur_level)) {
21924 return at::_ops::_foreach_acos_::call(self);
21925 }
21926
21927 batch_rule(self);
21928}
21929template <typename batch_rule_t, batch_rule_t batch_rule>
21930::std::vector<at::Tensor> _foreach_asin_generated_plumbing(at::TensorList self) {
21931 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21932 auto maybe_layer = maybeCurrentDynamicLayer();
21933 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21934 int64_t cur_level = maybe_layer->layerId();
21935 if (!isBatchedAtLevel(self, cur_level)) {
21936 return at::_ops::_foreach_asin::call(self);
21937 }
21938
21939 auto results = batch_rule(self);
21940 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21941}
21942template <typename batch_rule_t, batch_rule_t batch_rule>
21943void _foreach_asin__generated_plumbing(at::TensorList self) {
21944 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21945 auto maybe_layer = maybeCurrentDynamicLayer();
21946 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21947 int64_t cur_level = maybe_layer->layerId();
21948 if (!isBatchedAtLevel(self, cur_level)) {
21949 return at::_ops::_foreach_asin_::call(self);
21950 }
21951
21952 batch_rule(self);
21953}
21954template <typename batch_rule_t, batch_rule_t batch_rule>
21955::std::vector<at::Tensor> _foreach_atan_generated_plumbing(at::TensorList self) {
21956 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21957 auto maybe_layer = maybeCurrentDynamicLayer();
21958 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21959 int64_t cur_level = maybe_layer->layerId();
21960 if (!isBatchedAtLevel(self, cur_level)) {
21961 return at::_ops::_foreach_atan::call(self);
21962 }
21963
21964 auto results = batch_rule(self);
21965 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21966}
21967template <typename batch_rule_t, batch_rule_t batch_rule>
21968void _foreach_atan__generated_plumbing(at::TensorList self) {
21969 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21970 auto maybe_layer = maybeCurrentDynamicLayer();
21971 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21972 int64_t cur_level = maybe_layer->layerId();
21973 if (!isBatchedAtLevel(self, cur_level)) {
21974 return at::_ops::_foreach_atan_::call(self);
21975 }
21976
21977 batch_rule(self);
21978}
21979template <typename batch_rule_t, batch_rule_t batch_rule>
21980::std::vector<at::Tensor> _foreach_ceil_generated_plumbing(at::TensorList self) {
21981 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21982 auto maybe_layer = maybeCurrentDynamicLayer();
21983 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
21984 int64_t cur_level = maybe_layer->layerId();
21985 if (!isBatchedAtLevel(self, cur_level)) {
21986 return at::_ops::_foreach_ceil::call(self);
21987 }
21988
21989 auto results = batch_rule(self);
21990 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
21991}
21992template <typename batch_rule_t, batch_rule_t batch_rule>
21993void _foreach_ceil__generated_plumbing(at::TensorList self) {
21994 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
21995 auto maybe_layer = maybeCurrentDynamicLayer();
21996 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
21997 int64_t cur_level = maybe_layer->layerId();
21998 if (!isBatchedAtLevel(self, cur_level)) {
21999 return at::_ops::_foreach_ceil_::call(self);
22000 }
22001
22002 batch_rule(self);
22003}
22004template <typename batch_rule_t, batch_rule_t batch_rule>
22005::std::vector<at::Tensor> _foreach_cos_generated_plumbing(at::TensorList self) {
22006 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22007 auto maybe_layer = maybeCurrentDynamicLayer();
22008 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22009 int64_t cur_level = maybe_layer->layerId();
22010 if (!isBatchedAtLevel(self, cur_level)) {
22011 return at::_ops::_foreach_cos::call(self);
22012 }
22013
22014 auto results = batch_rule(self);
22015 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22016}
22017template <typename batch_rule_t, batch_rule_t batch_rule>
22018void _foreach_cos__generated_plumbing(at::TensorList self) {
22019 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22020 auto maybe_layer = maybeCurrentDynamicLayer();
22021 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22022 int64_t cur_level = maybe_layer->layerId();
22023 if (!isBatchedAtLevel(self, cur_level)) {
22024 return at::_ops::_foreach_cos_::call(self);
22025 }
22026
22027 batch_rule(self);
22028}
22029template <typename batch_rule_t, batch_rule_t batch_rule>
22030::std::vector<at::Tensor> _foreach_cosh_generated_plumbing(at::TensorList self) {
22031 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22032 auto maybe_layer = maybeCurrentDynamicLayer();
22033 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22034 int64_t cur_level = maybe_layer->layerId();
22035 if (!isBatchedAtLevel(self, cur_level)) {
22036 return at::_ops::_foreach_cosh::call(self);
22037 }
22038
22039 auto results = batch_rule(self);
22040 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22041}
22042template <typename batch_rule_t, batch_rule_t batch_rule>
22043void _foreach_cosh__generated_plumbing(at::TensorList self) {
22044 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22045 auto maybe_layer = maybeCurrentDynamicLayer();
22046 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22047 int64_t cur_level = maybe_layer->layerId();
22048 if (!isBatchedAtLevel(self, cur_level)) {
22049 return at::_ops::_foreach_cosh_::call(self);
22050 }
22051
22052 batch_rule(self);
22053}
22054template <typename batch_rule_t, batch_rule_t batch_rule>
22055::std::vector<at::Tensor> _foreach_erf_generated_plumbing(at::TensorList self) {
22056 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22057 auto maybe_layer = maybeCurrentDynamicLayer();
22058 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22059 int64_t cur_level = maybe_layer->layerId();
22060 if (!isBatchedAtLevel(self, cur_level)) {
22061 return at::_ops::_foreach_erf::call(self);
22062 }
22063
22064 auto results = batch_rule(self);
22065 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22066}
22067template <typename batch_rule_t, batch_rule_t batch_rule>
22068void _foreach_erf__generated_plumbing(at::TensorList self) {
22069 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22070 auto maybe_layer = maybeCurrentDynamicLayer();
22071 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22072 int64_t cur_level = maybe_layer->layerId();
22073 if (!isBatchedAtLevel(self, cur_level)) {
22074 return at::_ops::_foreach_erf_::call(self);
22075 }
22076
22077 batch_rule(self);
22078}
22079template <typename batch_rule_t, batch_rule_t batch_rule>
22080::std::vector<at::Tensor> _foreach_erfc_generated_plumbing(at::TensorList self) {
22081 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22082 auto maybe_layer = maybeCurrentDynamicLayer();
22083 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22084 int64_t cur_level = maybe_layer->layerId();
22085 if (!isBatchedAtLevel(self, cur_level)) {
22086 return at::_ops::_foreach_erfc::call(self);
22087 }
22088
22089 auto results = batch_rule(self);
22090 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22091}
22092template <typename batch_rule_t, batch_rule_t batch_rule>
22093void _foreach_erfc__generated_plumbing(at::TensorList self) {
22094 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22095 auto maybe_layer = maybeCurrentDynamicLayer();
22096 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22097 int64_t cur_level = maybe_layer->layerId();
22098 if (!isBatchedAtLevel(self, cur_level)) {
22099 return at::_ops::_foreach_erfc_::call(self);
22100 }
22101
22102 batch_rule(self);
22103}
22104template <typename batch_rule_t, batch_rule_t batch_rule>
22105::std::vector<at::Tensor> _foreach_expm1_generated_plumbing(at::TensorList self) {
22106 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22107 auto maybe_layer = maybeCurrentDynamicLayer();
22108 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22109 int64_t cur_level = maybe_layer->layerId();
22110 if (!isBatchedAtLevel(self, cur_level)) {
22111 return at::_ops::_foreach_expm1::call(self);
22112 }
22113
22114 auto results = batch_rule(self);
22115 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22116}
22117template <typename batch_rule_t, batch_rule_t batch_rule>
22118void _foreach_expm1__generated_plumbing(at::TensorList self) {
22119 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22120 auto maybe_layer = maybeCurrentDynamicLayer();
22121 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22122 int64_t cur_level = maybe_layer->layerId();
22123 if (!isBatchedAtLevel(self, cur_level)) {
22124 return at::_ops::_foreach_expm1_::call(self);
22125 }
22126
22127 batch_rule(self);
22128}
22129template <typename batch_rule_t, batch_rule_t batch_rule>
22130::std::vector<at::Tensor> _foreach_floor_generated_plumbing(at::TensorList self) {
22131 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22132 auto maybe_layer = maybeCurrentDynamicLayer();
22133 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22134 int64_t cur_level = maybe_layer->layerId();
22135 if (!isBatchedAtLevel(self, cur_level)) {
22136 return at::_ops::_foreach_floor::call(self);
22137 }
22138
22139 auto results = batch_rule(self);
22140 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22141}
22142template <typename batch_rule_t, batch_rule_t batch_rule>
22143void _foreach_floor__generated_plumbing(at::TensorList self) {
22144 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22145 auto maybe_layer = maybeCurrentDynamicLayer();
22146 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22147 int64_t cur_level = maybe_layer->layerId();
22148 if (!isBatchedAtLevel(self, cur_level)) {
22149 return at::_ops::_foreach_floor_::call(self);
22150 }
22151
22152 batch_rule(self);
22153}
22154template <typename batch_rule_t, batch_rule_t batch_rule>
22155::std::vector<at::Tensor> _foreach_log_generated_plumbing(at::TensorList self) {
22156 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22157 auto maybe_layer = maybeCurrentDynamicLayer();
22158 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22159 int64_t cur_level = maybe_layer->layerId();
22160 if (!isBatchedAtLevel(self, cur_level)) {
22161 return at::_ops::_foreach_log::call(self);
22162 }
22163
22164 auto results = batch_rule(self);
22165 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22166}
22167template <typename batch_rule_t, batch_rule_t batch_rule>
22168void _foreach_log__generated_plumbing(at::TensorList self) {
22169 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22170 auto maybe_layer = maybeCurrentDynamicLayer();
22171 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22172 int64_t cur_level = maybe_layer->layerId();
22173 if (!isBatchedAtLevel(self, cur_level)) {
22174 return at::_ops::_foreach_log_::call(self);
22175 }
22176
22177 batch_rule(self);
22178}
22179template <typename batch_rule_t, batch_rule_t batch_rule>
22180::std::vector<at::Tensor> _foreach_log10_generated_plumbing(at::TensorList self) {
22181 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22182 auto maybe_layer = maybeCurrentDynamicLayer();
22183 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22184 int64_t cur_level = maybe_layer->layerId();
22185 if (!isBatchedAtLevel(self, cur_level)) {
22186 return at::_ops::_foreach_log10::call(self);
22187 }
22188
22189 auto results = batch_rule(self);
22190 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22191}
22192template <typename batch_rule_t, batch_rule_t batch_rule>
22193void _foreach_log10__generated_plumbing(at::TensorList self) {
22194 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22195 auto maybe_layer = maybeCurrentDynamicLayer();
22196 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22197 int64_t cur_level = maybe_layer->layerId();
22198 if (!isBatchedAtLevel(self, cur_level)) {
22199 return at::_ops::_foreach_log10_::call(self);
22200 }
22201
22202 batch_rule(self);
22203}
22204template <typename batch_rule_t, batch_rule_t batch_rule>
22205::std::vector<at::Tensor> _foreach_log1p_generated_plumbing(at::TensorList self) {
22206 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22207 auto maybe_layer = maybeCurrentDynamicLayer();
22208 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22209 int64_t cur_level = maybe_layer->layerId();
22210 if (!isBatchedAtLevel(self, cur_level)) {
22211 return at::_ops::_foreach_log1p::call(self);
22212 }
22213
22214 auto results = batch_rule(self);
22215 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22216}
22217template <typename batch_rule_t, batch_rule_t batch_rule>
22218void _foreach_log1p__generated_plumbing(at::TensorList self) {
22219 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22220 auto maybe_layer = maybeCurrentDynamicLayer();
22221 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22222 int64_t cur_level = maybe_layer->layerId();
22223 if (!isBatchedAtLevel(self, cur_level)) {
22224 return at::_ops::_foreach_log1p_::call(self);
22225 }
22226
22227 batch_rule(self);
22228}
22229template <typename batch_rule_t, batch_rule_t batch_rule>
22230::std::vector<at::Tensor> _foreach_log2_generated_plumbing(at::TensorList self) {
22231 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22232 auto maybe_layer = maybeCurrentDynamicLayer();
22233 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22234 int64_t cur_level = maybe_layer->layerId();
22235 if (!isBatchedAtLevel(self, cur_level)) {
22236 return at::_ops::_foreach_log2::call(self);
22237 }
22238
22239 auto results = batch_rule(self);
22240 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22241}
22242template <typename batch_rule_t, batch_rule_t batch_rule>
22243void _foreach_log2__generated_plumbing(at::TensorList self) {
22244 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22245 auto maybe_layer = maybeCurrentDynamicLayer();
22246 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22247 int64_t cur_level = maybe_layer->layerId();
22248 if (!isBatchedAtLevel(self, cur_level)) {
22249 return at::_ops::_foreach_log2_::call(self);
22250 }
22251
22252 batch_rule(self);
22253}
22254template <typename batch_rule_t, batch_rule_t batch_rule>
22255::std::vector<at::Tensor> _foreach_neg_generated_plumbing(at::TensorList self) {
22256 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22257 auto maybe_layer = maybeCurrentDynamicLayer();
22258 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22259 int64_t cur_level = maybe_layer->layerId();
22260 if (!isBatchedAtLevel(self, cur_level)) {
22261 return at::_ops::_foreach_neg::call(self);
22262 }
22263
22264 auto results = batch_rule(self);
22265 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22266}
22267template <typename batch_rule_t, batch_rule_t batch_rule>
22268void _foreach_neg__generated_plumbing(at::TensorList self) {
22269 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22270 auto maybe_layer = maybeCurrentDynamicLayer();
22271 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22272 int64_t cur_level = maybe_layer->layerId();
22273 if (!isBatchedAtLevel(self, cur_level)) {
22274 return at::_ops::_foreach_neg_::call(self);
22275 }
22276
22277 batch_rule(self);
22278}
22279template <typename batch_rule_t, batch_rule_t batch_rule>
22280::std::vector<at::Tensor> _foreach_tan_generated_plumbing(at::TensorList self) {
22281 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22282 auto maybe_layer = maybeCurrentDynamicLayer();
22283 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22284 int64_t cur_level = maybe_layer->layerId();
22285 if (!isBatchedAtLevel(self, cur_level)) {
22286 return at::_ops::_foreach_tan::call(self);
22287 }
22288
22289 auto results = batch_rule(self);
22290 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22291}
22292template <typename batch_rule_t, batch_rule_t batch_rule>
22293void _foreach_tan__generated_plumbing(at::TensorList self) {
22294 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22295 auto maybe_layer = maybeCurrentDynamicLayer();
22296 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22297 int64_t cur_level = maybe_layer->layerId();
22298 if (!isBatchedAtLevel(self, cur_level)) {
22299 return at::_ops::_foreach_tan_::call(self);
22300 }
22301
22302 batch_rule(self);
22303}
22304template <typename batch_rule_t, batch_rule_t batch_rule>
22305::std::vector<at::Tensor> _foreach_tanh_generated_plumbing(at::TensorList self) {
22306 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22307 auto maybe_layer = maybeCurrentDynamicLayer();
22308 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22309 int64_t cur_level = maybe_layer->layerId();
22310 if (!isBatchedAtLevel(self, cur_level)) {
22311 return at::_ops::_foreach_tanh::call(self);
22312 }
22313
22314 auto results = batch_rule(self);
22315 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22316}
22317template <typename batch_rule_t, batch_rule_t batch_rule>
22318void _foreach_tanh__generated_plumbing(at::TensorList self) {
22319 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22320 auto maybe_layer = maybeCurrentDynamicLayer();
22321 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22322 int64_t cur_level = maybe_layer->layerId();
22323 if (!isBatchedAtLevel(self, cur_level)) {
22324 return at::_ops::_foreach_tanh_::call(self);
22325 }
22326
22327 batch_rule(self);
22328}
22329template <typename batch_rule_t, batch_rule_t batch_rule>
22330::std::vector<at::Tensor> _foreach_sin_generated_plumbing(at::TensorList self) {
22331 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22332 auto maybe_layer = maybeCurrentDynamicLayer();
22333 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22334 int64_t cur_level = maybe_layer->layerId();
22335 if (!isBatchedAtLevel(self, cur_level)) {
22336 return at::_ops::_foreach_sin::call(self);
22337 }
22338
22339 auto results = batch_rule(self);
22340 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22341}
22342template <typename batch_rule_t, batch_rule_t batch_rule>
22343void _foreach_sin__generated_plumbing(at::TensorList self) {
22344 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22345 auto maybe_layer = maybeCurrentDynamicLayer();
22346 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22347 int64_t cur_level = maybe_layer->layerId();
22348 if (!isBatchedAtLevel(self, cur_level)) {
22349 return at::_ops::_foreach_sin_::call(self);
22350 }
22351
22352 batch_rule(self);
22353}
22354template <typename batch_rule_t, batch_rule_t batch_rule>
22355::std::vector<at::Tensor> _foreach_sinh_generated_plumbing(at::TensorList self) {
22356 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22357 auto maybe_layer = maybeCurrentDynamicLayer();
22358 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22359 int64_t cur_level = maybe_layer->layerId();
22360 if (!isBatchedAtLevel(self, cur_level)) {
22361 return at::_ops::_foreach_sinh::call(self);
22362 }
22363
22364 auto results = batch_rule(self);
22365 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22366}
22367template <typename batch_rule_t, batch_rule_t batch_rule>
22368void _foreach_sinh__generated_plumbing(at::TensorList self) {
22369 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22370 auto maybe_layer = maybeCurrentDynamicLayer();
22371 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22372 int64_t cur_level = maybe_layer->layerId();
22373 if (!isBatchedAtLevel(self, cur_level)) {
22374 return at::_ops::_foreach_sinh_::call(self);
22375 }
22376
22377 batch_rule(self);
22378}
22379template <typename batch_rule_t, batch_rule_t batch_rule>
22380::std::vector<at::Tensor> _foreach_round_generated_plumbing(at::TensorList self) {
22381 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22382 auto maybe_layer = maybeCurrentDynamicLayer();
22383 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22384 int64_t cur_level = maybe_layer->layerId();
22385 if (!isBatchedAtLevel(self, cur_level)) {
22386 return at::_ops::_foreach_round::call(self);
22387 }
22388
22389 auto results = batch_rule(self);
22390 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22391}
22392template <typename batch_rule_t, batch_rule_t batch_rule>
22393void _foreach_round__generated_plumbing(at::TensorList self) {
22394 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22395 auto maybe_layer = maybeCurrentDynamicLayer();
22396 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22397 int64_t cur_level = maybe_layer->layerId();
22398 if (!isBatchedAtLevel(self, cur_level)) {
22399 return at::_ops::_foreach_round_::call(self);
22400 }
22401
22402 batch_rule(self);
22403}
22404template <typename batch_rule_t, batch_rule_t batch_rule>
22405::std::vector<at::Tensor> _foreach_lgamma_generated_plumbing(at::TensorList self) {
22406 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22407 auto maybe_layer = maybeCurrentDynamicLayer();
22408 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22409 int64_t cur_level = maybe_layer->layerId();
22410 if (!isBatchedAtLevel(self, cur_level)) {
22411 return at::_ops::_foreach_lgamma::call(self);
22412 }
22413
22414 auto results = batch_rule(self);
22415 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22416}
22417template <typename batch_rule_t, batch_rule_t batch_rule>
22418void _foreach_lgamma__generated_plumbing(at::TensorList self) {
22419 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22420 auto maybe_layer = maybeCurrentDynamicLayer();
22421 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22422 int64_t cur_level = maybe_layer->layerId();
22423 if (!isBatchedAtLevel(self, cur_level)) {
22424 return at::_ops::_foreach_lgamma_::call(self);
22425 }
22426
22427 batch_rule(self);
22428}
22429template <typename batch_rule_t, batch_rule_t batch_rule>
22430::std::vector<at::Tensor> _foreach_frac_generated_plumbing(at::TensorList self) {
22431 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22432 auto maybe_layer = maybeCurrentDynamicLayer();
22433 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22434 int64_t cur_level = maybe_layer->layerId();
22435 if (!isBatchedAtLevel(self, cur_level)) {
22436 return at::_ops::_foreach_frac::call(self);
22437 }
22438
22439 auto results = batch_rule(self);
22440 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22441}
22442template <typename batch_rule_t, batch_rule_t batch_rule>
22443void _foreach_frac__generated_plumbing(at::TensorList self) {
22444 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22445 auto maybe_layer = maybeCurrentDynamicLayer();
22446 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22447 int64_t cur_level = maybe_layer->layerId();
22448 if (!isBatchedAtLevel(self, cur_level)) {
22449 return at::_ops::_foreach_frac_::call(self);
22450 }
22451
22452 batch_rule(self);
22453}
22454template <typename batch_rule_t, batch_rule_t batch_rule>
22455::std::vector<at::Tensor> _foreach_reciprocal_generated_plumbing(at::TensorList self) {
22456 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22457 auto maybe_layer = maybeCurrentDynamicLayer();
22458 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22459 int64_t cur_level = maybe_layer->layerId();
22460 if (!isBatchedAtLevel(self, cur_level)) {
22461 return at::_ops::_foreach_reciprocal::call(self);
22462 }
22463
22464 auto results = batch_rule(self);
22465 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22466}
22467template <typename batch_rule_t, batch_rule_t batch_rule>
22468void _foreach_reciprocal__generated_plumbing(at::TensorList self) {
22469 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22470 auto maybe_layer = maybeCurrentDynamicLayer();
22471 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22472 int64_t cur_level = maybe_layer->layerId();
22473 if (!isBatchedAtLevel(self, cur_level)) {
22474 return at::_ops::_foreach_reciprocal_::call(self);
22475 }
22476
22477 batch_rule(self);
22478}
22479template <typename batch_rule_t, batch_rule_t batch_rule>
22480::std::vector<at::Tensor> _foreach_sigmoid_generated_plumbing(at::TensorList self) {
22481 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22482 auto maybe_layer = maybeCurrentDynamicLayer();
22483 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22484 int64_t cur_level = maybe_layer->layerId();
22485 if (!isBatchedAtLevel(self, cur_level)) {
22486 return at::_ops::_foreach_sigmoid::call(self);
22487 }
22488
22489 auto results = batch_rule(self);
22490 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22491}
22492template <typename batch_rule_t, batch_rule_t batch_rule>
22493void _foreach_sigmoid__generated_plumbing(at::TensorList self) {
22494 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22495 auto maybe_layer = maybeCurrentDynamicLayer();
22496 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22497 int64_t cur_level = maybe_layer->layerId();
22498 if (!isBatchedAtLevel(self, cur_level)) {
22499 return at::_ops::_foreach_sigmoid_::call(self);
22500 }
22501
22502 batch_rule(self);
22503}
22504template <typename batch_rule_t, batch_rule_t batch_rule>
22505::std::vector<at::Tensor> _foreach_trunc_generated_plumbing(at::TensorList self) {
22506 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22507 auto maybe_layer = maybeCurrentDynamicLayer();
22508 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22509 int64_t cur_level = maybe_layer->layerId();
22510 if (!isBatchedAtLevel(self, cur_level)) {
22511 return at::_ops::_foreach_trunc::call(self);
22512 }
22513
22514 auto results = batch_rule(self);
22515 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22516}
22517template <typename batch_rule_t, batch_rule_t batch_rule>
22518void _foreach_trunc__generated_plumbing(at::TensorList self) {
22519 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22520 auto maybe_layer = maybeCurrentDynamicLayer();
22521 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22522 int64_t cur_level = maybe_layer->layerId();
22523 if (!isBatchedAtLevel(self, cur_level)) {
22524 return at::_ops::_foreach_trunc_::call(self);
22525 }
22526
22527 batch_rule(self);
22528}
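// Editor's note: the in-place pointwise _foreach ops (_foreach_addcdiv_ /
// _foreach_addcmul_) come in Scalar, ScalarList, and Tensor overloads. The Scalar and
// ScalarList overloads forward their scalar argument(s) to the batch rule unchanged;
// only the Tensor overload unwraps `scalars` into a (value, bdim) pair with
// unwrapTensorAtLevel before calling the batch rule.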
22529template <typename batch_rule_t, batch_rule_t batch_rule>
22530void _foreach_addcdiv__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
22531 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22532 auto maybe_layer = maybeCurrentDynamicLayer();
22533 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22534 int64_t cur_level = maybe_layer->layerId();
22535 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
22536 return at::_ops::_foreach_addcdiv__Scalar::call(self, tensor1, tensor2, value);
22537 }
22538
22539 batch_rule(self, tensor1, tensor2, value);
22540}
22541template <typename batch_rule_t, batch_rule_t batch_rule>
22542void _foreach_addcmul__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
22543 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22544 auto maybe_layer = maybeCurrentDynamicLayer();
22545 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22546 int64_t cur_level = maybe_layer->layerId();
22547 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
22548 return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value);
22549 }
22550
22551 batch_rule(self, tensor1, tensor2, value);
22552}
22553template <typename batch_rule_t, batch_rule_t batch_rule>
22554void _foreach_addcdiv__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
22555 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22556 auto maybe_layer = maybeCurrentDynamicLayer();
22557 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22558 int64_t cur_level = maybe_layer->layerId();
22559 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
22560 return at::_ops::_foreach_addcdiv__ScalarList::call(self, tensor1, tensor2, scalars);
22561 }
22562
22563 batch_rule(self, tensor1, tensor2, scalars);
22564}
22565template <typename batch_rule_t, batch_rule_t batch_rule>
22566void _foreach_addcdiv__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
22567 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22568 auto maybe_layer = maybeCurrentDynamicLayer();
22569 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22570 int64_t cur_level = maybe_layer->layerId();
22571 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
22572 return at::_ops::_foreach_addcdiv__Tensor::call(self, tensor1, tensor2, scalars);
22573 }
22574 Tensor scalars_value;
22575 optional<int64_t> scalars_bdim;
22576 std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
22577 batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
22578}
22579template <typename batch_rule_t, batch_rule_t batch_rule>
22580void _foreach_addcmul__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
22581 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22582 auto maybe_layer = maybeCurrentDynamicLayer();
22583 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22584 int64_t cur_level = maybe_layer->layerId();
22585 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
22586 return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars);
22587 }
22588
22589 batch_rule(self, tensor1, tensor2, scalars);
22590}
22591template <typename batch_rule_t, batch_rule_t batch_rule>
22592void _foreach_addcmul__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
22593 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22594 auto maybe_layer = maybeCurrentDynamicLayer();
22595 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22596 int64_t cur_level = maybe_layer->layerId();
22597 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
22598 return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars);
22599 }
22600 Tensor scalars_value;
22601 optional<int64_t> scalars_bdim;
22602 std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
22603 batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
22604}
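// Editor's note: the out-of-place counterparts of the pointwise ops above use the same
// argument handling, but capture the batch rule's result and re-wrap it with
// makeBatchedVector at the current level instead of returning void.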
22605template <typename batch_rule_t, batch_rule_t batch_rule>
22606::std::vector<at::Tensor> _foreach_addcdiv_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
22607 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22608 auto maybe_layer = maybeCurrentDynamicLayer();
22609 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22610 int64_t cur_level = maybe_layer->layerId();
22611 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
22612 return at::_ops::_foreach_addcdiv_Scalar::call(self, tensor1, tensor2, value);
22613 }
22614
22615 auto results = batch_rule(self, tensor1, tensor2, value);
22616 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22617}
22618template <typename batch_rule_t, batch_rule_t batch_rule>
22619::std::vector<at::Tensor> _foreach_addcmul_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
22620 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22621 auto maybe_layer = maybeCurrentDynamicLayer();
22622 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22623 int64_t cur_level = maybe_layer->layerId();
22624 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
22625 return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value);
22626 }
22627
22628 auto results = batch_rule(self, tensor1, tensor2, value);
22629 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22630}
22631template <typename batch_rule_t, batch_rule_t batch_rule>
22632::std::vector<at::Tensor> _foreach_addcdiv_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
22633 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22634 auto maybe_layer = maybeCurrentDynamicLayer();
22635 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22636 int64_t cur_level = maybe_layer->layerId();
22637 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
22638 return at::_ops::_foreach_addcdiv_ScalarList::call(self, tensor1, tensor2, scalars);
22639 }
22640
22641 auto results = batch_rule(self, tensor1, tensor2, scalars);
22642 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22643}
22644template <typename batch_rule_t, batch_rule_t batch_rule>
22645::std::vector<at::Tensor> _foreach_addcdiv_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
22646 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22647 auto maybe_layer = maybeCurrentDynamicLayer();
22648 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22649 int64_t cur_level = maybe_layer->layerId();
22650 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
22651 return at::_ops::_foreach_addcdiv_Tensor::call(self, tensor1, tensor2, scalars);
22652 }
22653 Tensor scalars_value;
22654 optional<int64_t> scalars_bdim;
22655 std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
22656 auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
22657 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22658}
22659template <typename batch_rule_t, batch_rule_t batch_rule>
22660::std::vector<at::Tensor> _foreach_addcmul_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
22661 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22662 auto maybe_layer = maybeCurrentDynamicLayer();
22663 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22664 int64_t cur_level = maybe_layer->layerId();
22665 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
22666 return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars);
22667 }
22668
22669 auto results = batch_rule(self, tensor1, tensor2, scalars);
22670 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22671}
22672template <typename batch_rule_t, batch_rule_t batch_rule>
22673::std::vector<at::Tensor> _foreach_addcmul_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
22674 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22675 auto maybe_layer = maybeCurrentDynamicLayer();
22676 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22677 int64_t cur_level = maybe_layer->layerId();
22678 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
22679 return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars);
22680 }
22681 Tensor scalars_value;
22682 optional<int64_t> scalars_bdim;
22683 std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
22684 auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
22685 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22686}
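// Editor's note: _foreach_norm (Scalar ord) and the _foreach_lerp overloads follow the
// same pattern: the List variants take three TensorLists, the Scalar variants take a
// scalar weight, and the trailing-underscore versions return void. The early return
// falls back to the plain op only when none of the tensor lists are batched at the
// current level.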
22687template <typename batch_rule_t, batch_rule_t batch_rule>
22688::std::vector<at::Tensor> _foreach_norm_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & ord) {
22689 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22690 auto maybe_layer = maybeCurrentDynamicLayer();
22691 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22692 int64_t cur_level = maybe_layer->layerId();
22693 if (!isBatchedAtLevel(self, cur_level)) {
22694 return at::_ops::_foreach_norm_Scalar::call(self, ord);
22695 }
22696
22697 auto results = batch_rule(self, ord);
22698 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22699}
22700template <typename batch_rule_t, batch_rule_t batch_rule>
22701::std::vector<at::Tensor> _foreach_lerp_List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
22702 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22703 auto maybe_layer = maybeCurrentDynamicLayer();
22704 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22705 int64_t cur_level = maybe_layer->layerId();
22706 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
22707 return at::_ops::_foreach_lerp_List::call(self, tensors1, weights);
22708 }
22709
22710 auto results = batch_rule(self, tensors1, weights);
22711 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22712}
22713template <typename batch_rule_t, batch_rule_t batch_rule>
22714void _foreach_lerp__List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
22715 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22716 auto maybe_layer = maybeCurrentDynamicLayer();
22717 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22718 int64_t cur_level = maybe_layer->layerId();
22719 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
22720 return at::_ops::_foreach_lerp__List::call(self, tensors1, weights);
22721 }
22722
22723 batch_rule(self, tensors1, weights);
22724}
22725template <typename batch_rule_t, batch_rule_t batch_rule>
22726::std::vector<at::Tensor> _foreach_lerp_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
22727 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22728 auto maybe_layer = maybeCurrentDynamicLayer();
22729 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22730 int64_t cur_level = maybe_layer->layerId();
22731 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
22732 return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight);
22733 }
22734
22735 auto results = batch_rule(self, tensors1, weight);
22736 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
22737}
22738template <typename batch_rule_t, batch_rule_t batch_rule>
22739void _foreach_lerp__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
22740 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22741 auto maybe_layer = maybeCurrentDynamicLayer();
22742 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
22743 int64_t cur_level = maybe_layer->layerId();
22744 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
22745 return at::_ops::_foreach_lerp__Scalar::call(self, tensors1, weight);
22746 }
22747
22748 batch_rule(self, tensors1, weight);
22749}
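// Editor's note: bucketize and searchsorted are the first plumbings in this stretch
// that unwrap individual Tensor arguments: each Tensor is split into a (value, bdim)
// pair via unwrapTensorAtLevel, searchsorted's optional `sorter` is unwrapped only
// when it holds a value, and the plain flags (out_int32, right, side) are forwarded
// unchanged.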
22750template <typename batch_rule_t, batch_rule_t batch_rule>
22751at::Tensor bucketize_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
22752 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22753 auto maybe_layer = maybeCurrentDynamicLayer();
22754 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22755 int64_t cur_level = maybe_layer->layerId();
22756 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(boundaries, cur_level)) {
22757 return at::_ops::bucketize_Tensor::call(self, boundaries, out_int32, right);
22758 }
22759 Tensor self_value;
22760 optional<int64_t> self_bdim;
22761 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22762 Tensor boundaries_value;
22763 optional<int64_t> boundaries_bdim;
22764 std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level);
22765 auto results = batch_rule(self_value, self_bdim, boundaries_value, boundaries_bdim, out_int32, right);
22766 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22767}
22768template <typename batch_rule_t, batch_rule_t batch_rule>
22769at::Tensor bucketize_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
22770 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22771 auto maybe_layer = maybeCurrentDynamicLayer();
22772 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22773 int64_t cur_level = maybe_layer->layerId();
22774 if (!isBatchedAtLevel(boundaries, cur_level)) {
22775 return at::_ops::bucketize_Scalar::call(self, boundaries, out_int32, right);
22776 }
22777 Tensor boundaries_value;
22778 optional<int64_t> boundaries_bdim;
22779 std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level);
22780 auto results = batch_rule(self, boundaries_value, boundaries_bdim, out_int32, right);
22781 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22782}
22783template <typename batch_rule_t, batch_rule_t batch_rule>
22784at::Tensor searchsorted_Tensor_generated_plumbing(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
22785 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22786 auto maybe_layer = maybeCurrentDynamicLayer();
22787 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22788 int64_t cur_level = maybe_layer->layerId();
22789 if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
22790 return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
22791 }
22792 Tensor sorted_sequence_value;
22793 optional<int64_t> sorted_sequence_bdim;
22794 std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level);
22795 Tensor self_value;
22796 optional<int64_t> self_bdim;
22797 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22798 optional<Tensor> sorter_value;
22799 optional<int64_t> sorter_bdim;
22800 if (sorter) {
22801 std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
22802 }
22803 auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self_value, self_bdim, out_int32, right, side, sorter_value, sorter_bdim);
22804 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22805}
22806template <typename batch_rule_t, batch_rule_t batch_rule>
22807at::Tensor searchsorted_Scalar_generated_plumbing(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
22808 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22809 auto maybe_layer = maybeCurrentDynamicLayer();
22810 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22811 int64_t cur_level = maybe_layer->layerId();
22812 if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
22813 return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter);
22814 }
22815 Tensor sorted_sequence_value;
22816 optional<int64_t> sorted_sequence_bdim;
22817 std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level);
22818 optional<Tensor> sorter_value;
22819 optional<int64_t> sorter_bdim;
22820 if (sorter) {
22821 std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
22822 }
22823 auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self, out_int32, right, side, sorter_value, sorter_bdim);
22824 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22825}
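// Editor's note: sparse index conversion helpers. _convert_indices_from_coo_to_csr
// unwraps `self`, while _convert_indices_from_csr_to_coo unwraps both crow_indices and
// col_indices; size, out_int32, and transpose pass through untouched.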
22826template <typename batch_rule_t, batch_rule_t batch_rule>
22827at::Tensor _convert_indices_from_coo_to_csr_generated_plumbing(const at::Tensor & self, int64_t size, bool out_int32) {
22828 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22829 auto maybe_layer = maybeCurrentDynamicLayer();
22830 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22831 int64_t cur_level = maybe_layer->layerId();
22832 if (!isBatchedAtLevel(self, cur_level)) {
22833 return at::_ops::_convert_indices_from_coo_to_csr::call(self, size, out_int32);
22834 }
22835 Tensor self_value;
22836 optional<int64_t> self_bdim;
22837 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22838 auto results = batch_rule(self_value, self_bdim, size, out_int32);
22839 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22840}
22841template <typename batch_rule_t, batch_rule_t batch_rule>
22842at::Tensor _convert_indices_from_csr_to_coo_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
22843 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22844 auto maybe_layer = maybeCurrentDynamicLayer();
22845 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22846 int64_t cur_level = maybe_layer->layerId();
22847 if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level)) {
22848 return at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices, col_indices, out_int32, transpose);
22849 }
22850 Tensor crow_indices_value;
22851 optional<int64_t> crow_indices_bdim;
22852 std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
22853 Tensor col_indices_value;
22854 optional<int64_t> col_indices_bdim;
22855 std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
22856 auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, out_int32, transpose);
22857 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22858}
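// Editor's note: the remaining plumbings cover the loss functions. Each one unwraps its
// Tensor arguments into (value, bdim) pairs, forwards integer/scalar options unchanged,
// and re-wraps the result with makeBatched. For orientation only, a minimal sketch of a
// batch-rule signature consistent with the call site in mse_loss_generated_plumbing
// just below — the name `mse_loss_batch_rule` is hypothetical and is not defined in
// this header:
//
//   std::tuple<at::Tensor, c10::optional<int64_t>> mse_loss_batch_rule(
//       const at::Tensor& self, c10::optional<int64_t> self_bdim,
//       const at::Tensor& target, c10::optional<int64_t> target_bdim,
//       int64_t reduction);
//
// Forward variants that return two tensors (e.g. multilabel_margin_loss_forward) are
// re-wrapped element-wise: (get<0>, get<1>) for the first output and (get<2>, get<3>)
// for the second.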
22859template <typename batch_rule_t, batch_rule_t batch_rule>
22860at::Tensor mse_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
22861 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22862 auto maybe_layer = maybeCurrentDynamicLayer();
22863 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22864 int64_t cur_level = maybe_layer->layerId();
22865 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
22866 return at::_ops::mse_loss::call(self, target, reduction);
22867 }
22868 Tensor self_value;
22869 optional<int64_t> self_bdim;
22870 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22871 Tensor target_value;
22872 optional<int64_t> target_bdim;
22873 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
22874 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
22875 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22876}
22877template <typename batch_rule_t, batch_rule_t batch_rule>
22878at::Tensor mse_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
22879 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22880 auto maybe_layer = maybeCurrentDynamicLayer();
22881 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22882 int64_t cur_level = maybe_layer->layerId();
22883 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
22884 return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction);
22885 }
22886 Tensor grad_output_value;
22887 optional<int64_t> grad_output_bdim;
22888 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
22889 Tensor self_value;
22890 optional<int64_t> self_bdim;
22891 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22892 Tensor target_value;
22893 optional<int64_t> target_bdim;
22894 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
22895 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
22896 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22897}
22898template <typename batch_rule_t, batch_rule_t batch_rule>
22899at::Tensor l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
22900 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22901 auto maybe_layer = maybeCurrentDynamicLayer();
22902 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22903 int64_t cur_level = maybe_layer->layerId();
22904 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
22905 return at::_ops::l1_loss::call(self, target, reduction);
22906 }
22907 Tensor self_value;
22908 optional<int64_t> self_bdim;
22909 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22910 Tensor target_value;
22911 optional<int64_t> target_bdim;
22912 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
22913 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
22914 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22915}
22916template <typename batch_rule_t, batch_rule_t batch_rule>
22917at::Tensor multi_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
22918 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22919 auto maybe_layer = maybeCurrentDynamicLayer();
22920 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22921 int64_t cur_level = maybe_layer->layerId();
22922 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
22923 return at::_ops::multi_margin_loss::call(self, target, p, margin, weight, reduction);
22924 }
22925 Tensor self_value;
22926 optional<int64_t> self_bdim;
22927 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22928 Tensor target_value;
22929 optional<int64_t> target_bdim;
22930 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
22931 optional<Tensor> weight_value;
22932 optional<int64_t> weight_bdim;
22933 if (weight) {
22934 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
22935 }
22936 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
22937 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22938}
22939template <typename batch_rule_t, batch_rule_t batch_rule>
22940at::Tensor multi_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
22941 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22942 auto maybe_layer = maybeCurrentDynamicLayer();
22943 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22944 int64_t cur_level = maybe_layer->layerId();
22945 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
22946 return at::_ops::multi_margin_loss_backward::call(grad_output, self, target, p, margin, weight, reduction);
22947 }
22948 Tensor grad_output_value;
22949 optional<int64_t> grad_output_bdim;
22950 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
22951 Tensor self_value;
22952 optional<int64_t> self_bdim;
22953 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22954 Tensor target_value;
22955 optional<int64_t> target_bdim;
22956 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
22957 optional<Tensor> weight_value;
22958 optional<int64_t> weight_bdim;
22959 if (weight) {
22960 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
22961 }
22962 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
22963 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22964}
22965template <typename batch_rule_t, batch_rule_t batch_rule>
22966at::Tensor multilabel_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
22967 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22968 auto maybe_layer = maybeCurrentDynamicLayer();
22969 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22970 int64_t cur_level = maybe_layer->layerId();
22971 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
22972 return at::_ops::multilabel_margin_loss::call(self, target, reduction);
22973 }
22974 Tensor self_value;
22975 optional<int64_t> self_bdim;
22976 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22977 Tensor target_value;
22978 optional<int64_t> target_bdim;
22979 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
22980 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
22981 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
22982}
22983template <typename batch_rule_t, batch_rule_t batch_rule>
22984::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
22985 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
22986 auto maybe_layer = maybeCurrentDynamicLayer();
22987 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
22988 int64_t cur_level = maybe_layer->layerId();
22989 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
22990 return at::_ops::multilabel_margin_loss_forward::call(self, target, reduction);
22991 }
22992 Tensor self_value;
22993 optional<int64_t> self_bdim;
22994 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
22995 Tensor target_value;
22996 optional<int64_t> target_bdim;
22997 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
22998 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
22999 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
23000}
23001template <typename batch_rule_t, batch_rule_t batch_rule>
23002at::Tensor multilabel_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
23003 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23004 auto maybe_layer = maybeCurrentDynamicLayer();
23005 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23006 int64_t cur_level = maybe_layer->layerId();
23007 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(is_target, cur_level)) {
23008 return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target);
23009 }
23010 Tensor grad_output_value;
23011 optional<int64_t> grad_output_bdim;
23012 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23013 Tensor self_value;
23014 optional<int64_t> self_bdim;
23015 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23016 Tensor target_value;
23017 optional<int64_t> target_bdim;
23018 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23019 Tensor is_target_value;
23020 optional<int64_t> is_target_bdim;
23021 std::tie(is_target_value, is_target_bdim) = unwrapTensorAtLevel(is_target, cur_level);
23022 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, is_target_value, is_target_bdim);
23023 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23024}
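// Editor's note: the nll_loss family adds an optional `weight`, which is unwrapped with
// unwrapTensorAtLevel only when the optional is set; otherwise an empty (value, bdim)
// pair is passed to the batch rule. `ignore_index` is a c10::SymInt forwarded as-is,
// and the *_forward variants return two-tensor tuples that are re-wrapped pairwise
// with makeBatched.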
23025template <typename batch_rule_t, batch_rule_t batch_rule>
23026at::Tensor nll_loss_nd_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
23027 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23028 auto maybe_layer = maybeCurrentDynamicLayer();
23029 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23030 int64_t cur_level = maybe_layer->layerId();
23031 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
23032 return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
23033 }
23034 Tensor self_value;
23035 optional<int64_t> self_bdim;
23036 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23037 Tensor target_value;
23038 optional<int64_t> target_bdim;
23039 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23040 optional<Tensor> weight_value;
23041 optional<int64_t> weight_bdim;
23042 if (weight) {
23043 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
23044 }
23045 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
23046 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23047}
23048template <typename batch_rule_t, batch_rule_t batch_rule>
23049at::Tensor nll_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
23050 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23051 auto maybe_layer = maybeCurrentDynamicLayer();
23052 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23053 int64_t cur_level = maybe_layer->layerId();
23054 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
23055 return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index);
23056 }
23057 Tensor self_value;
23058 optional<int64_t> self_bdim;
23059 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23060 Tensor target_value;
23061 optional<int64_t> target_bdim;
23062 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23063 optional<Tensor> weight_value;
23064 optional<int64_t> weight_bdim;
23065 if (weight) {
23066 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
23067 }
23068 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
23069 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23070}
23071template <typename batch_rule_t, batch_rule_t batch_rule>
23072::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
23073 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23074 auto maybe_layer = maybeCurrentDynamicLayer();
23075 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23076 int64_t cur_level = maybe_layer->layerId();
23077 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
23078 return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
23079 }
23080 Tensor self_value;
23081 optional<int64_t> self_bdim;
23082 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23083 Tensor target_value;
23084 optional<int64_t> target_bdim;
23085 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23086 optional<Tensor> weight_value;
23087 optional<int64_t> weight_bdim;
23088 if (weight) {
23089 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
23090 }
23091 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
23092 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
23093}
23094template <typename batch_rule_t, batch_rule_t batch_rule>
23095at::Tensor nll_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
23096 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23097 auto maybe_layer = maybeCurrentDynamicLayer();
23098 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23099 int64_t cur_level = maybe_layer->layerId();
23100 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
23101 return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
23102 }
23103 Tensor grad_output_value;
23104 optional<int64_t> grad_output_bdim;
23105 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23106 Tensor self_value;
23107 optional<int64_t> self_bdim;
23108 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23109 Tensor target_value;
23110 optional<int64_t> target_bdim;
23111 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23112 Tensor total_weight_value;
23113 optional<int64_t> total_weight_bdim;
23114 std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level);
23115 optional<Tensor> weight_value;
23116 optional<int64_t> weight_bdim;
23117 if (weight) {
23118 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
23119 }
23120 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
23121 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23122}
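// Editor's note: nll_loss2d, nll_loss2d_forward, and nll_loss2d_backward below mirror
// the nll_loss plumbings above line for line; only the underlying op being dispatched
// to differs.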
23123template <typename batch_rule_t, batch_rule_t batch_rule>
23124at::Tensor nll_loss2d_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
23125 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23126 auto maybe_layer = maybeCurrentDynamicLayer();
23127 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23128 int64_t cur_level = maybe_layer->layerId();
23129 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
23130 return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
23131 }
23132 Tensor self_value;
23133 optional<int64_t> self_bdim;
23134 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23135 Tensor target_value;
23136 optional<int64_t> target_bdim;
23137 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23138 optional<Tensor> weight_value;
23139 optional<int64_t> weight_bdim;
23140 if (weight) {
23141 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
23142 }
23143 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
23144 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23145}
23146template <typename batch_rule_t, batch_rule_t batch_rule>
23147::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
23148 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23149 auto maybe_layer = maybeCurrentDynamicLayer();
23150 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23151 int64_t cur_level = maybe_layer->layerId();
23152 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
23153 return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index);
23154 }
23155 Tensor self_value;
23156 optional<int64_t> self_bdim;
23157 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23158 Tensor target_value;
23159 optional<int64_t> target_bdim;
23160 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23161 optional<Tensor> weight_value;
23162 optional<int64_t> weight_bdim;
23163 if (weight) {
23164 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
23165 }
23166 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
23167 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
23168}
23169template <typename batch_rule_t, batch_rule_t batch_rule>
23170at::Tensor nll_loss2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
23171 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23172 auto maybe_layer = maybeCurrentDynamicLayer();
23173 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23174 int64_t cur_level = maybe_layer->layerId();
23175 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
23176 return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
23177 }
23178 Tensor grad_output_value;
23179 optional<int64_t> grad_output_bdim;
23180 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23181 Tensor self_value;
23182 optional<int64_t> self_bdim;
23183 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23184 Tensor target_value;
23185 optional<int64_t> target_bdim;
23186 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23187 Tensor total_weight_value;
23188 optional<int64_t> total_weight_bdim;
23189 std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level);
23190 optional<Tensor> weight_value;
23191 optional<int64_t> weight_bdim;
23192 if (weight) {
23193 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
23194 }
23195 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
23196 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23197}
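// Editor's note: the last group (smooth_l1_loss, huber_loss, soft_margin_loss and their
// backward variants) follows the standard binary-loss pattern; the extra
// hyper-parameters (`beta` for smooth_l1, `delta` for huber) are plain doubles passed
// straight through to both the fallback op call and the batch rule.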
23198template <typename batch_rule_t, batch_rule_t batch_rule>
23199at::Tensor smooth_l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
23200 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23201 auto maybe_layer = maybeCurrentDynamicLayer();
23202 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23203 int64_t cur_level = maybe_layer->layerId();
23204 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
23205 return at::_ops::smooth_l1_loss::call(self, target, reduction, beta);
23206 }
23207 Tensor self_value;
23208 optional<int64_t> self_bdim;
23209 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23210 Tensor target_value;
23211 optional<int64_t> target_bdim;
23212 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23213 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, beta);
23214 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23215}
23216template <typename batch_rule_t, batch_rule_t batch_rule>
23217at::Tensor smooth_l1_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
23218 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23219 auto maybe_layer = maybeCurrentDynamicLayer();
23220 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23221 int64_t cur_level = maybe_layer->layerId();
23222 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
23223 return at::_ops::smooth_l1_loss_backward::call(grad_output, self, target, reduction, beta);
23224 }
23225 Tensor grad_output_value;
23226 optional<int64_t> grad_output_bdim;
23227 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23228 Tensor self_value;
23229 optional<int64_t> self_bdim;
23230 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23231 Tensor target_value;
23232 optional<int64_t> target_bdim;
23233 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23234 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, beta);
23235 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23236}
23237template <typename batch_rule_t, batch_rule_t batch_rule>
23238at::Tensor huber_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
23239 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23240 auto maybe_layer = maybeCurrentDynamicLayer();
23241 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23242 int64_t cur_level = maybe_layer->layerId();
23243 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
23244 return at::_ops::huber_loss::call(self, target, reduction, delta);
23245 }
23246 Tensor self_value;
23247 optional<int64_t> self_bdim;
23248 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23249 Tensor target_value;
23250 optional<int64_t> target_bdim;
23251 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23252 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, delta);
23253 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23254}
23255template <typename batch_rule_t, batch_rule_t batch_rule>
23256at::Tensor huber_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
23257 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23258 auto maybe_layer = maybeCurrentDynamicLayer();
23259 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23260 int64_t cur_level = maybe_layer->layerId();
23261 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
23262 return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta);
23263 }
23264 Tensor grad_output_value;
23265 optional<int64_t> grad_output_bdim;
23266 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23267 Tensor self_value;
23268 optional<int64_t> self_bdim;
23269 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23270 Tensor target_value;
23271 optional<int64_t> target_bdim;
23272 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23273 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, delta);
23274 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23275}
23276template <typename batch_rule_t, batch_rule_t batch_rule>
23277at::Tensor soft_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
23278 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23279 auto maybe_layer = maybeCurrentDynamicLayer();
23280 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23281 int64_t cur_level = maybe_layer->layerId();
23282 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
23283 return at::_ops::soft_margin_loss::call(self, target, reduction);
23284 }
23285 Tensor self_value;
23286 optional<int64_t> self_bdim;
23287 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23288 Tensor target_value;
23289 optional<int64_t> target_bdim;
23290 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23291 auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
23292 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23293}
23294template <typename batch_rule_t, batch_rule_t batch_rule>
23295at::Tensor soft_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
23296 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23297 auto maybe_layer = maybeCurrentDynamicLayer();
23298 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23299 int64_t cur_level = maybe_layer->layerId();
23300 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
23301 return at::_ops::soft_margin_loss_backward::call(grad_output, self, target, reduction);
23302 }
23303 Tensor grad_output_value;
23304 optional<int64_t> grad_output_bdim;
23305 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23306 Tensor self_value;
23307 optional<int64_t> self_bdim;
23308 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23309 Tensor target_value;
23310 optional<int64_t> target_bdim;
23311 std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
23312 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
23313 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23314}
23315template <typename batch_rule_t, batch_rule_t batch_rule>
23316at::Tensor elu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
23317 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23318 auto maybe_layer = maybeCurrentDynamicLayer();
23319 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23320 int64_t cur_level = maybe_layer->layerId();
23321 if (!isBatchedAtLevel(self, cur_level)) {
23322 return at::_ops::elu::call(self, alpha, scale, input_scale);
23323 }
23324 Tensor self_value;
23325 optional<int64_t> self_bdim;
23326 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23327 auto results = batch_rule(self_value, self_bdim, alpha, scale, input_scale);
23328 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23329}
23330template <typename batch_rule_t, batch_rule_t batch_rule>
23331at::Tensor elu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
23332 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23333 auto maybe_layer = maybeCurrentDynamicLayer();
23334 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23335 int64_t cur_level = maybe_layer->layerId();
23336 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self_or_result, cur_level)) {
23337 return at::_ops::elu_backward::call(grad_output, alpha, scale, input_scale, is_result, self_or_result);
23338 }
23339 Tensor grad_output_value;
23340 optional<int64_t> grad_output_bdim;
23341 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23342 Tensor self_or_result_value;
23343 optional<int64_t> self_or_result_bdim;
23344 std::tie(self_or_result_value, self_or_result_bdim) = unwrapTensorAtLevel(self_or_result, cur_level);
23345 auto results = batch_rule(grad_output_value, grad_output_bdim, alpha, scale, input_scale, is_result, self_or_result_value, self_or_result_bdim);
23346 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23347}
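// In-place variants (trailing underscore, e.g. elu_ below) differ from the functional ones:
// after unwrapping the batched self, the batch rule is invoked purely for its side effect on
// self_value, and the original `self` reference is returned directly, so no makeBatched
// re-wrapping is needed.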
23348template <typename batch_rule_t, batch_rule_t batch_rule>
23349at::Tensor & elu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
23350 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23351 auto maybe_layer = maybeCurrentDynamicLayer();
23352 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
23353 int64_t cur_level = maybe_layer->layerId();
23354 if (!isBatchedAtLevel(self, cur_level)) {
23355 return at::_ops::elu_::call(self, alpha, scale, input_scale);
23356 }
23357 Tensor self_value;
23358 optional<int64_t> self_bdim;
23359 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23360 batch_rule(self_value, self_bdim, alpha, scale, input_scale);
23361 return self;
23362}
23363template <typename batch_rule_t, batch_rule_t batch_rule>
23364at::Tensor glu_generated_plumbing(const at::Tensor & self, int64_t dim) {
23365 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23366 auto maybe_layer = maybeCurrentDynamicLayer();
23367 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23368 int64_t cur_level = maybe_layer->layerId();
23369 if (!isBatchedAtLevel(self, cur_level)) {
23370 return at::_ops::glu::call(self, dim);
23371 }
23372 Tensor self_value;
23373 optional<int64_t> self_bdim;
23374 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23375 auto results = batch_rule(self_value, self_bdim, dim);
23376 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23377}
23378template <typename batch_rule_t, batch_rule_t batch_rule>
23379at::Tensor glu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
23380 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23381 auto maybe_layer = maybeCurrentDynamicLayer();
23382 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23383 int64_t cur_level = maybe_layer->layerId();
23384 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23385 return at::_ops::glu_backward::call(grad_output, self, dim);
23386 }
23387 Tensor grad_output_value;
23388 optional<int64_t> grad_output_bdim;
23389 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23390 Tensor self_value;
23391 optional<int64_t> self_bdim;
23392 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23393 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim);
23394 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23395}
23396template <typename batch_rule_t, batch_rule_t batch_rule>
23397at::Tensor glu_jvp_generated_plumbing(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
23398 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23399 auto maybe_layer = maybeCurrentDynamicLayer();
23400 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23401 int64_t cur_level = maybe_layer->layerId();
23402 if (!isBatchedAtLevel(glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
23403 return at::_ops::glu_jvp::call(glu, x, dx, dim);
23404 }
23405 Tensor glu_value;
23406 optional<int64_t> glu_bdim;
23407 std::tie(glu_value, glu_bdim) = unwrapTensorAtLevel(glu, cur_level);
23408 Tensor x_value;
23409 optional<int64_t> x_bdim;
23410 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
23411 Tensor dx_value;
23412 optional<int64_t> dx_bdim;
23413 std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level);
23414 auto results = batch_rule(glu_value, glu_bdim, x_value, x_bdim, dx_value, dx_bdim, dim);
23415 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23416}
23417template <typename batch_rule_t, batch_rule_t batch_rule>
23418at::Tensor glu_backward_jvp_generated_plumbing(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
23419 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23420 auto maybe_layer = maybeCurrentDynamicLayer();
23421 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23422 int64_t cur_level = maybe_layer->layerId();
23423 if (!isBatchedAtLevel(grad_x, cur_level) && !isBatchedAtLevel(grad_glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dgrad_glu, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
23424 return at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim);
23425 }
23426 Tensor grad_x_value;
23427 optional<int64_t> grad_x_bdim;
23428 std::tie(grad_x_value, grad_x_bdim) = unwrapTensorAtLevel(grad_x, cur_level);
23429 Tensor grad_glu_value;
23430 optional<int64_t> grad_glu_bdim;
23431 std::tie(grad_glu_value, grad_glu_bdim) = unwrapTensorAtLevel(grad_glu, cur_level);
23432 Tensor x_value;
23433 optional<int64_t> x_bdim;
23434 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
23435 Tensor dgrad_glu_value;
23436 optional<int64_t> dgrad_glu_bdim;
23437 std::tie(dgrad_glu_value, dgrad_glu_bdim) = unwrapTensorAtLevel(dgrad_glu, cur_level);
23438 Tensor dx_value;
23439 optional<int64_t> dx_bdim;
23440 std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level);
23441 auto results = batch_rule(grad_x_value, grad_x_bdim, grad_glu_value, grad_glu_bdim, x_value, x_bdim, dgrad_glu_value, dgrad_glu_bdim, dx_value, dx_bdim, dim);
23442 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23443}
23444template <typename batch_rule_t, batch_rule_t batch_rule>
23445at::Tensor hardsigmoid_generated_plumbing(const at::Tensor & self) {
23446 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23447 auto maybe_layer = maybeCurrentDynamicLayer();
23448 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23449 int64_t cur_level = maybe_layer->layerId();
23450 if (!isBatchedAtLevel(self, cur_level)) {
23451 return at::_ops::hardsigmoid::call(self);
23452 }
23453 Tensor self_value;
23454 optional<int64_t> self_bdim;
23455 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23456 auto results = batch_rule(self_value, self_bdim);
23457 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23458}
23459template <typename batch_rule_t, batch_rule_t batch_rule>
23460at::Tensor & hardsigmoid__generated_plumbing(at::Tensor & self) {
23461 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23462 auto maybe_layer = maybeCurrentDynamicLayer();
23463 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
23464 int64_t cur_level = maybe_layer->layerId();
23465 if (!isBatchedAtLevel(self, cur_level)) {
23466 return at::_ops::hardsigmoid_::call(self);
23467 }
23468 Tensor self_value;
23469 optional<int64_t> self_bdim;
23470 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23471 batch_rule(self_value, self_bdim);
23472 return self;
23473}
23474template <typename batch_rule_t, batch_rule_t batch_rule>
23475at::Tensor hardsigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
23476 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23477 auto maybe_layer = maybeCurrentDynamicLayer();
23478 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23479 int64_t cur_level = maybe_layer->layerId();
23480 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23481 return at::_ops::hardsigmoid_backward::call(grad_output, self);
23482 }
23483 Tensor grad_output_value;
23484 optional<int64_t> grad_output_bdim;
23485 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23486 Tensor self_value;
23487 optional<int64_t> self_bdim;
23488 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23489 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
23490 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23491}
23492template <typename batch_rule_t, batch_rule_t batch_rule>
23493at::Tensor hardtanh_generated_plumbing(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
23494 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23495 auto maybe_layer = maybeCurrentDynamicLayer();
23496 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23497 int64_t cur_level = maybe_layer->layerId();
23498 if (!isBatchedAtLevel(self, cur_level)) {
23499 return at::_ops::hardtanh::call(self, min_val, max_val);
23500 }
23501 Tensor self_value;
23502 optional<int64_t> self_bdim;
23503 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23504 auto results = batch_rule(self_value, self_bdim, min_val, max_val);
23505 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23506}
23507template <typename batch_rule_t, batch_rule_t batch_rule>
23508at::Tensor hardtanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
23509 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23510 auto maybe_layer = maybeCurrentDynamicLayer();
23511 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23512 int64_t cur_level = maybe_layer->layerId();
23513 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23514 return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val);
23515 }
23516 Tensor grad_output_value;
23517 optional<int64_t> grad_output_bdim;
23518 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23519 Tensor self_value;
23520 optional<int64_t> self_bdim;
23521 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23522 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, min_val, max_val);
23523 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23524}
23525template <typename batch_rule_t, batch_rule_t batch_rule>
23526at::Tensor & hardtanh__generated_plumbing(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
23527 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23528 auto maybe_layer = maybeCurrentDynamicLayer();
23529 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
23530 int64_t cur_level = maybe_layer->layerId();
23531 if (!isBatchedAtLevel(self, cur_level)) {
23532 return at::_ops::hardtanh_::call(self, min_val, max_val);
23533 }
23534 Tensor self_value;
23535 optional<int64_t> self_bdim;
23536 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23537 batch_rule(self_value, self_bdim, min_val, max_val);
23538 return self;
23539}
23540template <typename batch_rule_t, batch_rule_t batch_rule>
23541at::Tensor hardswish_generated_plumbing(const at::Tensor & self) {
23542 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23543 auto maybe_layer = maybeCurrentDynamicLayer();
23544 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23545 int64_t cur_level = maybe_layer->layerId();
23546 if (!isBatchedAtLevel(self, cur_level)) {
23547 return at::_ops::hardswish::call(self);
23548 }
23549 Tensor self_value;
23550 optional<int64_t> self_bdim;
23551 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23552 auto results = batch_rule(self_value, self_bdim);
23553 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23554}
23555template <typename batch_rule_t, batch_rule_t batch_rule>
23556at::Tensor & hardswish__generated_plumbing(at::Tensor & self) {
23557 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23558 auto maybe_layer = maybeCurrentDynamicLayer();
23559 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
23560 int64_t cur_level = maybe_layer->layerId();
23561 if (!isBatchedAtLevel(self, cur_level)) {
23562 return at::_ops::hardswish_::call(self);
23563 }
23564 Tensor self_value;
23565 optional<int64_t> self_bdim;
23566 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23567 batch_rule(self_value, self_bdim);
23568 return self;
23569}
23570template <typename batch_rule_t, batch_rule_t batch_rule>
23571at::Tensor hardswish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
23572 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23573 auto maybe_layer = maybeCurrentDynamicLayer();
23574 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23575 int64_t cur_level = maybe_layer->layerId();
23576 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23577 return at::_ops::hardswish_backward::call(grad_output, self);
23578 }
23579 Tensor grad_output_value;
23580 optional<int64_t> grad_output_bdim;
23581 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23582 Tensor self_value;
23583 optional<int64_t> self_bdim;
23584 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23585 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
23586 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23587}
23588template <typename batch_rule_t, batch_rule_t batch_rule>
23589at::Tensor leaky_relu_generated_plumbing(const at::Tensor & self, const at::Scalar & negative_slope) {
23590 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23591 auto maybe_layer = maybeCurrentDynamicLayer();
23592 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23593 int64_t cur_level = maybe_layer->layerId();
23594 if (!isBatchedAtLevel(self, cur_level)) {
23595 return at::_ops::leaky_relu::call(self, negative_slope);
23596 }
23597 Tensor self_value;
23598 optional<int64_t> self_bdim;
23599 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23600 auto results = batch_rule(self_value, self_bdim, negative_slope);
23601 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23602}
23603template <typename batch_rule_t, batch_rule_t batch_rule>
23604at::Tensor leaky_relu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
23605 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23606 auto maybe_layer = maybeCurrentDynamicLayer();
23607 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23608 int64_t cur_level = maybe_layer->layerId();
23609 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23610 return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result);
23611 }
23612 Tensor grad_output_value;
23613 optional<int64_t> grad_output_bdim;
23614 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23615 Tensor self_value;
23616 optional<int64_t> self_bdim;
23617 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23618 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, negative_slope, self_is_result);
23619 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23620}
23621template <typename batch_rule_t, batch_rule_t batch_rule>
23622at::Tensor & leaky_relu__generated_plumbing(at::Tensor & self, const at::Scalar & negative_slope) {
23623 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23624 auto maybe_layer = maybeCurrentDynamicLayer();
23625 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
23626 int64_t cur_level = maybe_layer->layerId();
23627 if (!isBatchedAtLevel(self, cur_level)) {
23628 return at::_ops::leaky_relu_::call(self, negative_slope);
23629 }
23630 Tensor self_value;
23631 optional<int64_t> self_bdim;
23632 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23633 batch_rule(self_value, self_bdim, negative_slope);
23634 return self;
23635}
23636template <typename batch_rule_t, batch_rule_t batch_rule>
23637at::Tensor log_sigmoid_generated_plumbing(const at::Tensor & self) {
23638 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23639 auto maybe_layer = maybeCurrentDynamicLayer();
23640 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23641 int64_t cur_level = maybe_layer->layerId();
23642 if (!isBatchedAtLevel(self, cur_level)) {
23643 return at::_ops::log_sigmoid::call(self);
23644 }
23645 Tensor self_value;
23646 optional<int64_t> self_bdim;
23647 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23648 auto results = batch_rule(self_value, self_bdim);
23649 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23650}
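// Multi-output operators (e.g. log_sigmoid_forward below, which produces both the result and a
// buffer) receive a flattened tuple of (value, bdim) pairs from the batch rule; each pair is
// re-wrapped with makeBatched and packed back into the returned std::tuple.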
23651template <typename batch_rule_t, batch_rule_t batch_rule>
23652::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward_generated_plumbing(const at::Tensor & self) {
23653 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23654 auto maybe_layer = maybeCurrentDynamicLayer();
23655 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23656 int64_t cur_level = maybe_layer->layerId();
23657 if (!isBatchedAtLevel(self, cur_level)) {
23658 return at::_ops::log_sigmoid_forward::call(self);
23659 }
23660 Tensor self_value;
23661 optional<int64_t> self_bdim;
23662 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23663 auto results = batch_rule(self_value, self_bdim);
23664 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
23665}
23666template <typename batch_rule_t, batch_rule_t batch_rule>
23667at::Tensor log_sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
23668 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23669 auto maybe_layer = maybeCurrentDynamicLayer();
23670 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23671 int64_t cur_level = maybe_layer->layerId();
23672 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(buffer, cur_level)) {
23673 return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer);
23674 }
23675 Tensor grad_output_value;
23676 optional<int64_t> grad_output_bdim;
23677 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23678 Tensor self_value;
23679 optional<int64_t> self_bdim;
23680 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23681 Tensor buffer_value;
23682 optional<int64_t> buffer_bdim;
23683 std::tie(buffer_value, buffer_bdim) = unwrapTensorAtLevel(buffer, cur_level);
23684 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, buffer_value, buffer_bdim);
23685 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23686}
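// Random operators such as rrelu_with_noise forward the c10::optional<at::Generator> argument
// unchanged both to the fallback call and to the batch rule; any interaction between the
// generator and the added batch dimension is left to the batch rule implementation.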
23687template <typename batch_rule_t, batch_rule_t batch_rule>
23688at::Tensor rrelu_with_noise_generated_plumbing(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
23689 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23690 auto maybe_layer = maybeCurrentDynamicLayer();
23691 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23692 int64_t cur_level = maybe_layer->layerId();
23693 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
23694 return at::_ops::rrelu_with_noise::call(self, noise, lower, upper, training, generator);
23695 }
23696 Tensor self_value;
23697 optional<int64_t> self_bdim;
23698 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23699 Tensor noise_value;
23700 optional<int64_t> noise_bdim;
23701 std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
23702 auto results = batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator);
23703 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23704}
23705template <typename batch_rule_t, batch_rule_t batch_rule>
23706at::Tensor rrelu_with_noise_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
23707 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23708 auto maybe_layer = maybeCurrentDynamicLayer();
23709 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23710 int64_t cur_level = maybe_layer->layerId();
23711 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
23712 return at::_ops::rrelu_with_noise_backward::call(grad_output, self, noise, lower, upper, training, self_is_result);
23713 }
23714 Tensor grad_output_value;
23715 optional<int64_t> grad_output_bdim;
23716 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23717 Tensor self_value;
23718 optional<int64_t> self_bdim;
23719 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23720 Tensor noise_value;
23721 optional<int64_t> noise_bdim;
23722 std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
23723 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, self_is_result);
23724 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23725}
23726template <typename batch_rule_t, batch_rule_t batch_rule>
23727at::Tensor & rrelu_with_noise__generated_plumbing(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
23728 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23729 auto maybe_layer = maybeCurrentDynamicLayer();
23730 vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
23731 int64_t cur_level = maybe_layer->layerId();
23732 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
23733 return at::_ops::rrelu_with_noise_::call(self, noise, lower, upper, training, generator);
23734 }
23735 Tensor self_value;
23736 optional<int64_t> self_bdim;
23737 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23738 Tensor noise_value;
23739 optional<int64_t> noise_bdim;
23740 std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
23741 batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator);
23742 return self;
23743}
23744template <typename batch_rule_t, batch_rule_t batch_rule>
23745at::Tensor softplus_generated_plumbing(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
23746 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23747 auto maybe_layer = maybeCurrentDynamicLayer();
23748 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23749 int64_t cur_level = maybe_layer->layerId();
23750 if (!isBatchedAtLevel(self, cur_level)) {
23751 return at::_ops::softplus::call(self, beta, threshold);
23752 }
23753 Tensor self_value;
23754 optional<int64_t> self_bdim;
23755 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23756 auto results = batch_rule(self_value, self_bdim, beta, threshold);
23757 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23758}
23759template <typename batch_rule_t, batch_rule_t batch_rule>
23760at::Tensor softplus_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
23761 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23762 auto maybe_layer = maybeCurrentDynamicLayer();
23763 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23764 int64_t cur_level = maybe_layer->layerId();
23765 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23766 return at::_ops::softplus_backward::call(grad_output, self, beta, threshold);
23767 }
23768 Tensor grad_output_value;
23769 optional<int64_t> grad_output_bdim;
23770 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23771 Tensor self_value;
23772 optional<int64_t> self_bdim;
23773 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23774 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, beta, threshold);
23775 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23776}
23777template <typename batch_rule_t, batch_rule_t batch_rule>
23778at::Tensor softshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
23779 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23780 auto maybe_layer = maybeCurrentDynamicLayer();
23781 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23782 int64_t cur_level = maybe_layer->layerId();
23783 if (!isBatchedAtLevel(self, cur_level)) {
23784 return at::_ops::softshrink::call(self, lambd);
23785 }
23786 Tensor self_value;
23787 optional<int64_t> self_bdim;
23788 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23789 auto results = batch_rule(self_value, self_bdim, lambd);
23790 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23791}
23792template <typename batch_rule_t, batch_rule_t batch_rule>
23793at::Tensor softshrink_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
23794 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23795 auto maybe_layer = maybeCurrentDynamicLayer();
23796 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23797 int64_t cur_level = maybe_layer->layerId();
23798 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23799 return at::_ops::softshrink_backward::call(grad_output, self, lambd);
23800 }
23801 Tensor grad_output_value;
23802 optional<int64_t> grad_output_bdim;
23803 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23804 Tensor self_value;
23805 optional<int64_t> self_bdim;
23806 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23807 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, lambd);
23808 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23809}
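// The pooling plumbings below only unwrap their Tensor arguments; sizes, strides, padding and
// mode parameters (IntArrayRef / SymIntArrayRef / bool / optional<int64_t>) carry no batch
// dimension and are passed to the batch rule verbatim.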
23810template <typename batch_rule_t, batch_rule_t batch_rule>
23811at::Tensor adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
23812 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23813 auto maybe_layer = maybeCurrentDynamicLayer();
23814 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23815 int64_t cur_level = maybe_layer->layerId();
23816 if (!isBatchedAtLevel(self, cur_level)) {
23817 return at::_ops::adaptive_avg_pool2d::call(self, output_size);
23818 }
23819 Tensor self_value;
23820 optional<int64_t> self_bdim;
23821 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23822 auto results = batch_rule(self_value, self_bdim, output_size);
23823 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23824}
23825template <typename batch_rule_t, batch_rule_t batch_rule>
23826at::Tensor mkldnn_adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
23827 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23828 auto maybe_layer = maybeCurrentDynamicLayer();
23829 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23830 int64_t cur_level = maybe_layer->layerId();
23831 if (!isBatchedAtLevel(self, cur_level)) {
23832 return at::_ops::mkldnn_adaptive_avg_pool2d::call(self, output_size);
23833 }
23834 Tensor self_value;
23835 optional<int64_t> self_bdim;
23836 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23837 auto results = batch_rule(self_value, self_bdim, output_size);
23838 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23839}
23840template <typename batch_rule_t, batch_rule_t batch_rule>
23841at::Tensor mkldnn_adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
23842 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23843 auto maybe_layer = maybeCurrentDynamicLayer();
23844 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23845 int64_t cur_level = maybe_layer->layerId();
23846 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23847 return at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self);
23848 }
23849 Tensor grad_output_value;
23850 optional<int64_t> grad_output_bdim;
23851 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23852 Tensor self_value;
23853 optional<int64_t> self_bdim;
23854 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23855 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
23856 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23857}
23858template <typename batch_rule_t, batch_rule_t batch_rule>
23859at::Tensor _adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
23860 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23861 auto maybe_layer = maybeCurrentDynamicLayer();
23862 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23863 int64_t cur_level = maybe_layer->layerId();
23864 if (!isBatchedAtLevel(self, cur_level)) {
23865 return at::_ops::_adaptive_avg_pool2d::call(self, output_size);
23866 }
23867 Tensor self_value;
23868 optional<int64_t> self_bdim;
23869 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23870 auto results = batch_rule(self_value, self_bdim, output_size);
23871 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23872}
23873template <typename batch_rule_t, batch_rule_t batch_rule>
23874at::Tensor _adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
23875 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23876 auto maybe_layer = maybeCurrentDynamicLayer();
23877 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23878 int64_t cur_level = maybe_layer->layerId();
23879 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23880 return at::_ops::_adaptive_avg_pool2d_backward::call(grad_output, self);
23881 }
23882 Tensor grad_output_value;
23883 optional<int64_t> grad_output_bdim;
23884 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23885 Tensor self_value;
23886 optional<int64_t> self_bdim;
23887 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23888 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
23889 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23890}
23891template <typename batch_rule_t, batch_rule_t batch_rule>
23892at::Tensor adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
23893 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23894 auto maybe_layer = maybeCurrentDynamicLayer();
23895 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23896 int64_t cur_level = maybe_layer->layerId();
23897 if (!isBatchedAtLevel(self, cur_level)) {
23898 return at::_ops::adaptive_avg_pool3d::call(self, output_size);
23899 }
23900 Tensor self_value;
23901 optional<int64_t> self_bdim;
23902 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23903 auto results = batch_rule(self_value, self_bdim, output_size);
23904 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23905}
23906template <typename batch_rule_t, batch_rule_t batch_rule>
23907at::Tensor _adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
23908 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23909 auto maybe_layer = maybeCurrentDynamicLayer();
23910 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23911 int64_t cur_level = maybe_layer->layerId();
23912 if (!isBatchedAtLevel(self, cur_level)) {
23913 return at::_ops::_adaptive_avg_pool3d::call(self, output_size);
23914 }
23915 Tensor self_value;
23916 optional<int64_t> self_bdim;
23917 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23918 auto results = batch_rule(self_value, self_bdim, output_size);
23919 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23920}
23921template <typename batch_rule_t, batch_rule_t batch_rule>
23922at::Tensor _adaptive_avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
23923 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23924 auto maybe_layer = maybeCurrentDynamicLayer();
23925 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23926 int64_t cur_level = maybe_layer->layerId();
23927 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
23928 return at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self);
23929 }
23930 Tensor grad_output_value;
23931 optional<int64_t> grad_output_bdim;
23932 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23933 Tensor self_value;
23934 optional<int64_t> self_bdim;
23935 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23936 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
23937 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23938}
23939template <typename batch_rule_t, batch_rule_t batch_rule>
23940::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
23941 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23942 auto maybe_layer = maybeCurrentDynamicLayer();
23943 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23944 int64_t cur_level = maybe_layer->layerId();
23945 if (!isBatchedAtLevel(self, cur_level)) {
23946 return at::_ops::adaptive_max_pool2d::call(self, output_size);
23947 }
23948 Tensor self_value;
23949 optional<int64_t> self_bdim;
23950 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23951 auto results = batch_rule(self_value, self_bdim, output_size);
23952 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
23953}
23954template <typename batch_rule_t, batch_rule_t batch_rule>
23955at::Tensor adaptive_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
23956 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23957 auto maybe_layer = maybeCurrentDynamicLayer();
23958 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23959 int64_t cur_level = maybe_layer->layerId();
23960 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
23961 return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices);
23962 }
23963 Tensor grad_output_value;
23964 optional<int64_t> grad_output_bdim;
23965 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
23966 Tensor self_value;
23967 optional<int64_t> self_bdim;
23968 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23969 Tensor indices_value;
23970 optional<int64_t> indices_bdim;
23971 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
23972 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
23973 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
23974}
23975template <typename batch_rule_t, batch_rule_t batch_rule>
23976::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
23977 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23978 auto maybe_layer = maybeCurrentDynamicLayer();
23979 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23980 int64_t cur_level = maybe_layer->layerId();
23981 if (!isBatchedAtLevel(self, cur_level)) {
23982 return at::_ops::adaptive_max_pool3d::call(self, output_size);
23983 }
23984 Tensor self_value;
23985 optional<int64_t> self_bdim;
23986 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
23987 auto results = batch_rule(self_value, self_bdim, output_size);
23988 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
23989}
23990template <typename batch_rule_t, batch_rule_t batch_rule>
23991at::Tensor adaptive_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
23992 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
23993 auto maybe_layer = maybeCurrentDynamicLayer();
23994 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
23995 int64_t cur_level = maybe_layer->layerId();
23996 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
23997 return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices);
23998 }
23999 Tensor grad_output_value;
24000 optional<int64_t> grad_output_bdim;
24001 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24002 Tensor self_value;
24003 optional<int64_t> self_bdim;
24004 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24005 Tensor indices_value;
24006 optional<int64_t> indices_bdim;
24007 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
24008 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
24009 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24010}
24011template <typename batch_rule_t, batch_rule_t batch_rule>
24012at::Tensor avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
24013 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24014 auto maybe_layer = maybeCurrentDynamicLayer();
24015 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24016 int64_t cur_level = maybe_layer->layerId();
24017 if (!isBatchedAtLevel(self, cur_level)) {
24018 return at::_ops::avg_pool2d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
24019 }
24020 Tensor self_value;
24021 optional<int64_t> self_bdim;
24022 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24023 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
24024 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24025}
24026template <typename batch_rule_t, batch_rule_t batch_rule>
24027at::Tensor avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
24028 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24029 auto maybe_layer = maybeCurrentDynamicLayer();
24030 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24031 int64_t cur_level = maybe_layer->layerId();
24032 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
24033 return at::_ops::avg_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
24034 }
24035 Tensor grad_output_value;
24036 optional<int64_t> grad_output_bdim;
24037 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24038 Tensor self_value;
24039 optional<int64_t> self_bdim;
24040 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24041 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
24042 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24043}
24044template <typename batch_rule_t, batch_rule_t batch_rule>
24045at::Tensor avg_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
24046 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24047 auto maybe_layer = maybeCurrentDynamicLayer();
24048 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24049 int64_t cur_level = maybe_layer->layerId();
24050 if (!isBatchedAtLevel(self, cur_level)) {
24051 return at::_ops::avg_pool3d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
24052 }
24053 Tensor self_value;
24054 optional<int64_t> self_bdim;
24055 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24056 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
24057 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24058}
24059template <typename batch_rule_t, batch_rule_t batch_rule>
24060at::Tensor avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
24061 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24062 auto maybe_layer = maybeCurrentDynamicLayer();
24063 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24064 int64_t cur_level = maybe_layer->layerId();
24065 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
24066 return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
24067 }
24068 Tensor grad_output_value;
24069 optional<int64_t> grad_output_bdim;
24070 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24071 Tensor self_value;
24072 optional<int64_t> self_bdim;
24073 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24074 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
24075 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24076}
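// fractional_max_pool2d/3d take an extra random_samples Tensor; it is unwrapped at the current
// level just like self, so a per-example set of samples can in principle be supplied under vmap.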
24077template <typename batch_rule_t, batch_rule_t batch_rule>
24078::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
24079 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24080 auto maybe_layer = maybeCurrentDynamicLayer();
24081 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24082 int64_t cur_level = maybe_layer->layerId();
24083 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
24084 return at::_ops::fractional_max_pool2d::call(self, kernel_size, output_size, random_samples);
24085 }
24086 Tensor self_value;
24087 optional<int64_t> self_bdim;
24088 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24089 Tensor random_samples_value;
24090 optional<int64_t> random_samples_bdim;
24091 std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level);
24092 auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
24093 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
24094}
24095template <typename batch_rule_t, batch_rule_t batch_rule>
24096at::Tensor fractional_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
24097 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24098 auto maybe_layer = maybeCurrentDynamicLayer();
24099 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24100 int64_t cur_level = maybe_layer->layerId();
24101 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
24102 return at::_ops::fractional_max_pool2d_backward::call(grad_output, self, kernel_size, output_size, indices);
24103 }
24104 Tensor grad_output_value;
24105 optional<int64_t> grad_output_bdim;
24106 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24107 Tensor self_value;
24108 optional<int64_t> self_bdim;
24109 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24110 Tensor indices_value;
24111 optional<int64_t> indices_bdim;
24112 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
24113 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
24114 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24115}
24116template <typename batch_rule_t, batch_rule_t batch_rule>
24117::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
24118 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24119 auto maybe_layer = maybeCurrentDynamicLayer();
24120 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24121 int64_t cur_level = maybe_layer->layerId();
24122 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
24123 return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples);
24124 }
24125 Tensor self_value;
24126 optional<int64_t> self_bdim;
24127 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24128 Tensor random_samples_value;
24129 optional<int64_t> random_samples_bdim;
24130 std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level);
24131 auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
24132 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
24133}
24134template <typename batch_rule_t, batch_rule_t batch_rule>
24135at::Tensor fractional_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
24136 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24137 auto maybe_layer = maybeCurrentDynamicLayer();
24138 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24139 int64_t cur_level = maybe_layer->layerId();
24140 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
24141 return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices);
24142 }
24143 Tensor grad_output_value;
24144 optional<int64_t> grad_output_bdim;
24145 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24146 Tensor self_value;
24147 optional<int64_t> self_bdim;
24148 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24149 Tensor indices_value;
24150 optional<int64_t> indices_bdim;
24151 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
24152 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
24153 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24154}
24155template <typename batch_rule_t, batch_rule_t batch_rule>
24156::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
24157 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24158 auto maybe_layer = maybeCurrentDynamicLayer();
24159 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24160 int64_t cur_level = maybe_layer->layerId();
24161 if (!isBatchedAtLevel(self, cur_level)) {
24162 return at::_ops::max_pool2d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
24163 }
24164 Tensor self_value;
24165 optional<int64_t> self_bdim;
24166 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24167 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
24168 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
24169}
24170template <typename batch_rule_t, batch_rule_t batch_rule>
24171at::Tensor max_pool2d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
24172 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24173 auto maybe_layer = maybeCurrentDynamicLayer();
24174 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24175 int64_t cur_level = maybe_layer->layerId();
24176 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
24177 return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
24178 }
24179 Tensor grad_output_value;
24180 optional<int64_t> grad_output_bdim;
24181 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24182 Tensor self_value;
24183 optional<int64_t> self_bdim;
24184 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24185 Tensor indices_value;
24186 optional<int64_t> indices_bdim;
24187 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
24188 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
24189 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24190}
24191template <typename batch_rule_t, batch_rule_t batch_rule>
24192::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
24193 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24194 auto maybe_layer = maybeCurrentDynamicLayer();
24195 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24196 int64_t cur_level = maybe_layer->layerId();
24197 if (!isBatchedAtLevel(self, cur_level)) {
24198 return at::_ops::max_pool3d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
24199 }
24200 Tensor self_value;
24201 optional<int64_t> self_bdim;
24202 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24203 auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
24204 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
24205}
24206template <typename batch_rule_t, batch_rule_t batch_rule>
24207at::Tensor max_pool3d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
24208 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24209 auto maybe_layer = maybeCurrentDynamicLayer();
24210 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24211 int64_t cur_level = maybe_layer->layerId();
24212 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
24213 return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
24214 }
24215 Tensor grad_output_value;
24216 optional<int64_t> grad_output_bdim;
24217 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24218 Tensor self_value;
24219 optional<int64_t> self_bdim;
24220 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24221 Tensor indices_value;
24222 optional<int64_t> indices_bdim;
24223 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
24224 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
24225 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24226}
24227template <typename batch_rule_t, batch_rule_t batch_rule>
24228at::Tensor max_unpool2d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) {
24229 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24230 auto maybe_layer = maybeCurrentDynamicLayer();
24231 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24232 int64_t cur_level = maybe_layer->layerId();
24233 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
24234 return at::_ops::max_unpool2d::call(self, indices, output_size);
24235 }
24236 Tensor self_value;
24237 optional<int64_t> self_bdim;
24238 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24239 Tensor indices_value;
24240 optional<int64_t> indices_bdim;
24241 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
24242 auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size);
24243 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24244}
24245template <typename batch_rule_t, batch_rule_t batch_rule>
24246at::Tensor max_unpool3d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
24247 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24248 auto maybe_layer = maybeCurrentDynamicLayer();
24249 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24250 int64_t cur_level = maybe_layer->layerId();
24251 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
24252 return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding);
24253 }
24254 Tensor self_value;
24255 optional<int64_t> self_bdim;
24256 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24257 Tensor indices_value;
24258 optional<int64_t> indices_bdim;
24259 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
24260 auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size, stride, padding);
24261 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24262}
24263template <typename batch_rule_t, batch_rule_t batch_rule>
24264at::Tensor reflection_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
24265 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24266 auto maybe_layer = maybeCurrentDynamicLayer();
24267 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24268 int64_t cur_level = maybe_layer->layerId();
24269 if (!isBatchedAtLevel(self, cur_level)) {
24270 return at::_ops::reflection_pad1d::call(self, padding);
24271 }
24272 Tensor self_value;
24273 optional<int64_t> self_bdim;
24274 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24275 auto results = batch_rule(self_value, self_bdim, padding);
24276 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24277}
24278template <typename batch_rule_t, batch_rule_t batch_rule>
24279at::Tensor reflection_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
24280 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24281 auto maybe_layer = maybeCurrentDynamicLayer();
24282 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24283 int64_t cur_level = maybe_layer->layerId();
24284 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
24285 return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
24286 }
24287 Tensor grad_output_value;
24288 optional<int64_t> grad_output_bdim;
24289 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24290 Tensor self_value;
24291 optional<int64_t> self_bdim;
24292 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24293 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
24294 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24295}
24296template <typename batch_rule_t, batch_rule_t batch_rule>
24297at::Tensor reflection_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
24298 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24299 auto maybe_layer = maybeCurrentDynamicLayer();
24300 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24301 int64_t cur_level = maybe_layer->layerId();
24302 if (!isBatchedAtLevel(self, cur_level)) {
24303 return at::_ops::reflection_pad2d::call(self, padding);
24304 }
24305 Tensor self_value;
24306 optional<int64_t> self_bdim;
24307 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24308 auto results = batch_rule(self_value, self_bdim, padding);
24309 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24310}
24311template <typename batch_rule_t, batch_rule_t batch_rule>
24312at::Tensor reflection_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
24313 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24314 auto maybe_layer = maybeCurrentDynamicLayer();
24315 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24316 int64_t cur_level = maybe_layer->layerId();
24317 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
24318 return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding);
24319 }
24320 Tensor grad_output_value;
24321 optional<int64_t> grad_output_bdim;
24322 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24323 Tensor self_value;
24324 optional<int64_t> self_bdim;
24325 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24326 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
24327 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24328}
24329template <typename batch_rule_t, batch_rule_t batch_rule>
24330at::Tensor reflection_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
24331 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24332 auto maybe_layer = maybeCurrentDynamicLayer();
24333 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24334 int64_t cur_level = maybe_layer->layerId();
24335 if (!isBatchedAtLevel(self, cur_level)) {
24336 return at::_ops::reflection_pad3d::call(self, padding);
24337 }
24338 Tensor self_value;
24339 optional<int64_t> self_bdim;
24340 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24341 auto results = batch_rule(self_value, self_bdim, padding);
24342 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24343}
24344template <typename batch_rule_t, batch_rule_t batch_rule>
24345at::Tensor reflection_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
24346 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24347 auto maybe_layer = maybeCurrentDynamicLayer();
24348 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24349 int64_t cur_level = maybe_layer->layerId();
24350 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
24351 return at::_ops::reflection_pad3d_backward::call(grad_output, self, padding);
24352 }
24353 Tensor grad_output_value;
24354 optional<int64_t> grad_output_bdim;
24355 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24356 Tensor self_value;
24357 optional<int64_t> self_bdim;
24358 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24359 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
24360 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24361}
24362template <typename batch_rule_t, batch_rule_t batch_rule>
24363at::Tensor replication_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
24364 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24365 auto maybe_layer = maybeCurrentDynamicLayer();
24366 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24367 int64_t cur_level = maybe_layer->layerId();
24368 if (!isBatchedAtLevel(self, cur_level)) {
24369 return at::_ops::replication_pad1d::call(self, padding);
24370 }
24371 Tensor self_value;
24372 optional<int64_t> self_bdim;
24373 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24374 auto results = batch_rule(self_value, self_bdim, padding);
24375 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24376}
24377template <typename batch_rule_t, batch_rule_t batch_rule>
24378at::Tensor replication_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
24379 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24380 auto maybe_layer = maybeCurrentDynamicLayer();
24381 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24382 int64_t cur_level = maybe_layer->layerId();
24383 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
24384 return at::_ops::replication_pad1d_backward::call(grad_output, self, padding);
24385 }
24386 Tensor grad_output_value;
24387 optional<int64_t> grad_output_bdim;
24388 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24389 Tensor self_value;
24390 optional<int64_t> self_bdim;
24391 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24392 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
24393 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24394}
24395template <typename batch_rule_t, batch_rule_t batch_rule>
24396at::Tensor replication_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
24397 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24398 auto maybe_layer = maybeCurrentDynamicLayer();
24399 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24400 int64_t cur_level = maybe_layer->layerId();
24401 if (!isBatchedAtLevel(self, cur_level)) {
24402 return at::_ops::replication_pad2d::call(self, padding);
24403 }
24404 Tensor self_value;
24405 optional<int64_t> self_bdim;
24406 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24407 auto results = batch_rule(self_value, self_bdim, padding);
24408 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24409}
24410template <typename batch_rule_t, batch_rule_t batch_rule>
24411at::Tensor replication_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
24412 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24413 auto maybe_layer = maybeCurrentDynamicLayer();
24414 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24415 int64_t cur_level = maybe_layer->layerId();
24416 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
24417 return at::_ops::replication_pad2d_backward::call(grad_output, self, padding);
24418 }
24419 Tensor grad_output_value;
24420 optional<int64_t> grad_output_bdim;
24421 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24422 Tensor self_value;
24423 optional<int64_t> self_bdim;
24424 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24425 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
24426 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24427}
24428template <typename batch_rule_t, batch_rule_t batch_rule>
24429at::Tensor replication_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
24430 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24431 auto maybe_layer = maybeCurrentDynamicLayer();
24432 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24433 int64_t cur_level = maybe_layer->layerId();
24434 if (!isBatchedAtLevel(self, cur_level)) {
24435 return at::_ops::replication_pad3d::call(self, padding);
24436 }
24437 Tensor self_value;
24438 optional<int64_t> self_bdim;
24439 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24440 auto results = batch_rule(self_value, self_bdim, padding);
24441 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24442}
24443template <typename batch_rule_t, batch_rule_t batch_rule>
24444at::Tensor replication_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
24445 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24446 auto maybe_layer = maybeCurrentDynamicLayer();
24447 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24448 int64_t cur_level = maybe_layer->layerId();
24449 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
24450 return at::_ops::replication_pad3d_backward::call(grad_output, self, padding);
24451 }
24452 Tensor grad_output_value;
24453 optional<int64_t> grad_output_bdim;
24454 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24455 Tensor self_value;
24456 optional<int64_t> self_bdim;
24457 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24458 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
24459 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24460}
24461template <typename batch_rule_t, batch_rule_t batch_rule>
24462at::Tensor _pad_circular_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad) {
24463 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24464 auto maybe_layer = maybeCurrentDynamicLayer();
24465 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24466 int64_t cur_level = maybe_layer->layerId();
24467 if (!isBatchedAtLevel(self, cur_level)) {
24468 return at::_ops::_pad_circular::call(self, pad);
24469 }
24470 Tensor self_value;
24471 optional<int64_t> self_bdim;
24472 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24473 auto results = batch_rule(self_value, self_bdim, pad);
24474 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24475}
24476template <typename batch_rule_t, batch_rule_t batch_rule>
24477at::Tensor _pad_enum_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {
24478 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24479 auto maybe_layer = maybeCurrentDynamicLayer();
24480 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24481 int64_t cur_level = maybe_layer->layerId();
24482 if (!isBatchedAtLevel(self, cur_level)) {
24483 return at::_ops::_pad_enum::call(self, pad, mode, value);
24484 }
24485 Tensor self_value;
24486 optional<int64_t> self_bdim;
24487 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24488 auto results = batch_rule(self_value, self_bdim, pad, mode, value);
24489 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24490}
24491template <typename batch_rule_t, batch_rule_t batch_rule>
24492at::Tensor pad_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
24493 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24494 auto maybe_layer = maybeCurrentDynamicLayer();
24495 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24496 int64_t cur_level = maybe_layer->layerId();
24497 if (!isBatchedAtLevel(self, cur_level)) {
24498 return at::_ops::pad::call(self, pad, mode, value);
24499 }
24500 Tensor self_value;
24501 optional<int64_t> self_bdim;
24502 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24503 auto results = batch_rule(self_value, self_bdim, pad, mode, value);
24504 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24505}
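// A minimal illustrative sketch (not part of the generated output): each plumbing
// template above is instantiated with a concrete batch rule. For a single-tensor op
// such as `pad`, a rule could, assuming the reshape_dim_into/reshape_dim_outof
// helpers from BatchRulesHelper.h, fold the vmapped dimension into dim 0, call the
// un-batched op, and report the result's batch dimension back to the plumbing,
// which then re-wraps it via makeBatched. The rule and registration below are
// hypothetical, shown only to clarify how the templates are meant to be used.
//
//   std::tuple<at::Tensor, c10::optional<int64_t>> pad_batch_rule(
//       const at::Tensor& self, c10::optional<int64_t> self_bdim,
//       c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
//     // Move the vmapped dim into dim 0 so the un-batched op sees a plain batch.
//     auto self_ = reshape_dim_into(*self_bdim, 0, self);
//     auto out = at::_ops::pad::call(self_, pad, mode, value);
//     // Split the folded batch back out of dim 0; the new batch dim is 0.
//     return std::make_tuple(reshape_dim_outof(0, self.size(*self_bdim), out), 0);
//   }
//
//   // Hypothetical instantiation through the generated plumbing:
//   //   pad_generated_plumbing<decltype(&pad_batch_rule), &pad_batch_rule>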
24506template <typename batch_rule_t, batch_rule_t batch_rule>
24507at::Tensor upsample_linear1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
24508 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24509 auto maybe_layer = maybeCurrentDynamicLayer();
24510 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24511 int64_t cur_level = maybe_layer->layerId();
24512 if (!isBatchedAtLevel(input, cur_level)) {
24513 return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
24514 }
24515 Tensor input_value;
24516 optional<int64_t> input_bdim;
24517 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24518 auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
24519 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24520}
24521template <typename batch_rule_t, batch_rule_t batch_rule>
24522at::Tensor upsample_bilinear2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
24523 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24524 auto maybe_layer = maybeCurrentDynamicLayer();
24525 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24526 int64_t cur_level = maybe_layer->layerId();
24527 if (!isBatchedAtLevel(input, cur_level)) {
24528 return at::_ops::upsample_bilinear2d_vec::call(input, output_size, align_corners, scale_factors);
24529 }
24530 Tensor input_value;
24531 optional<int64_t> input_bdim;
24532 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24533 auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
24534 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24535}
24536template <typename batch_rule_t, batch_rule_t batch_rule>
24537at::Tensor _upsample_bilinear2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
24538 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24539 auto maybe_layer = maybeCurrentDynamicLayer();
24540 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24541 int64_t cur_level = maybe_layer->layerId();
24542 if (!isBatchedAtLevel(input, cur_level)) {
24543 return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size, align_corners, scale_factors);
24544 }
24545 Tensor input_value;
24546 optional<int64_t> input_bdim;
24547 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24548 auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
24549 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24550}
24551template <typename batch_rule_t, batch_rule_t batch_rule>
24552at::Tensor upsample_trilinear3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
24553 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24554 auto maybe_layer = maybeCurrentDynamicLayer();
24555 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24556 int64_t cur_level = maybe_layer->layerId();
24557 if (!isBatchedAtLevel(input, cur_level)) {
24558 return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
24559 }
24560 Tensor input_value;
24561 optional<int64_t> input_bdim;
24562 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24563 auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
24564 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24565}
24566template <typename batch_rule_t, batch_rule_t batch_rule>
24567at::Tensor upsample_bicubic2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
24568 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24569 auto maybe_layer = maybeCurrentDynamicLayer();
24570 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24571 int64_t cur_level = maybe_layer->layerId();
24572 if (!isBatchedAtLevel(input, cur_level)) {
24573 return at::_ops::upsample_bicubic2d_vec::call(input, output_size, align_corners, scale_factors);
24574 }
24575 Tensor input_value;
24576 optional<int64_t> input_bdim;
24577 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24578 auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
24579 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24580}
24581template <typename batch_rule_t, batch_rule_t batch_rule>
24582at::Tensor _upsample_bicubic2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
24583 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24584 auto maybe_layer = maybeCurrentDynamicLayer();
24585 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24586 int64_t cur_level = maybe_layer->layerId();
24587 if (!isBatchedAtLevel(input, cur_level)) {
24588 return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size, align_corners, scale_factors);
24589 }
24590 Tensor input_value;
24591 optional<int64_t> input_bdim;
24592 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24593 auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
24594 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24595}
24596template <typename batch_rule_t, batch_rule_t batch_rule>
24597at::Tensor upsample_nearest1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
24598 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24599 auto maybe_layer = maybeCurrentDynamicLayer();
24600 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24601 int64_t cur_level = maybe_layer->layerId();
24602 if (!isBatchedAtLevel(input, cur_level)) {
24603 return at::_ops::upsample_nearest1d_vec::call(input, output_size, scale_factors);
24604 }
24605 Tensor input_value;
24606 optional<int64_t> input_bdim;
24607 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24608 auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
24609 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24610}
24611template <typename batch_rule_t, batch_rule_t batch_rule>
24612at::Tensor _upsample_nearest_exact1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
24613 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24614 auto maybe_layer = maybeCurrentDynamicLayer();
24615 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24616 int64_t cur_level = maybe_layer->layerId();
24617 if (!isBatchedAtLevel(input, cur_level)) {
24618 return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
24619 }
24620 Tensor input_value;
24621 optional<int64_t> input_bdim;
24622 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24623 auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
24624 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24625}
24626template <typename batch_rule_t, batch_rule_t batch_rule>
24627at::Tensor upsample_nearest2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
24628 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24629 auto maybe_layer = maybeCurrentDynamicLayer();
24630 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24631 int64_t cur_level = maybe_layer->layerId();
24632 if (!isBatchedAtLevel(input, cur_level)) {
24633 return at::_ops::upsample_nearest2d_vec::call(input, output_size, scale_factors);
24634 }
24635 Tensor input_value;
24636 optional<int64_t> input_bdim;
24637 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24638 auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
24639 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24640}
24641template <typename batch_rule_t, batch_rule_t batch_rule>
24642at::Tensor _upsample_nearest_exact2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
24643 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24644 auto maybe_layer = maybeCurrentDynamicLayer();
24645 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24646 int64_t cur_level = maybe_layer->layerId();
24647 if (!isBatchedAtLevel(input, cur_level)) {
24648 return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size, scale_factors);
24649 }
24650 Tensor input_value;
24651 optional<int64_t> input_bdim;
24652 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24653 auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
24654 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24655}
24656template <typename batch_rule_t, batch_rule_t batch_rule>
24657at::Tensor upsample_nearest3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
24658 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24659 auto maybe_layer = maybeCurrentDynamicLayer();
24660 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24661 int64_t cur_level = maybe_layer->layerId();
24662 if (!isBatchedAtLevel(input, cur_level)) {
24663 return at::_ops::upsample_nearest3d_vec::call(input, output_size, scale_factors);
24664 }
24665 Tensor input_value;
24666 optional<int64_t> input_bdim;
24667 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24668 auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
24669 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24670}
24671template <typename batch_rule_t, batch_rule_t batch_rule>
24672at::Tensor _upsample_nearest_exact3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
24673 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24674 auto maybe_layer = maybeCurrentDynamicLayer();
24675 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24676 int64_t cur_level = maybe_layer->layerId();
24677 if (!isBatchedAtLevel(input, cur_level)) {
24678 return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size, scale_factors);
24679 }
24680 Tensor input_value;
24681 optional<int64_t> input_bdim;
24682 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
24683 auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
24684 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24685}
24686template <typename batch_rule_t, batch_rule_t batch_rule>
24687at::Tensor upsample_linear1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
24688 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24689 auto maybe_layer = maybeCurrentDynamicLayer();
24690 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24691 int64_t cur_level = maybe_layer->layerId();
24692 if (!isBatchedAtLevel(self, cur_level)) {
24693 return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
24694 }
24695 Tensor self_value;
24696 optional<int64_t> self_bdim;
24697 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24698 auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales);
24699 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24700}
24701template <typename batch_rule_t, batch_rule_t batch_rule>
24702at::Tensor upsample_linear1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
24703 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24704 auto maybe_layer = maybeCurrentDynamicLayer();
24705 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24706 int64_t cur_level = maybe_layer->layerId();
24707 if (!isBatchedAtLevel(grad_output, cur_level)) {
24708 return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
24709 }
24710 Tensor grad_output_value;
24711 optional<int64_t> grad_output_bdim;
24712 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24713 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales);
24714 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24715}
24716template <typename batch_rule_t, batch_rule_t batch_rule>
24717at::Tensor upsample_bilinear2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24718 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24719 auto maybe_layer = maybeCurrentDynamicLayer();
24720 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24721 int64_t cur_level = maybe_layer->layerId();
24722 if (!isBatchedAtLevel(self, cur_level)) {
24723 return at::_ops::upsample_bilinear2d::call(self, output_size, align_corners, scales_h, scales_w);
24724 }
24725 Tensor self_value;
24726 optional<int64_t> self_bdim;
24727 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24728 auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
24729 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24730}
24731template <typename batch_rule_t, batch_rule_t batch_rule>
24732at::Tensor upsample_bilinear2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24733 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24734 auto maybe_layer = maybeCurrentDynamicLayer();
24735 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24736 int64_t cur_level = maybe_layer->layerId();
24737 if (!isBatchedAtLevel(grad_output, cur_level)) {
24738 return at::_ops::upsample_bilinear2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
24739 }
24740 Tensor grad_output_value;
24741 optional<int64_t> grad_output_bdim;
24742 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24743 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
24744 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24745}
24746template <typename batch_rule_t, batch_rule_t batch_rule>
24747at::Tensor _upsample_bilinear2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24748 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24749 auto maybe_layer = maybeCurrentDynamicLayer();
24750 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24751 int64_t cur_level = maybe_layer->layerId();
24752 if (!isBatchedAtLevel(self, cur_level)) {
24753 return at::_ops::_upsample_bilinear2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
24754 }
24755 Tensor self_value;
24756 optional<int64_t> self_bdim;
24757 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24758 auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
24759 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24760}
24761template <typename batch_rule_t, batch_rule_t batch_rule>
24762at::Tensor _upsample_bilinear2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24763 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24764 auto maybe_layer = maybeCurrentDynamicLayer();
24765 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24766 int64_t cur_level = maybe_layer->layerId();
24767 if (!isBatchedAtLevel(grad_output, cur_level)) {
24768 return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
24769 }
24770 Tensor grad_output_value;
24771 optional<int64_t> grad_output_bdim;
24772 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24773 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
24774 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24775}
24776template <typename batch_rule_t, batch_rule_t batch_rule>
24777at::Tensor upsample_bicubic2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24778 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24779 auto maybe_layer = maybeCurrentDynamicLayer();
24780 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24781 int64_t cur_level = maybe_layer->layerId();
24782 if (!isBatchedAtLevel(self, cur_level)) {
24783 return at::_ops::upsample_bicubic2d::call(self, output_size, align_corners, scales_h, scales_w);
24784 }
24785 Tensor self_value;
24786 optional<int64_t> self_bdim;
24787 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24788 auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
24789 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24790}
24791template <typename batch_rule_t, batch_rule_t batch_rule>
24792at::Tensor upsample_bicubic2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24793 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24794 auto maybe_layer = maybeCurrentDynamicLayer();
24795 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24796 int64_t cur_level = maybe_layer->layerId();
24797 if (!isBatchedAtLevel(grad_output, cur_level)) {
24798 return at::_ops::upsample_bicubic2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
24799 }
24800 Tensor grad_output_value;
24801 optional<int64_t> grad_output_bdim;
24802 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24803 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
24804 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24805}
24806template <typename batch_rule_t, batch_rule_t batch_rule>
24807at::Tensor _upsample_bicubic2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24808 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24809 auto maybe_layer = maybeCurrentDynamicLayer();
24810 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24811 int64_t cur_level = maybe_layer->layerId();
24812 if (!isBatchedAtLevel(self, cur_level)) {
24813 return at::_ops::_upsample_bicubic2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
24814 }
24815 Tensor self_value;
24816 optional<int64_t> self_bdim;
24817 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24818 auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
24819 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24820}
24821template <typename batch_rule_t, batch_rule_t batch_rule>
24822at::Tensor _upsample_bicubic2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24823 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24824 auto maybe_layer = maybeCurrentDynamicLayer();
24825 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24826 int64_t cur_level = maybe_layer->layerId();
24827 if (!isBatchedAtLevel(grad_output, cur_level)) {
24828 return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
24829 }
24830 Tensor grad_output_value;
24831 optional<int64_t> grad_output_bdim;
24832 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24833 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
24834 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24835}
24836template <typename batch_rule_t, batch_rule_t batch_rule>
24837at::Tensor upsample_trilinear3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24838 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24839 auto maybe_layer = maybeCurrentDynamicLayer();
24840 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24841 int64_t cur_level = maybe_layer->layerId();
24842 if (!isBatchedAtLevel(self, cur_level)) {
24843 return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
24844 }
24845 Tensor self_value;
24846 optional<int64_t> self_bdim;
24847 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24848 auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_d, scales_h, scales_w);
24849 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24850}
24851template <typename batch_rule_t, batch_rule_t batch_rule>
24852at::Tensor upsample_trilinear3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24853 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24854 auto maybe_layer = maybeCurrentDynamicLayer();
24855 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24856 int64_t cur_level = maybe_layer->layerId();
24857 if (!isBatchedAtLevel(grad_output, cur_level)) {
24858 return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
24859 }
24860 Tensor grad_output_value;
24861 optional<int64_t> grad_output_bdim;
24862 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24863 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
24864 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24865}
24866template <typename batch_rule_t, batch_rule_t batch_rule>
24867at::Tensor upsample_nearest1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
24868 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24869 auto maybe_layer = maybeCurrentDynamicLayer();
24870 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24871 int64_t cur_level = maybe_layer->layerId();
24872 if (!isBatchedAtLevel(self, cur_level)) {
24873 return at::_ops::upsample_nearest1d::call(self, output_size, scales);
24874 }
24875 Tensor self_value;
24876 optional<int64_t> self_bdim;
24877 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24878 auto results = batch_rule(self_value, self_bdim, output_size, scales);
24879 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24880}
24881template <typename batch_rule_t, batch_rule_t batch_rule>
24882at::Tensor _upsample_nearest_exact1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
24883 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24884 auto maybe_layer = maybeCurrentDynamicLayer();
24885 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24886 int64_t cur_level = maybe_layer->layerId();
24887 if (!isBatchedAtLevel(self, cur_level)) {
24888 return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
24889 }
24890 Tensor self_value;
24891 optional<int64_t> self_bdim;
24892 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24893 auto results = batch_rule(self_value, self_bdim, output_size, scales);
24894 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24895}
24896template <typename batch_rule_t, batch_rule_t batch_rule>
24897at::Tensor upsample_nearest1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
24898 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24899 auto maybe_layer = maybeCurrentDynamicLayer();
24900 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24901 int64_t cur_level = maybe_layer->layerId();
24902 if (!isBatchedAtLevel(grad_output, cur_level)) {
24903 return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
24904 }
24905 Tensor grad_output_value;
24906 optional<int64_t> grad_output_bdim;
24907 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24908 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
24909 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24910}
24911template <typename batch_rule_t, batch_rule_t batch_rule>
24912at::Tensor _upsample_nearest_exact1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
24913 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24914 auto maybe_layer = maybeCurrentDynamicLayer();
24915 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24916 int64_t cur_level = maybe_layer->layerId();
24917 if (!isBatchedAtLevel(grad_output, cur_level)) {
24918 return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales);
24919 }
24920 Tensor grad_output_value;
24921 optional<int64_t> grad_output_bdim;
24922 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24923 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
24924 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24925}
24926template <typename batch_rule_t, batch_rule_t batch_rule>
24927at::Tensor upsample_nearest2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24928 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24929 auto maybe_layer = maybeCurrentDynamicLayer();
24930 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24931 int64_t cur_level = maybe_layer->layerId();
24932 if (!isBatchedAtLevel(self, cur_level)) {
24933 return at::_ops::upsample_nearest2d::call(self, output_size, scales_h, scales_w);
24934 }
24935 Tensor self_value;
24936 optional<int64_t> self_bdim;
24937 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24938 auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
24939 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24940}
24941template <typename batch_rule_t, batch_rule_t batch_rule>
24942at::Tensor _upsample_nearest_exact2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24943 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24944 auto maybe_layer = maybeCurrentDynamicLayer();
24945 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24946 int64_t cur_level = maybe_layer->layerId();
24947 if (!isBatchedAtLevel(self, cur_level)) {
24948 return at::_ops::_upsample_nearest_exact2d::call(self, output_size, scales_h, scales_w);
24949 }
24950 Tensor self_value;
24951 optional<int64_t> self_bdim;
24952 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24953 auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
24954 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24955}
24956template <typename batch_rule_t, batch_rule_t batch_rule>
24957at::Tensor upsample_nearest2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24958 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24959 auto maybe_layer = maybeCurrentDynamicLayer();
24960 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24961 int64_t cur_level = maybe_layer->layerId();
24962 if (!isBatchedAtLevel(grad_output, cur_level)) {
24963 return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
24964 }
24965 Tensor grad_output_value;
24966 optional<int64_t> grad_output_bdim;
24967 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24968 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
24969 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24970}
24971template <typename batch_rule_t, batch_rule_t batch_rule>
24972at::Tensor _upsample_nearest_exact2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24973 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24974 auto maybe_layer = maybeCurrentDynamicLayer();
24975 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24976 int64_t cur_level = maybe_layer->layerId();
24977 if (!isBatchedAtLevel(grad_output, cur_level)) {
24978 return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
24979 }
24980 Tensor grad_output_value;
24981 optional<int64_t> grad_output_bdim;
24982 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
24983 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
24984 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
24985}
24986template <typename batch_rule_t, batch_rule_t batch_rule>
24987at::Tensor upsample_nearest3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
24988 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
24989 auto maybe_layer = maybeCurrentDynamicLayer();
24990 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
24991 int64_t cur_level = maybe_layer->layerId();
24992 if (!isBatchedAtLevel(self, cur_level)) {
24993 return at::_ops::upsample_nearest3d::call(self, output_size, scales_d, scales_h, scales_w);
24994 }
24995 Tensor self_value;
24996 optional<int64_t> self_bdim;
24997 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
24998 auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
24999 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25000}
25001template <typename batch_rule_t, batch_rule_t batch_rule>
25002at::Tensor _upsample_nearest_exact3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25003 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25004 auto maybe_layer = maybeCurrentDynamicLayer();
25005 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25006 int64_t cur_level = maybe_layer->layerId();
25007 if (!isBatchedAtLevel(self, cur_level)) {
25008 return at::_ops::_upsample_nearest_exact3d::call(self, output_size, scales_d, scales_h, scales_w);
25009 }
25010 Tensor self_value;
25011 optional<int64_t> self_bdim;
25012 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25013 auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
25014 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25015}
25016template <typename batch_rule_t, batch_rule_t batch_rule>
25017at::Tensor upsample_nearest3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25018 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25019 auto maybe_layer = maybeCurrentDynamicLayer();
25020 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25021 int64_t cur_level = maybe_layer->layerId();
25022 if (!isBatchedAtLevel(grad_output, cur_level)) {
25023 return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
25024 }
25025 Tensor grad_output_value;
25026 optional<int64_t> grad_output_bdim;
25027 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
25028 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
25029 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25030}
25031template <typename batch_rule_t, batch_rule_t batch_rule>
25032at::Tensor _upsample_nearest_exact3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
25033 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25034 auto maybe_layer = maybeCurrentDynamicLayer();
25035 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25036 int64_t cur_level = maybe_layer->layerId();
25037 if (!isBatchedAtLevel(grad_output, cur_level)) {
25038 return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
25039 }
25040 Tensor grad_output_value;
25041 optional<int64_t> grad_output_bdim;
25042 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
25043 auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
25044 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25045}
25046template <typename batch_rule_t, batch_rule_t batch_rule>
25047at::Tensor sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
25048 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25049 auto maybe_layer = maybeCurrentDynamicLayer();
25050 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25051 int64_t cur_level = maybe_layer->layerId();
25052 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
25053 return at::_ops::sigmoid_backward::call(grad_output, output);
25054 }
25055 Tensor grad_output_value;
25056 optional<int64_t> grad_output_bdim;
25057 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
25058 Tensor output_value;
25059 optional<int64_t> output_bdim;
25060 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
25061 auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
25062 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25063}
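// The batch_rule call above fixes the calling convention a compatible rule
// must have: one (Tensor, optional<int64_t>) pair per tensor argument in,
// and a std::tuple<Tensor, optional<int64_t>> out. A minimal sketch of a
// rule matching this plumbing follows; the name and body are purely
// illustrative (the real rules live in the BatchRules* translation units)
// and assume grad_output and output share the same logical shape.
//
//   std::tuple<at::Tensor, c10::optional<int64_t>> sigmoid_backward_batch_rule(
//       const at::Tensor& grad_output, c10::optional<int64_t> grad_output_bdim,
//       const at::Tensor& output, c10::optional<int64_t> output_bdim) {
//     // Move any batch dimension to the front so the operands broadcast,
//     // then apply the usual formula grad * out * (1 - out) elementwise.
//     auto grad = moveBatchDimToFront(grad_output, grad_output_bdim);
//     auto out = moveBatchDimToFront(output, output_bdim);
//     auto res = grad * out * (1 - out);
//     c10::optional<int64_t> res_bdim;
//     if (grad_output_bdim.has_value() || output_bdim.has_value()) {
//       res_bdim = 0;  // the batch dimension now lives at the front
//     }
//     return std::make_tuple(std::move(res), res_bdim);
//   }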
25064template <typename batch_rule_t, batch_rule_t batch_rule>
25065at::Tensor logit_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
25066 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25067 auto maybe_layer = maybeCurrentDynamicLayer();
25068 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25069 int64_t cur_level = maybe_layer->layerId();
25070 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
25071 return at::_ops::logit_backward::call(grad_output, self, eps);
25072 }
25073 Tensor grad_output_value;
25074 optional<int64_t> grad_output_bdim;
25075 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
25076 Tensor self_value;
25077 optional<int64_t> self_bdim;
25078 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25079 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, eps);
25080 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25081}
25082template <typename batch_rule_t, batch_rule_t batch_rule>
25083at::Tensor tanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
25084 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25085 auto maybe_layer = maybeCurrentDynamicLayer();
25086 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25087 int64_t cur_level = maybe_layer->layerId();
25088 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
25089 return at::_ops::tanh_backward::call(grad_output, output);
25090 }
25091 Tensor grad_output_value;
25092 optional<int64_t> grad_output_bdim;
25093 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
25094 Tensor output_value;
25095 optional<int64_t> output_bdim;
25096 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
25097 auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
25098 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25099}
25100template <typename batch_rule_t, batch_rule_t batch_rule>
25101at::Tensor slow_conv_transpose2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
25102 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25103 auto maybe_layer = maybeCurrentDynamicLayer();
25104 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25105 int64_t cur_level = maybe_layer->layerId();
25106 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25107 return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
25108 }
25109 Tensor self_value;
25110 optional<int64_t> self_bdim;
25111 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25112 Tensor weight_value;
25113 optional<int64_t> weight_bdim;
25114 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25115 optional<Tensor> bias_value;
25116 optional<int64_t> bias_bdim;
25117 if (bias) {
25118 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25119 }
25120 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
25121 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25122}
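// For optional tensor arguments such as bias, the plumbing above only
// unwraps when a value is present; otherwise both bias_value and bias_bdim
// stay nullopt, and the batch rule is expected to treat that pair as
// "no bias".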
25123template <typename batch_rule_t, batch_rule_t batch_rule>
25124at::Tensor slow_conv_transpose3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, at::IntArrayRef dilation) {
25125 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25126 auto maybe_layer = maybeCurrentDynamicLayer();
25127 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25128 int64_t cur_level = maybe_layer->layerId();
25129 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25130 return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
25131 }
25132 Tensor self_value;
25133 optional<int64_t> self_bdim;
25134 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25135 Tensor weight_value;
25136 optional<int64_t> weight_bdim;
25137 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25138 optional<Tensor> bias_value;
25139 optional<int64_t> bias_bdim;
25140 if (bias) {
25141 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25142 }
25143 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
25144 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25145}
25146template <typename batch_rule_t, batch_rule_t batch_rule>
25147at::Tensor thnn_conv2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
25148 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25149 auto maybe_layer = maybeCurrentDynamicLayer();
25150 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25151 int64_t cur_level = maybe_layer->layerId();
25152 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25153 return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
25154 }
25155 Tensor self_value;
25156 optional<int64_t> self_bdim;
25157 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25158 Tensor weight_value;
25159 optional<int64_t> weight_bdim;
25160 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25161 optional<Tensor> bias_value;
25162 optional<int64_t> bias_bdim;
25163 if (bias) {
25164 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25165 }
25166 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
25167 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25168}
25169template <typename batch_rule_t, batch_rule_t batch_rule>
25170at::Tensor _slow_conv2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding) {
25171 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25172 auto maybe_layer = maybeCurrentDynamicLayer();
25173 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25174 int64_t cur_level = maybe_layer->layerId();
25175 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25176 return at::_ops::_slow_conv2d_forward::call(self, weight, kernel_size, bias, stride, padding);
25177 }
25178 Tensor self_value;
25179 optional<int64_t> self_bdim;
25180 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25181 Tensor weight_value;
25182 optional<int64_t> weight_bdim;
25183 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25184 optional<Tensor> bias_value;
25185 optional<int64_t> bias_bdim;
25186 if (bias) {
25187 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25188 }
25189 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
25190 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25191}
25192template <typename batch_rule_t, batch_rule_t batch_rule>
25193::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
25194 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25195 auto maybe_layer = maybeCurrentDynamicLayer();
25196 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25197 int64_t cur_level = maybe_layer->layerId();
25198 if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
25199 return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
25200 }
25201 Tensor grad_output_value;
25202 optional<int64_t> grad_output_bdim;
25203 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
25204 Tensor self_value;
25205 optional<int64_t> self_bdim;
25206 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25207 Tensor weight_value;
25208 optional<int64_t> weight_bdim;
25209 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25210 auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim, kernel_size, stride, padding, output_mask);
25211 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
25212}
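// Multi-output ops return a flat tuple from the batch rule with the
// (value, bdim) pairs interleaved, and the plumbing above re-wraps them
// pairwise. For this three-output backward the expected result type is
//   std::tuple<Tensor, optional<int64_t>,
//              Tensor, optional<int64_t>,
//              Tensor, optional<int64_t>>
// corresponding to grad_input, grad_weight and grad_bias.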
25213template <typename batch_rule_t, batch_rule_t batch_rule>
25214at::Tensor _conv_depthwise2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
25215 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25216 auto maybe_layer = maybeCurrentDynamicLayer();
25217 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25218 int64_t cur_level = maybe_layer->layerId();
25219 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25220 return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
25221 }
25222 Tensor self_value;
25223 optional<int64_t> self_bdim;
25224 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25225 Tensor weight_value;
25226 optional<int64_t> weight_bdim;
25227 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25228 optional<Tensor> bias_value;
25229 optional<int64_t> bias_bdim;
25230 if (bias) {
25231 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25232 }
25233 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
25234 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25235}
25236template <typename batch_rule_t, batch_rule_t batch_rule>
25237at::Tensor conv_depthwise3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
25238 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25239 auto maybe_layer = maybeCurrentDynamicLayer();
25240 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25241 int64_t cur_level = maybe_layer->layerId();
25242 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25243 return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
25244 }
25245 Tensor self_value;
25246 optional<int64_t> self_bdim;
25247 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25248 Tensor weight_value;
25249 optional<int64_t> weight_bdim;
25250 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25251 optional<Tensor> bias_value;
25252 optional<int64_t> bias_bdim;
25253 if (bias) {
25254 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25255 }
25256 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
25257 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25258}
25259template <typename batch_rule_t, batch_rule_t batch_rule>
25260at::Tensor slow_conv3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
25261 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25262 auto maybe_layer = maybeCurrentDynamicLayer();
25263 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25264 int64_t cur_level = maybe_layer->layerId();
25265 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25266 return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding);
25267 }
25268 Tensor self_value;
25269 optional<int64_t> self_bdim;
25270 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25271 Tensor weight_value;
25272 optional<int64_t> weight_bdim;
25273 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25274 optional<Tensor> bias_value;
25275 optional<int64_t> bias_bdim;
25276 if (bias) {
25277 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25278 }
25279 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
25280 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25281}
25282template <typename batch_rule_t, batch_rule_t batch_rule>
25283at::Tensor slow_conv3d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding) {
25284 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25285 auto maybe_layer = maybeCurrentDynamicLayer();
25286 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25287 int64_t cur_level = maybe_layer->layerId();
25288 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25289 return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
25290 }
25291 Tensor self_value;
25292 optional<int64_t> self_bdim;
25293 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25294 Tensor weight_value;
25295 optional<int64_t> weight_bdim;
25296 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25297 optional<Tensor> bias_value;
25298 optional<int64_t> bias_bdim;
25299 if (bias) {
25300 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25301 }
25302 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
25303 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25304}
25305template <typename batch_rule_t, batch_rule_t batch_rule>
25306at::Tensor slow_conv_dilated2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
25307 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25308 auto maybe_layer = maybeCurrentDynamicLayer();
25309 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25310 int64_t cur_level = maybe_layer->layerId();
25311 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25312 return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
25313 }
25314 Tensor self_value;
25315 optional<int64_t> self_bdim;
25316 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25317 Tensor weight_value;
25318 optional<int64_t> weight_bdim;
25319 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25320 optional<Tensor> bias_value;
25321 optional<int64_t> bias_bdim;
25322 if (bias) {
25323 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25324 }
25325 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
25326 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25327}
25328template <typename batch_rule_t, batch_rule_t batch_rule>
25329at::Tensor slow_conv_dilated3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, at::IntArrayRef dilation) {
25330 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25331 auto maybe_layer = maybeCurrentDynamicLayer();
25332 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25333 int64_t cur_level = maybe_layer->layerId();
25334 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
25335 return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
25336 }
25337 Tensor self_value;
25338 optional<int64_t> self_bdim;
25339 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25340 Tensor weight_value;
25341 optional<int64_t> weight_bdim;
25342 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
25343 optional<Tensor> bias_value;
25344 optional<int64_t> bias_bdim;
25345 if (bias) {
25346 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
25347 }
25348 auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
25349 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25350}
25351template <typename batch_rule_t, batch_rule_t batch_rule>
25352at::Tensor col2im_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
25353 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25354 auto maybe_layer = maybeCurrentDynamicLayer();
25355 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25356 int64_t cur_level = maybe_layer->layerId();
25357 if (!isBatchedAtLevel(self, cur_level)) {
25358 return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
25359 }
25360 Tensor self_value;
25361 optional<int64_t> self_bdim;
25362 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25363 auto results = batch_rule(self_value, self_bdim, output_size, kernel_size, dilation, padding, stride);
25364 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25365}
25366template <typename batch_rule_t, batch_rule_t batch_rule>
25367at::Tensor column_stack_generated_plumbing(at::TensorList tensors) {
25368 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25369 auto maybe_layer = maybeCurrentDynamicLayer();
25370 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25371 int64_t cur_level = maybe_layer->layerId();
25372 if (!isBatchedAtLevel(tensors, cur_level)) {
25373 return at::_ops::column_stack::call(tensors);
25374 }
25375
25376 auto results = batch_rule(tensors);
25377 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25378}
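// TensorList arguments are forwarded to the batch rule still batched: no
// per-element unwrapping happens in the plumbing, so a list-taking rule is
// responsible for unwrapping its elements (or for decomposing into ops that
// are themselves plumbed) at the current level.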
25379template <typename batch_rule_t, batch_rule_t batch_rule>
25380at::Tensor im2col_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
25381 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25382 auto maybe_layer = maybeCurrentDynamicLayer();
25383 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25384 int64_t cur_level = maybe_layer->layerId();
25385 if (!isBatchedAtLevel(self, cur_level)) {
25386 return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride);
25387 }
25388 Tensor self_value;
25389 optional<int64_t> self_bdim;
25390 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25391 auto results = batch_rule(self_value, self_bdim, kernel_size, dilation, padding, stride);
25392 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25393}
25394template <typename batch_rule_t, batch_rule_t batch_rule>
25395at::Tensor isfinite_generated_plumbing(const at::Tensor & self) {
25396 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25397 auto maybe_layer = maybeCurrentDynamicLayer();
25398 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25399 int64_t cur_level = maybe_layer->layerId();
25400 if (!isBatchedAtLevel(self, cur_level)) {
25401 return at::_ops::isfinite::call(self);
25402 }
25403 Tensor self_value;
25404 optional<int64_t> self_bdim;
25405 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25406 auto results = batch_rule(self_value, self_bdim);
25407 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25408}
25409template <typename batch_rule_t, batch_rule_t batch_rule>
25410at::Tensor isinf_generated_plumbing(const at::Tensor & self) {
25411 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25412 auto maybe_layer = maybeCurrentDynamicLayer();
25413 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25414 int64_t cur_level = maybe_layer->layerId();
25415 if (!isBatchedAtLevel(self, cur_level)) {
25416 return at::_ops::isinf::call(self);
25417 }
25418 Tensor self_value;
25419 optional<int64_t> self_bdim;
25420 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25421 auto results = batch_rule(self_value, self_bdim);
25422 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25423}
25424template <typename batch_rule_t, batch_rule_t batch_rule>
25425void record_stream_generated_plumbing(at::Tensor & self, at::Stream s) {
25426 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25427 auto maybe_layer = maybeCurrentDynamicLayer();
25428 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
25429 int64_t cur_level = maybe_layer->layerId();
25430 if (!isBatchedAtLevel(self, cur_level)) {
25431 return at::_ops::record_stream::call(self, s);
25432 }
25433 Tensor self_value;
25434 optional<int64_t> self_bdim;
25435 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25436 batch_rule(self_value, self_bdim, s);
25437}
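// Ops without returns (here an in-place mutation of self) use the
// "gen_vmap_plumbing_no_returns" escape check and invoke the batch rule
// purely for its side effect; there is nothing to re-wrap with makeBatched.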
25438template <typename batch_rule_t, batch_rule_t batch_rule>
25439at::Tensor isposinf_generated_plumbing(const at::Tensor & self) {
25440 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25441 auto maybe_layer = maybeCurrentDynamicLayer();
25442 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25443 int64_t cur_level = maybe_layer->layerId();
25444 if (!isBatchedAtLevel(self, cur_level)) {
25445 return at::_ops::isposinf::call(self);
25446 }
25447 Tensor self_value;
25448 optional<int64_t> self_bdim;
25449 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25450 auto results = batch_rule(self_value, self_bdim);
25451 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25452}
25453template <typename batch_rule_t, batch_rule_t batch_rule>
25454at::Tensor isneginf_generated_plumbing(const at::Tensor & self) {
25455 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25456 auto maybe_layer = maybeCurrentDynamicLayer();
25457 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25458 int64_t cur_level = maybe_layer->layerId();
25459 if (!isBatchedAtLevel(self, cur_level)) {
25460 return at::_ops::isneginf::call(self);
25461 }
25462 Tensor self_value;
25463 optional<int64_t> self_bdim;
25464 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25465 auto results = batch_rule(self_value, self_bdim);
25466 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25467}
25468template <typename batch_rule_t, batch_rule_t batch_rule>
25469at::Tensor _add_batch_dim_generated_plumbing(const at::Tensor & self, int64_t batch_dim, int64_t level) {
25470 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25471 auto maybe_layer = maybeCurrentDynamicLayer();
25472 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25473 int64_t cur_level = maybe_layer->layerId();
25474 if (!isBatchedAtLevel(self, cur_level)) {
25475 return at::_ops::_add_batch_dim::call(self, batch_dim, level);
25476 }
25477 Tensor self_value;
25478 optional<int64_t> self_bdim;
25479 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25480 auto results = batch_rule(self_value, self_bdim, batch_dim, level);
25481 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25482}
25483template <typename batch_rule_t, batch_rule_t batch_rule>
25484at::Tensor _remove_batch_dim_generated_plumbing(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
25485 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25486 auto maybe_layer = maybeCurrentDynamicLayer();
25487 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25488 int64_t cur_level = maybe_layer->layerId();
25489 if (!isBatchedAtLevel(self, cur_level)) {
25490 return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
25491 }
25492 Tensor self_value;
25493 optional<int64_t> self_bdim;
25494 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25495 auto results = batch_rule(self_value, self_bdim, level, batch_size, out_dim);
25496 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25497}
25498template <typename batch_rule_t, batch_rule_t batch_rule>
25499at::Tensor special_entr_generated_plumbing(const at::Tensor & self) {
25500 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25501 auto maybe_layer = maybeCurrentDynamicLayer();
25502 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25503 int64_t cur_level = maybe_layer->layerId();
25504 if (!isBatchedAtLevel(self, cur_level)) {
25505 return at::_ops::special_entr::call(self);
25506 }
25507 Tensor self_value;
25508 optional<int64_t> self_bdim;
25509 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25510 auto results = batch_rule(self_value, self_bdim);
25511 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25512}
25513template <typename batch_rule_t, batch_rule_t batch_rule>
25514at::Tensor special_ndtri_generated_plumbing(const at::Tensor & self) {
25515 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25516 auto maybe_layer = maybeCurrentDynamicLayer();
25517 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25518 int64_t cur_level = maybe_layer->layerId();
25519 if (!isBatchedAtLevel(self, cur_level)) {
25520 return at::_ops::special_ndtri::call(self);
25521 }
25522 Tensor self_value;
25523 optional<int64_t> self_bdim;
25524 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25525 auto results = batch_rule(self_value, self_bdim);
25526 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25527}
25528template <typename batch_rule_t, batch_rule_t batch_rule>
25529at::Tensor special_log_ndtr_generated_plumbing(const at::Tensor & self) {
25530 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25531 auto maybe_layer = maybeCurrentDynamicLayer();
25532 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25533 int64_t cur_level = maybe_layer->layerId();
25534 if (!isBatchedAtLevel(self, cur_level)) {
25535 return at::_ops::special_log_ndtr::call(self);
25536 }
25537 Tensor self_value;
25538 optional<int64_t> self_bdim;
25539 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25540 auto results = batch_rule(self_value, self_bdim);
25541 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25542}
25543template <typename batch_rule_t, batch_rule_t batch_rule>
25544at::Tensor special_expm1_generated_plumbing(const at::Tensor & self) {
25545 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25546 auto maybe_layer = maybeCurrentDynamicLayer();
25547 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25548 int64_t cur_level = maybe_layer->layerId();
25549 if (!isBatchedAtLevel(self, cur_level)) {
25550 return at::_ops::special_expm1::call(self);
25551 }
25552 Tensor self_value;
25553 optional<int64_t> self_bdim;
25554 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25555 auto results = batch_rule(self_value, self_bdim);
25556 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25557}
25558template <typename batch_rule_t, batch_rule_t batch_rule>
25559at::Tensor special_exp2_generated_plumbing(const at::Tensor & self) {
25560 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25561 auto maybe_layer = maybeCurrentDynamicLayer();
25562 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25563 int64_t cur_level = maybe_layer->layerId();
25564 if (!isBatchedAtLevel(self, cur_level)) {
25565 return at::_ops::special_exp2::call(self);
25566 }
25567 Tensor self_value;
25568 optional<int64_t> self_bdim;
25569 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25570 auto results = batch_rule(self_value, self_bdim);
25571 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25572}
25573template <typename batch_rule_t, batch_rule_t batch_rule>
25574at::Tensor special_psi_generated_plumbing(const at::Tensor & self) {
25575 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25576 auto maybe_layer = maybeCurrentDynamicLayer();
25577 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25578 int64_t cur_level = maybe_layer->layerId();
25579 if (!isBatchedAtLevel(self, cur_level)) {
25580 return at::_ops::special_psi::call(self);
25581 }
25582 Tensor self_value;
25583 optional<int64_t> self_bdim;
25584 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25585 auto results = batch_rule(self_value, self_bdim);
25586 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25587}
25588template <typename batch_rule_t, batch_rule_t batch_rule>
25589at::Tensor special_digamma_generated_plumbing(const at::Tensor & self) {
25590 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25591 auto maybe_layer = maybeCurrentDynamicLayer();
25592 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25593 int64_t cur_level = maybe_layer->layerId();
25594 if (!isBatchedAtLevel(self, cur_level)) {
25595 return at::_ops::special_digamma::call(self);
25596 }
25597 Tensor self_value;
25598 optional<int64_t> self_bdim;
25599 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25600 auto results = batch_rule(self_value, self_bdim);
25601 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25602}
25603template <typename batch_rule_t, batch_rule_t batch_rule>
25604at::Tensor special_gammaln_generated_plumbing(const at::Tensor & self) {
25605 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25606 auto maybe_layer = maybeCurrentDynamicLayer();
25607 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25608 int64_t cur_level = maybe_layer->layerId();
25609 if (!isBatchedAtLevel(self, cur_level)) {
25610 return at::_ops::special_gammaln::call(self);
25611 }
25612 Tensor self_value;
25613 optional<int64_t> self_bdim;
25614 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25615 auto results = batch_rule(self_value, self_bdim);
25616 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25617}
25618template <typename batch_rule_t, batch_rule_t batch_rule>
25619at::Tensor special_erf_generated_plumbing(const at::Tensor & self) {
25620 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25621 auto maybe_layer = maybeCurrentDynamicLayer();
25622 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25623 int64_t cur_level = maybe_layer->layerId();
25624 if (!isBatchedAtLevel(self, cur_level)) {
25625 return at::_ops::special_erf::call(self);
25626 }
25627 Tensor self_value;
25628 optional<int64_t> self_bdim;
25629 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25630 auto results = batch_rule(self_value, self_bdim);
25631 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25632}
25633template <typename batch_rule_t, batch_rule_t batch_rule>
25634at::Tensor special_erfc_generated_plumbing(const at::Tensor & self) {
25635 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25636 auto maybe_layer = maybeCurrentDynamicLayer();
25637 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25638 int64_t cur_level = maybe_layer->layerId();
25639 if (!isBatchedAtLevel(self, cur_level)) {
25640 return at::_ops::special_erfc::call(self);
25641 }
25642 Tensor self_value;
25643 optional<int64_t> self_bdim;
25644 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25645 auto results = batch_rule(self_value, self_bdim);
25646 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25647}
25648template <typename batch_rule_t, batch_rule_t batch_rule>
25649at::Tensor special_erfcx_generated_plumbing(const at::Tensor & self) {
25650 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25651 auto maybe_layer = maybeCurrentDynamicLayer();
25652 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25653 int64_t cur_level = maybe_layer->layerId();
25654 if (!isBatchedAtLevel(self, cur_level)) {
25655 return at::_ops::special_erfcx::call(self);
25656 }
25657 Tensor self_value;
25658 optional<int64_t> self_bdim;
25659 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25660 auto results = batch_rule(self_value, self_bdim);
25661 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25662}
25663template <typename batch_rule_t, batch_rule_t batch_rule>
25664at::Tensor special_erfinv_generated_plumbing(const at::Tensor & self) {
25665 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25666 auto maybe_layer = maybeCurrentDynamicLayer();
25667 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25668 int64_t cur_level = maybe_layer->layerId();
25669 if (!isBatchedAtLevel(self, cur_level)) {
25670 return at::_ops::special_erfinv::call(self);
25671 }
25672 Tensor self_value;
25673 optional<int64_t> self_bdim;
25674 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25675 auto results = batch_rule(self_value, self_bdim);
25676 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25677}
25678template <typename batch_rule_t, batch_rule_t batch_rule>
25679at::Tensor special_ndtr_generated_plumbing(const at::Tensor & self) {
25680 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25681 auto maybe_layer = maybeCurrentDynamicLayer();
25682 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25683 int64_t cur_level = maybe_layer->layerId();
25684 if (!isBatchedAtLevel(self, cur_level)) {
25685 return at::_ops::special_ndtr::call(self);
25686 }
25687 Tensor self_value;
25688 optional<int64_t> self_bdim;
25689 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25690 auto results = batch_rule(self_value, self_bdim);
25691 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25692}
25693template <typename batch_rule_t, batch_rule_t batch_rule>
25694at::Tensor special_xlog1py_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
25695 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25696 auto maybe_layer = maybeCurrentDynamicLayer();
25697 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25698 int64_t cur_level = maybe_layer->layerId();
25699 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
25700 return at::_ops::special_xlog1py::call(self, other);
25701 }
25702 Tensor self_value;
25703 optional<int64_t> self_bdim;
25704 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25705 Tensor other_value;
25706 optional<int64_t> other_bdim;
25707 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
25708 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
25709 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25710}
25711template <typename batch_rule_t, batch_rule_t batch_rule>
25712at::Tensor special_xlog1py_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
25713 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25714 auto maybe_layer = maybeCurrentDynamicLayer();
25715 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25716 int64_t cur_level = maybe_layer->layerId();
25717 if (!isBatchedAtLevel(other, cur_level)) {
25718 return at::_ops::special_xlog1py_self_scalar::call(self, other);
25719 }
25720 Tensor other_value;
25721 optional<int64_t> other_bdim;
25722 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
25723 auto results = batch_rule(self, other_value, other_bdim);
25724 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25725}
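// Scalar arguments never carry a batch dimension, so the *_self_scalar /
// *_other_scalar variants unwrap only the tensor operand and pass the
// Scalar through to the batch rule unchanged.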
25726template <typename batch_rule_t, batch_rule_t batch_rule>
25727at::Tensor special_xlog1py_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
25728 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25729 auto maybe_layer = maybeCurrentDynamicLayer();
25730 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25731 int64_t cur_level = maybe_layer->layerId();
25732 if (!isBatchedAtLevel(self, cur_level)) {
25733 return at::_ops::special_xlog1py_other_scalar::call(self, other);
25734 }
25735 Tensor self_value;
25736 optional<int64_t> self_bdim;
25737 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25738 auto results = batch_rule(self_value, self_bdim, other);
25739 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25740}
25741template <typename batch_rule_t, batch_rule_t batch_rule>
25742at::Tensor special_xlogy_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
25743 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25744 auto maybe_layer = maybeCurrentDynamicLayer();
25745 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25746 int64_t cur_level = maybe_layer->layerId();
25747 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
25748 return at::_ops::special_xlogy::call(self, other);
25749 }
25750 Tensor self_value;
25751 optional<int64_t> self_bdim;
25752 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25753 Tensor other_value;
25754 optional<int64_t> other_bdim;
25755 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
25756 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
25757 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25758}
25759template <typename batch_rule_t, batch_rule_t batch_rule>
25760at::Tensor special_xlogy_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
25761 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25762 auto maybe_layer = maybeCurrentDynamicLayer();
25763 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25764 int64_t cur_level = maybe_layer->layerId();
25765 if (!isBatchedAtLevel(other, cur_level)) {
25766 return at::_ops::special_xlogy_self_scalar::call(self, other);
25767 }
25768 Tensor other_value;
25769 optional<int64_t> other_bdim;
25770 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
25771 auto results = batch_rule(self, other_value, other_bdim);
25772 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25773}
25774template <typename batch_rule_t, batch_rule_t batch_rule>
25775at::Tensor special_xlogy_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
25776 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25777 auto maybe_layer = maybeCurrentDynamicLayer();
25778 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25779 int64_t cur_level = maybe_layer->layerId();
25780 if (!isBatchedAtLevel(self, cur_level)) {
25781 return at::_ops::special_xlogy_other_scalar::call(self, other);
25782 }
25783 Tensor self_value;
25784 optional<int64_t> self_bdim;
25785 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25786 auto results = batch_rule(self_value, self_bdim, other);
25787 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25788}
25789template <typename batch_rule_t, batch_rule_t batch_rule>
25790at::Tensor special_zeta_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
25791 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25792 auto maybe_layer = maybeCurrentDynamicLayer();
25793 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25794 int64_t cur_level = maybe_layer->layerId();
25795 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
25796 return at::_ops::special_zeta::call(self, other);
25797 }
25798 Tensor self_value;
25799 optional<int64_t> self_bdim;
25800 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25801 Tensor other_value;
25802 optional<int64_t> other_bdim;
25803 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
25804 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
25805 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25806}
25807template <typename batch_rule_t, batch_rule_t batch_rule>
25808at::Tensor special_zeta_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
25809 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25810 auto maybe_layer = maybeCurrentDynamicLayer();
25811 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25812 int64_t cur_level = maybe_layer->layerId();
25813 if (!isBatchedAtLevel(other, cur_level)) {
25814 return at::_ops::special_zeta_self_scalar::call(self, other);
25815 }
25816 Tensor other_value;
25817 optional<int64_t> other_bdim;
25818 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
25819 auto results = batch_rule(self, other_value, other_bdim);
25820 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25821}
25822template <typename batch_rule_t, batch_rule_t batch_rule>
25823at::Tensor special_zeta_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
25824 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25825 auto maybe_layer = maybeCurrentDynamicLayer();
25826 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25827 int64_t cur_level = maybe_layer->layerId();
25828 if (!isBatchedAtLevel(self, cur_level)) {
25829 return at::_ops::special_zeta_other_scalar::call(self, other);
25830 }
25831 Tensor self_value;
25832 optional<int64_t> self_bdim;
25833 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25834 auto results = batch_rule(self_value, self_bdim, other);
25835 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25836}
25837template <typename batch_rule_t, batch_rule_t batch_rule>
25838at::Tensor special_i0_generated_plumbing(const at::Tensor & self) {
25839 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25840 auto maybe_layer = maybeCurrentDynamicLayer();
25841 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25842 int64_t cur_level = maybe_layer->layerId();
25843 if (!isBatchedAtLevel(self, cur_level)) {
25844 return at::_ops::special_i0::call(self);
25845 }
25846 Tensor self_value;
25847 optional<int64_t> self_bdim;
25848 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25849 auto results = batch_rule(self_value, self_bdim);
25850 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25851}
25852template <typename batch_rule_t, batch_rule_t batch_rule>
25853at::Tensor special_i0e_generated_plumbing(const at::Tensor & self) {
25854 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25855 auto maybe_layer = maybeCurrentDynamicLayer();
25856 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25857 int64_t cur_level = maybe_layer->layerId();
25858 if (!isBatchedAtLevel(self, cur_level)) {
25859 return at::_ops::special_i0e::call(self);
25860 }
25861 Tensor self_value;
25862 optional<int64_t> self_bdim;
25863 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25864 auto results = batch_rule(self_value, self_bdim);
25865 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25866}
25867template <typename batch_rule_t, batch_rule_t batch_rule>
25868at::Tensor special_i1_generated_plumbing(const at::Tensor & self) {
25869 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25870 auto maybe_layer = maybeCurrentDynamicLayer();
25871 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25872 int64_t cur_level = maybe_layer->layerId();
25873 if (!isBatchedAtLevel(self, cur_level)) {
25874 return at::_ops::special_i1::call(self);
25875 }
25876 Tensor self_value;
25877 optional<int64_t> self_bdim;
25878 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25879 auto results = batch_rule(self_value, self_bdim);
25880 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25881}
25882template <typename batch_rule_t, batch_rule_t batch_rule>
25883at::Tensor special_i1e_generated_plumbing(const at::Tensor & self) {
25884 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25885 auto maybe_layer = maybeCurrentDynamicLayer();
25886 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25887 int64_t cur_level = maybe_layer->layerId();
25888 if (!isBatchedAtLevel(self, cur_level)) {
25889 return at::_ops::special_i1e::call(self);
25890 }
25891 Tensor self_value;
25892 optional<int64_t> self_bdim;
25893 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25894 auto results = batch_rule(self_value, self_bdim);
25895 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25896}
25897template <typename batch_rule_t, batch_rule_t batch_rule>
25898at::Tensor special_logit_generated_plumbing(const at::Tensor & self, c10::optional<double> eps) {
25899 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25900 auto maybe_layer = maybeCurrentDynamicLayer();
25901 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25902 int64_t cur_level = maybe_layer->layerId();
25903 if (!isBatchedAtLevel(self, cur_level)) {
25904 return at::_ops::special_logit::call(self, eps);
25905 }
25906 Tensor self_value;
25907 optional<int64_t> self_bdim;
25908 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25909 auto results = batch_rule(self_value, self_bdim, eps);
25910 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25911}
25912template <typename batch_rule_t, batch_rule_t batch_rule>
25913at::Tensor special_polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
25914 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25915 auto maybe_layer = maybeCurrentDynamicLayer();
25916 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25917 int64_t cur_level = maybe_layer->layerId();
25918 if (!isBatchedAtLevel(self, cur_level)) {
25919 return at::_ops::special_polygamma::call(n, self);
25920 }
25921 Tensor self_value;
25922 optional<int64_t> self_bdim;
25923 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25924 auto results = batch_rule(n, self_value, self_bdim);
25925 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25926}
25927template <typename batch_rule_t, batch_rule_t batch_rule>
25928at::Tensor special_logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
25929 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25930 auto maybe_layer = maybeCurrentDynamicLayer();
25931 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25932 int64_t cur_level = maybe_layer->layerId();
25933 if (!isBatchedAtLevel(self, cur_level)) {
25934 return at::_ops::special_logsumexp::call(self, dim, keepdim);
25935 }
25936 Tensor self_value;
25937 optional<int64_t> self_bdim;
25938 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25939 auto results = batch_rule(self_value, self_bdim, dim, keepdim);
25940 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25941}
25942template <typename batch_rule_t, batch_rule_t batch_rule>
25943at::Tensor special_expit_generated_plumbing(const at::Tensor & self) {
25944 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25945 auto maybe_layer = maybeCurrentDynamicLayer();
25946 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25947 int64_t cur_level = maybe_layer->layerId();
25948 if (!isBatchedAtLevel(self, cur_level)) {
25949 return at::_ops::special_expit::call(self);
25950 }
25951 Tensor self_value;
25952 optional<int64_t> self_bdim;
25953 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25954 auto results = batch_rule(self_value, self_bdim);
25955 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25956}
25957template <typename batch_rule_t, batch_rule_t batch_rule>
25958at::Tensor special_sinc_generated_plumbing(const at::Tensor & self) {
25959 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25960 auto maybe_layer = maybeCurrentDynamicLayer();
25961 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25962 int64_t cur_level = maybe_layer->layerId();
25963 if (!isBatchedAtLevel(self, cur_level)) {
25964 return at::_ops::special_sinc::call(self);
25965 }
25966 Tensor self_value;
25967 optional<int64_t> self_bdim;
25968 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25969 auto results = batch_rule(self_value, self_bdim);
25970 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25971}
25972template <typename batch_rule_t, batch_rule_t batch_rule>
25973at::Tensor special_round_generated_plumbing(const at::Tensor & self, int64_t decimals) {
25974 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25975 auto maybe_layer = maybeCurrentDynamicLayer();
25976 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25977 int64_t cur_level = maybe_layer->layerId();
25978 if (!isBatchedAtLevel(self, cur_level)) {
25979 return at::_ops::special_round::call(self, decimals);
25980 }
25981 Tensor self_value;
25982 optional<int64_t> self_bdim;
25983 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25984 auto results = batch_rule(self_value, self_bdim, decimals);
25985 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
25986}
25987template <typename batch_rule_t, batch_rule_t batch_rule>
25988at::Tensor special_log1p_generated_plumbing(const at::Tensor & self) {
25989 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
25990 auto maybe_layer = maybeCurrentDynamicLayer();
25991 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
25992 int64_t cur_level = maybe_layer->layerId();
25993 if (!isBatchedAtLevel(self, cur_level)) {
25994 return at::_ops::special_log1p::call(self);
25995 }
25996 Tensor self_value;
25997 optional<int64_t> self_bdim;
25998 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
25999 auto results = batch_rule(self_value, self_bdim);
26000 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26001}
26002template <typename batch_rule_t, batch_rule_t batch_rule>
26003at::Tensor special_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
26004 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26005 auto maybe_layer = maybeCurrentDynamicLayer();
26006 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26007 int64_t cur_level = maybe_layer->layerId();
26008 if (!isBatchedAtLevel(self, cur_level)) {
26009 return at::_ops::special_log_softmax::call(self, dim, dtype);
26010 }
26011 Tensor self_value;
26012 optional<int64_t> self_bdim;
26013 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26014 auto results = batch_rule(self_value, self_bdim, dim, dtype);
26015 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26016}
26017template <typename batch_rule_t, batch_rule_t batch_rule>
26018at::Tensor special_gammainc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
26019 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26020 auto maybe_layer = maybeCurrentDynamicLayer();
26021 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26022 int64_t cur_level = maybe_layer->layerId();
26023 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
26024 return at::_ops::special_gammainc::call(self, other);
26025 }
26026 Tensor self_value;
26027 optional<int64_t> self_bdim;
26028 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26029 Tensor other_value;
26030 optional<int64_t> other_bdim;
26031 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
26032 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
26033 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26034}
26035template <typename batch_rule_t, batch_rule_t batch_rule>
26036at::Tensor special_gammaincc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
26037 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26038 auto maybe_layer = maybeCurrentDynamicLayer();
26039 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26040 int64_t cur_level = maybe_layer->layerId();
26041 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
26042 return at::_ops::special_gammaincc::call(self, other);
26043 }
26044 Tensor self_value;
26045 optional<int64_t> self_bdim;
26046 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26047 Tensor other_value;
26048 optional<int64_t> other_bdim;
26049 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
26050 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
26051 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26052}
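// For wrappers with more than one Tensor argument (special_gammainc and
// special_gammaincc above), isBatchedAtLevel is checked on every Tensor and
// the fallback to at::_ops is taken only when none of them is batched at
// cur_level; otherwise each argument is unwrapped separately and handed to the
// batch rule in declaration order, each value immediately followed by its
// optional batch dimension.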
26053template <typename batch_rule_t, batch_rule_t batch_rule>
26054at::Tensor special_multigammaln_generated_plumbing(const at::Tensor & self, int64_t p) {
26055 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26056 auto maybe_layer = maybeCurrentDynamicLayer();
26057 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26058 int64_t cur_level = maybe_layer->layerId();
26059 if (!isBatchedAtLevel(self, cur_level)) {
26060 return at::_ops::special_multigammaln::call(self, p);
26061 }
26062 Tensor self_value;
26063 optional<int64_t> self_bdim;
26064 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26065 auto results = batch_rule(self_value, self_bdim, p);
26066 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26067}
26068template <typename batch_rule_t, batch_rule_t batch_rule>
26069at::Tensor special_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
26070 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26071 auto maybe_layer = maybeCurrentDynamicLayer();
26072 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26073 int64_t cur_level = maybe_layer->layerId();
26074 if (!isBatchedAtLevel(self, cur_level)) {
26075 return at::_ops::special_softmax::call(self, dim, dtype);
26076 }
26077 Tensor self_value;
26078 optional<int64_t> self_bdim;
26079 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26080 auto results = batch_rule(self_value, self_bdim, dim, dtype);
26081 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26082}
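// The fft_* wrappers below only unwrap/re-wrap `self`; the transform
// parameters are passed to the batch rule verbatim. The 1-D transforms take
// (n, dim, norm), the 2-D variants take (s : OptionalIntArrayRef,
// dim : IntArrayRef, norm), and the N-D variants take both s and dim as
// OptionalIntArrayRef.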
26083template <typename batch_rule_t, batch_rule_t batch_rule>
26084at::Tensor fft_fft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
26085 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26086 auto maybe_layer = maybeCurrentDynamicLayer();
26087 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26088 int64_t cur_level = maybe_layer->layerId();
26089 if (!isBatchedAtLevel(self, cur_level)) {
26090 return at::_ops::fft_fft::call(self, n, dim, norm);
26091 }
26092 Tensor self_value;
26093 optional<int64_t> self_bdim;
26094 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26095 auto results = batch_rule(self_value, self_bdim, n, dim, norm);
26096 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26097}
26098template <typename batch_rule_t, batch_rule_t batch_rule>
26099at::Tensor fft_ifft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
26100 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26101 auto maybe_layer = maybeCurrentDynamicLayer();
26102 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26103 int64_t cur_level = maybe_layer->layerId();
26104 if (!isBatchedAtLevel(self, cur_level)) {
26105 return at::_ops::fft_ifft::call(self, n, dim, norm);
26106 }
26107 Tensor self_value;
26108 optional<int64_t> self_bdim;
26109 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26110 auto results = batch_rule(self_value, self_bdim, n, dim, norm);
26111 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26112}
26113template <typename batch_rule_t, batch_rule_t batch_rule>
26114at::Tensor fft_rfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
26115 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26116 auto maybe_layer = maybeCurrentDynamicLayer();
26117 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26118 int64_t cur_level = maybe_layer->layerId();
26119 if (!isBatchedAtLevel(self, cur_level)) {
26120 return at::_ops::fft_rfft::call(self, n, dim, norm);
26121 }
26122 Tensor self_value;
26123 optional<int64_t> self_bdim;
26124 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26125 auto results = batch_rule(self_value, self_bdim, n, dim, norm);
26126 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26127}
26128template <typename batch_rule_t, batch_rule_t batch_rule>
26129at::Tensor fft_irfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
26130 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26131 auto maybe_layer = maybeCurrentDynamicLayer();
26132 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26133 int64_t cur_level = maybe_layer->layerId();
26134 if (!isBatchedAtLevel(self, cur_level)) {
26135 return at::_ops::fft_irfft::call(self, n, dim, norm);
26136 }
26137 Tensor self_value;
26138 optional<int64_t> self_bdim;
26139 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26140 auto results = batch_rule(self_value, self_bdim, n, dim, norm);
26141 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26142}
26143template <typename batch_rule_t, batch_rule_t batch_rule>
26144at::Tensor fft_hfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
26145 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26146 auto maybe_layer = maybeCurrentDynamicLayer();
26147 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26148 int64_t cur_level = maybe_layer->layerId();
26149 if (!isBatchedAtLevel(self, cur_level)) {
26150 return at::_ops::fft_hfft::call(self, n, dim, norm);
26151 }
26152 Tensor self_value;
26153 optional<int64_t> self_bdim;
26154 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26155 auto results = batch_rule(self_value, self_bdim, n, dim, norm);
26156 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26157}
26158template <typename batch_rule_t, batch_rule_t batch_rule>
26159at::Tensor fft_ihfft_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm) {
26160 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26161 auto maybe_layer = maybeCurrentDynamicLayer();
26162 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26163 int64_t cur_level = maybe_layer->layerId();
26164 if (!isBatchedAtLevel(self, cur_level)) {
26165 return at::_ops::fft_ihfft::call(self, n, dim, norm);
26166 }
26167 Tensor self_value;
26168 optional<int64_t> self_bdim;
26169 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26170 auto results = batch_rule(self_value, self_bdim, n, dim, norm);
26171 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26172}
26173template <typename batch_rule_t, batch_rule_t batch_rule>
26174at::Tensor fft_fft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
26175 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26176 auto maybe_layer = maybeCurrentDynamicLayer();
26177 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26178 int64_t cur_level = maybe_layer->layerId();
26179 if (!isBatchedAtLevel(self, cur_level)) {
26180 return at::_ops::fft_fft2::call(self, s, dim, norm);
26181 }
26182 Tensor self_value;
26183 optional<int64_t> self_bdim;
26184 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26185 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26186 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26187}
26188template <typename batch_rule_t, batch_rule_t batch_rule>
26189at::Tensor fft_ifft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
26190 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26191 auto maybe_layer = maybeCurrentDynamicLayer();
26192 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26193 int64_t cur_level = maybe_layer->layerId();
26194 if (!isBatchedAtLevel(self, cur_level)) {
26195 return at::_ops::fft_ifft2::call(self, s, dim, norm);
26196 }
26197 Tensor self_value;
26198 optional<int64_t> self_bdim;
26199 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26200 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26201 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26202}
26203template <typename batch_rule_t, batch_rule_t batch_rule>
26204at::Tensor fft_rfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
26205 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26206 auto maybe_layer = maybeCurrentDynamicLayer();
26207 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26208 int64_t cur_level = maybe_layer->layerId();
26209 if (!isBatchedAtLevel(self, cur_level)) {
26210 return at::_ops::fft_rfft2::call(self, s, dim, norm);
26211 }
26212 Tensor self_value;
26213 optional<int64_t> self_bdim;
26214 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26215 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26216 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26217}
26218template <typename batch_rule_t, batch_rule_t batch_rule>
26219at::Tensor fft_irfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
26220 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26221 auto maybe_layer = maybeCurrentDynamicLayer();
26222 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26223 int64_t cur_level = maybe_layer->layerId();
26224 if (!isBatchedAtLevel(self, cur_level)) {
26225 return at::_ops::fft_irfft2::call(self, s, dim, norm);
26226 }
26227 Tensor self_value;
26228 optional<int64_t> self_bdim;
26229 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26230 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26231 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26232}
26233template <typename batch_rule_t, batch_rule_t batch_rule>
26234at::Tensor fft_hfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
26235 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26236 auto maybe_layer = maybeCurrentDynamicLayer();
26237 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26238 int64_t cur_level = maybe_layer->layerId();
26239 if (!isBatchedAtLevel(self, cur_level)) {
26240 return at::_ops::fft_hfft2::call(self, s, dim, norm);
26241 }
26242 Tensor self_value;
26243 optional<int64_t> self_bdim;
26244 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26245 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26246 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26247}
26248template <typename batch_rule_t, batch_rule_t batch_rule>
26249at::Tensor fft_ihfft2_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
26250 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26251 auto maybe_layer = maybeCurrentDynamicLayer();
26252 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26253 int64_t cur_level = maybe_layer->layerId();
26254 if (!isBatchedAtLevel(self, cur_level)) {
26255 return at::_ops::fft_ihfft2::call(self, s, dim, norm);
26256 }
26257 Tensor self_value;
26258 optional<int64_t> self_bdim;
26259 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26260 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26261 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26262}
26263template <typename batch_rule_t, batch_rule_t batch_rule>
26264at::Tensor fft_fftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
26265 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26266 auto maybe_layer = maybeCurrentDynamicLayer();
26267 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26268 int64_t cur_level = maybe_layer->layerId();
26269 if (!isBatchedAtLevel(self, cur_level)) {
26270 return at::_ops::fft_fftn::call(self, s, dim, norm);
26271 }
26272 Tensor self_value;
26273 optional<int64_t> self_bdim;
26274 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26275 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26276 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26277}
26278template <typename batch_rule_t, batch_rule_t batch_rule>
26279at::Tensor fft_ifftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
26280 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26281 auto maybe_layer = maybeCurrentDynamicLayer();
26282 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26283 int64_t cur_level = maybe_layer->layerId();
26284 if (!isBatchedAtLevel(self, cur_level)) {
26285 return at::_ops::fft_ifftn::call(self, s, dim, norm);
26286 }
26287 Tensor self_value;
26288 optional<int64_t> self_bdim;
26289 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26290 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26291 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26292}
26293template <typename batch_rule_t, batch_rule_t batch_rule>
26294at::Tensor fft_rfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
26295 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26296 auto maybe_layer = maybeCurrentDynamicLayer();
26297 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26298 int64_t cur_level = maybe_layer->layerId();
26299 if (!isBatchedAtLevel(self, cur_level)) {
26300 return at::_ops::fft_rfftn::call(self, s, dim, norm);
26301 }
26302 Tensor self_value;
26303 optional<int64_t> self_bdim;
26304 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26305 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26306 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26307}
26308template <typename batch_rule_t, batch_rule_t batch_rule>
26309at::Tensor fft_irfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
26310 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26311 auto maybe_layer = maybeCurrentDynamicLayer();
26312 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26313 int64_t cur_level = maybe_layer->layerId();
26314 if (!isBatchedAtLevel(self, cur_level)) {
26315 return at::_ops::fft_irfftn::call(self, s, dim, norm);
26316 }
26317 Tensor self_value;
26318 optional<int64_t> self_bdim;
26319 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26320 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26321 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26322}
26323template <typename batch_rule_t, batch_rule_t batch_rule>
26324at::Tensor fft_hfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
26325 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26326 auto maybe_layer = maybeCurrentDynamicLayer();
26327 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26328 int64_t cur_level = maybe_layer->layerId();
26329 if (!isBatchedAtLevel(self, cur_level)) {
26330 return at::_ops::fft_hfftn::call(self, s, dim, norm);
26331 }
26332 Tensor self_value;
26333 optional<int64_t> self_bdim;
26334 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26335 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26336 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26337}
26338template <typename batch_rule_t, batch_rule_t batch_rule>
26339at::Tensor fft_ihfftn_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
26340 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26341 auto maybe_layer = maybeCurrentDynamicLayer();
26342 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26343 int64_t cur_level = maybe_layer->layerId();
26344 if (!isBatchedAtLevel(self, cur_level)) {
26345 return at::_ops::fft_ihfftn::call(self, s, dim, norm);
26346 }
26347 Tensor self_value;
26348 optional<int64_t> self_bdim;
26349 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26350 auto results = batch_rule(self_value, self_bdim, s, dim, norm);
26351 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26352}
26353template <typename batch_rule_t, batch_rule_t batch_rule>
26354at::Tensor fft_fftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
26355 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26356 auto maybe_layer = maybeCurrentDynamicLayer();
26357 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26358 int64_t cur_level = maybe_layer->layerId();
26359 if (!isBatchedAtLevel(self, cur_level)) {
26360 return at::_ops::fft_fftshift::call(self, dim);
26361 }
26362 Tensor self_value;
26363 optional<int64_t> self_bdim;
26364 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26365 auto results = batch_rule(self_value, self_bdim, dim);
26366 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26367}
26368template <typename batch_rule_t, batch_rule_t batch_rule>
26369at::Tensor fft_ifftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
26370 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26371 auto maybe_layer = maybeCurrentDynamicLayer();
26372 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26373 int64_t cur_level = maybe_layer->layerId();
26374 if (!isBatchedAtLevel(self, cur_level)) {
26375 return at::_ops::fft_ifftshift::call(self, dim);
26376 }
26377 Tensor self_value;
26378 optional<int64_t> self_bdim;
26379 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26380 auto results = batch_rule(self_value, self_bdim, dim);
26381 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26382}
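// Multi-output ops (most of the linalg factorizations that follow) expect the
// batch rule to return one flat tuple of interleaved (Tensor, optional<int64_t>)
// pairs; the wrapper rebuilds the public tuple by pairing get<0>/get<1> for the
// first output, get<2>/get<3> for the second, and so on. Illustrative sketch
// only (hypothetical name):
//   std::tuple<at::Tensor, c10::optional<int64_t>, at::Tensor, c10::optional<int64_t>>
//   my_cholesky_ex_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim,
//                             bool upper, bool check_errors);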
26383template <typename batch_rule_t, batch_rule_t batch_rule>
26384::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex_generated_plumbing(const at::Tensor & self, bool upper, bool check_errors) {
26385 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26386 auto maybe_layer = maybeCurrentDynamicLayer();
26387 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26388 int64_t cur_level = maybe_layer->layerId();
26389 if (!isBatchedAtLevel(self, cur_level)) {
26390 return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors);
26391 }
26392 Tensor self_value;
26393 optional<int64_t> self_bdim;
26394 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26395 auto results = batch_rule(self_value, self_bdim, upper, check_errors);
26396 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26397}
26398template <typename batch_rule_t, batch_rule_t batch_rule>
26399at::Tensor linalg_cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
26400 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26401 auto maybe_layer = maybeCurrentDynamicLayer();
26402 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26403 int64_t cur_level = maybe_layer->layerId();
26404 if (!isBatchedAtLevel(self, cur_level)) {
26405 return at::_ops::linalg_cholesky::call(self, upper);
26406 }
26407 Tensor self_value;
26408 optional<int64_t> self_bdim;
26409 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26410 auto results = batch_rule(self_value, self_bdim, upper);
26411 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26412}
26413template <typename batch_rule_t, batch_rule_t batch_rule>
26414at::Tensor linalg_cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
26415 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26416 auto maybe_layer = maybeCurrentDynamicLayer();
26417 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26418 int64_t cur_level = maybe_layer->layerId();
26419 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
26420 return at::_ops::linalg_cross::call(self, other, dim);
26421 }
26422 Tensor self_value;
26423 optional<int64_t> self_bdim;
26424 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26425 Tensor other_value;
26426 optional<int64_t> other_bdim;
26427 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
26428 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
26429 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26430}
26431template <typename batch_rule_t, batch_rule_t batch_rule>
26432::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor_generated_plumbing(const at::Tensor & A, bool pivot) {
26433 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26434 auto maybe_layer = maybeCurrentDynamicLayer();
26435 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26436 int64_t cur_level = maybe_layer->layerId();
26437 if (!isBatchedAtLevel(A, cur_level)) {
26438 return at::_ops::linalg_lu_factor::call(A, pivot);
26439 }
26440 Tensor A_value;
26441 optional<int64_t> A_bdim;
26442 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26443 auto results = batch_rule(A_value, A_bdim, pivot);
26444 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26445}
26446template <typename batch_rule_t, batch_rule_t batch_rule>
26447::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex_generated_plumbing(const at::Tensor & A, bool pivot, bool check_errors) {
26448 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26449 auto maybe_layer = maybeCurrentDynamicLayer();
26450 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26451 int64_t cur_level = maybe_layer->layerId();
26452 if (!isBatchedAtLevel(A, cur_level)) {
26453 return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors);
26454 }
26455 Tensor A_value;
26456 optional<int64_t> A_bdim;
26457 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26458 auto results = batch_rule(A_value, A_bdim, pivot, check_errors);
26459 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
26460}
26461template <typename batch_rule_t, batch_rule_t batch_rule>
26462::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_generated_plumbing(const at::Tensor & A, bool pivot) {
26463 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26464 auto maybe_layer = maybeCurrentDynamicLayer();
26465 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26466 int64_t cur_level = maybe_layer->layerId();
26467 if (!isBatchedAtLevel(A, cur_level)) {
26468 return at::_ops::linalg_lu::call(A, pivot);
26469 }
26470 Tensor A_value;
26471 optional<int64_t> A_bdim;
26472 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26473 auto results = batch_rule(A_value, A_bdim, pivot);
26474 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
26475}
26476template <typename batch_rule_t, batch_rule_t batch_rule>
26477at::Tensor linalg_lu_solve_generated_plumbing(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
26478 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26479 auto maybe_layer = maybeCurrentDynamicLayer();
26480 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26481 int64_t cur_level = maybe_layer->layerId();
26482 if (!isBatchedAtLevel(LU, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
26483 return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint);
26484 }
26485 Tensor LU_value;
26486 optional<int64_t> LU_bdim;
26487 std::tie(LU_value, LU_bdim) = unwrapTensorAtLevel(LU, cur_level);
26488 Tensor pivots_value;
26489 optional<int64_t> pivots_bdim;
26490 std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level);
26491 Tensor B_value;
26492 optional<int64_t> B_bdim;
26493 std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
26494 auto results = batch_rule(LU_value, LU_bdim, pivots_value, pivots_bdim, B_value, B_bdim, left, adjoint);
26495 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26496}
26497template <typename batch_rule_t, batch_rule_t batch_rule>
26498::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det_generated_plumbing(const at::Tensor & A) {
26499 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26500 auto maybe_layer = maybeCurrentDynamicLayer();
26501 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26502 int64_t cur_level = maybe_layer->layerId();
26503 if (!isBatchedAtLevel(A, cur_level)) {
26504 return at::_ops::_linalg_det::call(A);
26505 }
26506 Tensor A_value;
26507 optional<int64_t> A_bdim;
26508 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26509 auto results = batch_rule(A_value, A_bdim);
26510 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
26511}
26512template <typename batch_rule_t, batch_rule_t batch_rule>
26513at::Tensor linalg_det_generated_plumbing(const at::Tensor & A) {
26514 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26515 auto maybe_layer = maybeCurrentDynamicLayer();
26516 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26517 int64_t cur_level = maybe_layer->layerId();
26518 if (!isBatchedAtLevel(A, cur_level)) {
26519 return at::_ops::linalg_det::call(A);
26520 }
26521 Tensor A_value;
26522 optional<int64_t> A_bdim;
26523 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26524 auto results = batch_rule(A_value, A_bdim);
26525 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26526}
26527template <typename batch_rule_t, batch_rule_t batch_rule>
26528at::Tensor det_generated_plumbing(const at::Tensor & self) {
26529 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26530 auto maybe_layer = maybeCurrentDynamicLayer();
26531 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26532 int64_t cur_level = maybe_layer->layerId();
26533 if (!isBatchedAtLevel(self, cur_level)) {
26534 return at::_ops::det::call(self);
26535 }
26536 Tensor self_value;
26537 optional<int64_t> self_bdim;
26538 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26539 auto results = batch_rule(self_value, self_bdim);
26540 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26541}
26542template <typename batch_rule_t, batch_rule_t batch_rule>
26543::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex_generated_plumbing(const at::Tensor & self, bool hermitian, bool check_errors) {
26544 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26545 auto maybe_layer = maybeCurrentDynamicLayer();
26546 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26547 int64_t cur_level = maybe_layer->layerId();
26548 if (!isBatchedAtLevel(self, cur_level)) {
26549 return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors);
26550 }
26551 Tensor self_value;
26552 optional<int64_t> self_bdim;
26553 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26554 auto results = batch_rule(self_value, self_bdim, hermitian, check_errors);
26555 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
26556}
26557template <typename batch_rule_t, batch_rule_t batch_rule>
26558::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor_generated_plumbing(const at::Tensor & self, bool hermitian) {
26559 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26560 auto maybe_layer = maybeCurrentDynamicLayer();
26561 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26562 int64_t cur_level = maybe_layer->layerId();
26563 if (!isBatchedAtLevel(self, cur_level)) {
26564 return at::_ops::linalg_ldl_factor::call(self, hermitian);
26565 }
26566 Tensor self_value;
26567 optional<int64_t> self_bdim;
26568 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26569 auto results = batch_rule(self_value, self_bdim, hermitian);
26570 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26571}
26572template <typename batch_rule_t, batch_rule_t batch_rule>
26573at::Tensor linalg_ldl_solve_generated_plumbing(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
26574 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26575 auto maybe_layer = maybeCurrentDynamicLayer();
26576 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26577 int64_t cur_level = maybe_layer->layerId();
26578 if (!isBatchedAtLevel(LD, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
26579 return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian);
26580 }
26581 Tensor LD_value;
26582 optional<int64_t> LD_bdim;
26583 std::tie(LD_value, LD_bdim) = unwrapTensorAtLevel(LD, cur_level);
26584 Tensor pivots_value;
26585 optional<int64_t> pivots_bdim;
26586 std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level);
26587 Tensor B_value;
26588 optional<int64_t> B_bdim;
26589 std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
26590 auto results = batch_rule(LD_value, LD_bdim, pivots_value, pivots_bdim, B_value, B_bdim, hermitian);
26591 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26592}
26593template <typename batch_rule_t, batch_rule_t batch_rule>
26594::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq_generated_plumbing(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver) {
26595 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26596 auto maybe_layer = maybeCurrentDynamicLayer();
26597 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26598 int64_t cur_level = maybe_layer->layerId();
26599 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(b, cur_level)) {
26600 return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
26601 }
26602 Tensor self_value;
26603 optional<int64_t> self_bdim;
26604 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26605 Tensor b_value;
26606 optional<int64_t> b_bdim;
26607 std::tie(b_value, b_bdim) = unwrapTensorAtLevel(b, cur_level);
26608 auto results = batch_rule(self_value, self_bdim, b_value, b_bdim, rcond, driver);
26609 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
26610}
26611template <typename batch_rule_t, batch_rule_t batch_rule>
26612at::Tensor linalg_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
26613 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26614 auto maybe_layer = maybeCurrentDynamicLayer();
26615 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26616 int64_t cur_level = maybe_layer->layerId();
26617 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
26618 return at::_ops::linalg_matmul::call(self, other);
26619 }
26620 Tensor self_value;
26621 optional<int64_t> self_bdim;
26622 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26623 Tensor other_value;
26624 optional<int64_t> other_bdim;
26625 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
26626 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
26627 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26628}
26629template <typename batch_rule_t, batch_rule_t batch_rule>
26630at::Tensor linalg_vecdot_generated_plumbing(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
26631 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26632 auto maybe_layer = maybeCurrentDynamicLayer();
26633 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26634 int64_t cur_level = maybe_layer->layerId();
26635 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(y, cur_level)) {
26636 return at::_ops::linalg_vecdot::call(x, y, dim);
26637 }
26638 Tensor x_value;
26639 optional<int64_t> x_bdim;
26640 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
26641 Tensor y_value;
26642 optional<int64_t> y_bdim;
26643 std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
26644 auto results = batch_rule(x_value, x_bdim, y_value, y_bdim, dim);
26645 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26646}
26647template <typename batch_rule_t, batch_rule_t batch_rule>
26648at::Tensor linalg_matrix_exp_generated_plumbing(const at::Tensor & self) {
26649 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26650 auto maybe_layer = maybeCurrentDynamicLayer();
26651 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26652 int64_t cur_level = maybe_layer->layerId();
26653 if (!isBatchedAtLevel(self, cur_level)) {
26654 return at::_ops::linalg_matrix_exp::call(self);
26655 }
26656 Tensor self_value;
26657 optional<int64_t> self_bdim;
26658 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26659 auto results = batch_rule(self_value, self_bdim);
26660 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26661}
26662template <typename batch_rule_t, batch_rule_t batch_rule>
26663::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet_generated_plumbing(const at::Tensor & A) {
26664 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26665 auto maybe_layer = maybeCurrentDynamicLayer();
26666 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26667 int64_t cur_level = maybe_layer->layerId();
26668 if (!isBatchedAtLevel(A, cur_level)) {
26669 return at::_ops::_linalg_slogdet::call(A);
26670 }
26671 Tensor A_value;
26672 optional<int64_t> A_bdim;
26673 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26674 auto results = batch_rule(A_value, A_bdim);
26675 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
26676}
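// _linalg_slogdet is the internal variant with additional outputs, so its
// batch rule yields four (value, bdim) pairs, while the public linalg_slogdet
// below re-wraps only two.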
26677template <typename batch_rule_t, batch_rule_t batch_rule>
26678::std::tuple<at::Tensor,at::Tensor> linalg_slogdet_generated_plumbing(const at::Tensor & A) {
26679 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26680 auto maybe_layer = maybeCurrentDynamicLayer();
26681 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26682 int64_t cur_level = maybe_layer->layerId();
26683 if (!isBatchedAtLevel(A, cur_level)) {
26684 return at::_ops::linalg_slogdet::call(A);
26685 }
26686 Tensor A_value;
26687 optional<int64_t> A_bdim;
26688 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26689 auto results = batch_rule(A_value, A_bdim);
26690 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26691}
26692template <typename batch_rule_t, batch_rule_t batch_rule>
26693::std::tuple<at::Tensor,at::Tensor> slogdet_generated_plumbing(const at::Tensor & self) {
26694 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26695 auto maybe_layer = maybeCurrentDynamicLayer();
26696 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26697 int64_t cur_level = maybe_layer->layerId();
26698 if (!isBatchedAtLevel(self, cur_level)) {
26699 return at::_ops::slogdet::call(self);
26700 }
26701 Tensor self_value;
26702 optional<int64_t> self_bdim;
26703 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26704 auto results = batch_rule(self_value, self_bdim);
26705 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26706}
26707template <typename batch_rule_t, batch_rule_t batch_rule>
26708at::Tensor logdet_generated_plumbing(const at::Tensor & self) {
26709 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26710 auto maybe_layer = maybeCurrentDynamicLayer();
26711 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26712 int64_t cur_level = maybe_layer->layerId();
26713 if (!isBatchedAtLevel(self, cur_level)) {
26714 return at::_ops::logdet::call(self);
26715 }
26716 Tensor self_value;
26717 optional<int64_t> self_bdim;
26718 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26719 auto results = batch_rule(self_value, self_bdim);
26720 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26721}
26722template <typename batch_rule_t, batch_rule_t batch_rule>
26723::std::tuple<at::Tensor,at::Tensor> linalg_eig_generated_plumbing(const at::Tensor & self) {
26724 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26725 auto maybe_layer = maybeCurrentDynamicLayer();
26726 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26727 int64_t cur_level = maybe_layer->layerId();
26728 if (!isBatchedAtLevel(self, cur_level)) {
26729 return at::_ops::linalg_eig::call(self);
26730 }
26731 Tensor self_value;
26732 optional<int64_t> self_bdim;
26733 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26734 auto results = batch_rule(self_value, self_bdim);
26735 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26736}
26737template <typename batch_rule_t, batch_rule_t batch_rule>
26738at::Tensor linalg_eigvals_generated_plumbing(const at::Tensor & self) {
26739 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26740 auto maybe_layer = maybeCurrentDynamicLayer();
26741 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26742 int64_t cur_level = maybe_layer->layerId();
26743 if (!isBatchedAtLevel(self, cur_level)) {
26744 return at::_ops::linalg_eigvals::call(self);
26745 }
26746 Tensor self_value;
26747 optional<int64_t> self_bdim;
26748 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26749 auto results = batch_rule(self_value, self_bdim);
26750 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26751}
26752template <typename batch_rule_t, batch_rule_t batch_rule>
26753::std::tuple<at::Tensor,at::Tensor> _linalg_eigh_generated_plumbing(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
26754 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26755 auto maybe_layer = maybeCurrentDynamicLayer();
26756 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26757 int64_t cur_level = maybe_layer->layerId();
26758 if (!isBatchedAtLevel(A, cur_level)) {
26759 return at::_ops::_linalg_eigh::call(A, UPLO, compute_v);
26760 }
26761 Tensor A_value;
26762 optional<int64_t> A_bdim;
26763 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26764 auto results = batch_rule(A_value, A_bdim, UPLO, compute_v);
26765 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26766}
26767template <typename batch_rule_t, batch_rule_t batch_rule>
26768::std::tuple<at::Tensor,at::Tensor> linalg_eigh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
26769 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26770 auto maybe_layer = maybeCurrentDynamicLayer();
26771 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26772 int64_t cur_level = maybe_layer->layerId();
26773 if (!isBatchedAtLevel(self, cur_level)) {
26774 return at::_ops::linalg_eigh::call(self, UPLO);
26775 }
26776 Tensor self_value;
26777 optional<int64_t> self_bdim;
26778 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26779 auto results = batch_rule(self_value, self_bdim, UPLO);
26780 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26781}
26782template <typename batch_rule_t, batch_rule_t batch_rule>
26783at::Tensor linalg_eigvalsh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
26784 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26785 auto maybe_layer = maybeCurrentDynamicLayer();
26786 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26787 int64_t cur_level = maybe_layer->layerId();
26788 if (!isBatchedAtLevel(self, cur_level)) {
26789 return at::_ops::linalg_eigvalsh::call(self, UPLO);
26790 }
26791 Tensor self_value;
26792 optional<int64_t> self_bdim;
26793 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26794 auto results = batch_rule(self_value, self_bdim, UPLO);
26795 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26796}
26797template <typename batch_rule_t, batch_rule_t batch_rule>
26798at::Tensor linalg_householder_product_generated_plumbing(const at::Tensor & input, const at::Tensor & tau) {
26799 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26800 auto maybe_layer = maybeCurrentDynamicLayer();
26801 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26802 int64_t cur_level = maybe_layer->layerId();
26803 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tau, cur_level)) {
26804 return at::_ops::linalg_householder_product::call(input, tau);
26805 }
26806 Tensor input_value;
26807 optional<int64_t> input_bdim;
26808 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
26809 Tensor tau_value;
26810 optional<int64_t> tau_bdim;
26811 std::tie(tau_value, tau_bdim) = unwrapTensorAtLevel(tau, cur_level);
26812 auto results = batch_rule(input_value, input_bdim, tau_value, tau_bdim);
26813 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26814}
26815template <typename batch_rule_t, batch_rule_t batch_rule>
26816::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex_generated_plumbing(const at::Tensor & A, bool check_errors) {
26817 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26818 auto maybe_layer = maybeCurrentDynamicLayer();
26819 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26820 int64_t cur_level = maybe_layer->layerId();
26821 if (!isBatchedAtLevel(A, cur_level)) {
26822 return at::_ops::linalg_inv_ex::call(A, check_errors);
26823 }
26824 Tensor A_value;
26825 optional<int64_t> A_bdim;
26826 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26827 auto results = batch_rule(A_value, A_bdim, check_errors);
26828 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
26829}
26830template <typename batch_rule_t, batch_rule_t batch_rule>
26831at::Tensor linalg_inv_generated_plumbing(const at::Tensor & A) {
26832 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26833 auto maybe_layer = maybeCurrentDynamicLayer();
26834 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26835 int64_t cur_level = maybe_layer->layerId();
26836 if (!isBatchedAtLevel(A, cur_level)) {
26837 return at::_ops::linalg_inv::call(A);
26838 }
26839 Tensor A_value;
26840 optional<int64_t> A_bdim;
26841 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
26842 auto results = batch_rule(A_value, A_bdim);
26843 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26844}
26845template <typename batch_rule_t, batch_rule_t batch_rule>
26846at::Tensor inverse_generated_plumbing(const at::Tensor & self) {
26847 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26848 auto maybe_layer = maybeCurrentDynamicLayer();
26849 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26850 int64_t cur_level = maybe_layer->layerId();
26851 if (!isBatchedAtLevel(self, cur_level)) {
26852 return at::_ops::inverse::call(self);
26853 }
26854 Tensor self_value;
26855 optional<int64_t> self_bdim;
26856 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26857 auto results = batch_rule(self_value, self_bdim);
26858 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26859}
26860template <typename batch_rule_t, batch_rule_t batch_rule>
26861at::Tensor inner_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
26862 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26863 auto maybe_layer = maybeCurrentDynamicLayer();
26864 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26865 int64_t cur_level = maybe_layer->layerId();
26866 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
26867 return at::_ops::inner::call(self, other);
26868 }
26869 Tensor self_value;
26870 optional<int64_t> self_bdim;
26871 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26872 Tensor other_value;
26873 optional<int64_t> other_bdim;
26874 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
26875 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
26876 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26877}
26878template <typename batch_rule_t, batch_rule_t batch_rule>
26879at::Tensor outer_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
26880 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26881 auto maybe_layer = maybeCurrentDynamicLayer();
26882 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26883 int64_t cur_level = maybe_layer->layerId();
26884 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
26885 return at::_ops::outer::call(self, vec2);
26886 }
26887 Tensor self_value;
26888 optional<int64_t> self_bdim;
26889 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26890 Tensor vec2_value;
26891 optional<int64_t> vec2_bdim;
26892 std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
26893 auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
26894 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26895}
26896template <typename batch_rule_t, batch_rule_t batch_rule>
26897at::Tensor ger_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
26898 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26899 auto maybe_layer = maybeCurrentDynamicLayer();
26900 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26901 int64_t cur_level = maybe_layer->layerId();
26902 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
26903 return at::_ops::ger::call(self, vec2);
26904 }
26905 Tensor self_value;
26906 optional<int64_t> self_bdim;
26907 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26908 Tensor vec2_value;
26909 optional<int64_t> vec2_bdim;
26910 std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
26911 auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
26912 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26913}
26914template <typename batch_rule_t, batch_rule_t batch_rule>
26915at::Tensor linalg_norm_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
26916 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26917 auto maybe_layer = maybeCurrentDynamicLayer();
26918 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26919 int64_t cur_level = maybe_layer->layerId();
26920 if (!isBatchedAtLevel(self, cur_level)) {
26921 return at::_ops::linalg_norm::call(self, ord, dim, keepdim, dtype);
26922 }
26923 Tensor self_value;
26924 optional<int64_t> self_bdim;
26925 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26926 auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
26927 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26928}
26929template <typename batch_rule_t, batch_rule_t batch_rule>
26930at::Tensor linalg_norm_ord_str_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
26931 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26932 auto maybe_layer = maybeCurrentDynamicLayer();
26933 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26934 int64_t cur_level = maybe_layer->layerId();
26935 if (!isBatchedAtLevel(self, cur_level)) {
26936 return at::_ops::linalg_norm_ord_str::call(self, ord, dim, keepdim, dtype);
26937 }
26938 Tensor self_value;
26939 optional<int64_t> self_bdim;
26940 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26941 auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
26942 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26943}
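// Overload suffixes in the wrapper names (ord_str here, p_str for linalg_cond,
// str_ord for linalg_matrix_norm) mirror the operator overload names: each
// overload has its own schema and its own at::_ops::<name>::call entry point,
// and therefore gets its own generated plumbing function.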
26944template <typename batch_rule_t, batch_rule_t batch_rule>
26945at::Tensor linalg_vector_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
26946 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26947 auto maybe_layer = maybeCurrentDynamicLayer();
26948 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26949 int64_t cur_level = maybe_layer->layerId();
26950 if (!isBatchedAtLevel(self, cur_level)) {
26951 return at::_ops::linalg_vector_norm::call(self, ord, dim, keepdim, dtype);
26952 }
26953 Tensor self_value;
26954 optional<int64_t> self_bdim;
26955 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26956 auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
26957 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26958}
26959template <typename batch_rule_t, batch_rule_t batch_rule>
26960at::Tensor linalg_matrix_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
26961 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26962 auto maybe_layer = maybeCurrentDynamicLayer();
26963 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26964 int64_t cur_level = maybe_layer->layerId();
26965 if (!isBatchedAtLevel(self, cur_level)) {
26966 return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype);
26967 }
26968 Tensor self_value;
26969 optional<int64_t> self_bdim;
26970 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26971 auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
26972 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26973}
26974template <typename batch_rule_t, batch_rule_t batch_rule>
26975at::Tensor linalg_matrix_norm_str_ord_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
26976 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26977 auto maybe_layer = maybeCurrentDynamicLayer();
26978 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26979 int64_t cur_level = maybe_layer->layerId();
26980 if (!isBatchedAtLevel(self, cur_level)) {
26981 return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype);
26982 }
26983 Tensor self_value;
26984 optional<int64_t> self_bdim;
26985 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
26986 auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
26987 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
26988}
26989template <typename batch_rule_t, batch_rule_t batch_rule>
26990::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
26991 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
26992 auto maybe_layer = maybeCurrentDynamicLayer();
26993 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
26994 int64_t cur_level = maybe_layer->layerId();
26995 if (!isBatchedAtLevel(A, cur_level)) {
26996 return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv, driver);
26997 }
26998 Tensor A_value;
26999 optional<int64_t> A_bdim;
27000 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
27001 auto results = batch_rule(A_value, A_bdim, full_matrices, compute_uv, driver);
27002 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
27003}
27004template <typename batch_rule_t, batch_rule_t batch_rule>
27005::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
27006 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27007 auto maybe_layer = maybeCurrentDynamicLayer();
27008 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27009 int64_t cur_level = maybe_layer->layerId();
27010 if (!isBatchedAtLevel(A, cur_level)) {
27011 return at::_ops::linalg_svd::call(A, full_matrices, driver);
27012 }
27013 Tensor A_value;
27014 optional<int64_t> A_bdim;
27015 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
27016 auto results = batch_rule(A_value, A_bdim, full_matrices, driver);
27017 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
27018}
27019template <typename batch_rule_t, batch_rule_t batch_rule>
27020at::Tensor linalg_svdvals_generated_plumbing(const at::Tensor & A, c10::optional<c10::string_view> driver) {
27021 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27022 auto maybe_layer = maybeCurrentDynamicLayer();
27023 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27024 int64_t cur_level = maybe_layer->layerId();
27025 if (!isBatchedAtLevel(A, cur_level)) {
27026 return at::_ops::linalg_svdvals::call(A, driver);
27027 }
27028 Tensor A_value;
27029 optional<int64_t> A_bdim;
27030 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
27031 auto results = batch_rule(A_value, A_bdim, driver);
27032 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27033}
27034template <typename batch_rule_t, batch_rule_t batch_rule>
27035at::Tensor linalg_cond_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p) {
27036 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27037 auto maybe_layer = maybeCurrentDynamicLayer();
27038 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27039 int64_t cur_level = maybe_layer->layerId();
27040 if (!isBatchedAtLevel(self, cur_level)) {
27041 return at::_ops::linalg_cond::call(self, p);
27042 }
27043 Tensor self_value;
27044 optional<int64_t> self_bdim;
27045 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27046 auto results = batch_rule(self_value, self_bdim, p);
27047 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27048}
27049template <typename batch_rule_t, batch_rule_t batch_rule>
27050at::Tensor linalg_cond_p_str_generated_plumbing(const at::Tensor & self, c10::string_view p) {
27051 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27052 auto maybe_layer = maybeCurrentDynamicLayer();
27053 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27054 int64_t cur_level = maybe_layer->layerId();
27055 if (!isBatchedAtLevel(self, cur_level)) {
27056 return at::_ops::linalg_cond_p_str::call(self, p);
27057 }
27058 Tensor self_value;
27059 optional<int64_t> self_bdim;
27060 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27061 auto results = batch_rule(self_value, self_bdim, p);
27062 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27063}
27064template <typename batch_rule_t, batch_rule_t batch_rule>
27065at::Tensor linalg_pinv_atol_rtol_tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
27066 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27067 auto maybe_layer = maybeCurrentDynamicLayer();
27068 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27069 int64_t cur_level = maybe_layer->layerId();
27070 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
27071 return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian);
27072 }
27073 Tensor self_value;
27074 optional<int64_t> self_bdim;
27075 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27076 optional<Tensor> atol_value;
27077 optional<int64_t> atol_bdim;
27078 if (atol) {
27079 std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
27080 }
27081 optional<Tensor> rtol_value;
27082 optional<int64_t> rtol_bdim;
27083 if (rtol) {
27084 std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
27085 }
27086 auto results = batch_rule(self_value, self_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
27087 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27088}
27089template <typename batch_rule_t, batch_rule_t batch_rule>
27090at::Tensor linalg_pinv_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
27091 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27092 auto maybe_layer = maybeCurrentDynamicLayer();
27093 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27094 int64_t cur_level = maybe_layer->layerId();
27095 if (!isBatchedAtLevel(self, cur_level)) {
27096 return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian);
27097 }
27098 Tensor self_value;
27099 optional<int64_t> self_bdim;
27100 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27101 auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
27102 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27103}
27104template <typename batch_rule_t, batch_rule_t batch_rule>
27105at::Tensor linalg_pinv_generated_plumbing(const at::Tensor & self, double rcond, bool hermitian) {
27106 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27107 auto maybe_layer = maybeCurrentDynamicLayer();
27108 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27109 int64_t cur_level = maybe_layer->layerId();
27110 if (!isBatchedAtLevel(self, cur_level)) {
27111 return at::_ops::linalg_pinv::call(self, rcond, hermitian);
27112 }
27113 Tensor self_value;
27114 optional<int64_t> self_bdim;
27115 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27116 auto results = batch_rule(self_value, self_bdim, rcond, hermitian);
27117 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27118}
27119template <typename batch_rule_t, batch_rule_t batch_rule>
27120at::Tensor linalg_pinv_rcond_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
27121 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27122 auto maybe_layer = maybeCurrentDynamicLayer();
27123 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27124 int64_t cur_level = maybe_layer->layerId();
27125 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(rcond, cur_level)) {
27126 return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian);
27127 }
27128 Tensor self_value;
27129 optional<int64_t> self_bdim;
27130 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27131 Tensor rcond_value;
27132 optional<int64_t> rcond_bdim;
27133 std::tie(rcond_value, rcond_bdim) = unwrapTensorAtLevel(rcond, cur_level);
27134 auto results = batch_rule(self_value, self_bdim, rcond_value, rcond_bdim, hermitian);
27135 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27136}
27137template <typename batch_rule_t, batch_rule_t batch_rule>
27138::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
27139 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27140 auto maybe_layer = maybeCurrentDynamicLayer();
27141 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27142 int64_t cur_level = maybe_layer->layerId();
27143 if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
27144 return at::_ops::_linalg_solve_ex::call(A, B, left, check_errors);
27145 }
27146 Tensor A_value;
27147 optional<int64_t> A_bdim;
27148 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
27149 Tensor B_value;
27150 optional<int64_t> B_bdim;
27151 std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
27152 auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
27153 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
27154}
27155template <typename batch_rule_t, batch_rule_t batch_rule>
27156::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
27157 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27158 auto maybe_layer = maybeCurrentDynamicLayer();
27159 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27160 int64_t cur_level = maybe_layer->layerId();
27161 if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
27162 return at::_ops::linalg_solve_ex::call(A, B, left, check_errors);
27163 }
27164 Tensor A_value;
27165 optional<int64_t> A_bdim;
27166 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
27167 Tensor B_value;
27168 optional<int64_t> B_bdim;
27169 std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
27170 auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
27171 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
27172}
27173template <typename batch_rule_t, batch_rule_t batch_rule>
27174at::Tensor linalg_solve_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left) {
27175 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27176 auto maybe_layer = maybeCurrentDynamicLayer();
27177 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27178 int64_t cur_level = maybe_layer->layerId();
27179 if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
27180 return at::_ops::linalg_solve::call(A, B, left);
27181 }
27182 Tensor A_value;
27183 optional<int64_t> A_bdim;
27184 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
27185 Tensor B_value;
27186 optional<int64_t> B_bdim;
27187 std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
27188 auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left);
27189 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27190}
27191template <typename batch_rule_t, batch_rule_t batch_rule>
27192at::Tensor linalg_tensorinv_generated_plumbing(const at::Tensor & self, int64_t ind) {
27193 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27194 auto maybe_layer = maybeCurrentDynamicLayer();
27195 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27196 int64_t cur_level = maybe_layer->layerId();
27197 if (!isBatchedAtLevel(self, cur_level)) {
27198 return at::_ops::linalg_tensorinv::call(self, ind);
27199 }
27200 Tensor self_value;
27201 optional<int64_t> self_bdim;
27202 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27203 auto results = batch_rule(self_value, self_bdim, ind);
27204 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27205}
27206template <typename batch_rule_t, batch_rule_t batch_rule>
27207at::Tensor linalg_tensorsolve_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
27208 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27209 auto maybe_layer = maybeCurrentDynamicLayer();
27210 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27211 int64_t cur_level = maybe_layer->layerId();
27212 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
27213 return at::_ops::linalg_tensorsolve::call(self, other, dims);
27214 }
27215 Tensor self_value;
27216 optional<int64_t> self_bdim;
27217 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27218 Tensor other_value;
27219 optional<int64_t> other_bdim;
27220 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
27221 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims);
27222 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27223}
27224template <typename batch_rule_t, batch_rule_t batch_rule>
27225::std::tuple<at::Tensor,at::Tensor> linalg_qr_generated_plumbing(const at::Tensor & A, c10::string_view mode) {
27226 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27227 auto maybe_layer = maybeCurrentDynamicLayer();
27228 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27229 int64_t cur_level = maybe_layer->layerId();
27230 if (!isBatchedAtLevel(A, cur_level)) {
27231 return at::_ops::linalg_qr::call(A, mode);
27232 }
27233 Tensor A_value;
27234 optional<int64_t> A_bdim;
27235 std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
27236 auto results = batch_rule(A_value, A_bdim, mode);
27237 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
27238}
27239template <typename batch_rule_t, batch_rule_t batch_rule>
27240at::Tensor linalg_matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
27241 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27242 auto maybe_layer = maybeCurrentDynamicLayer();
27243 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27244 int64_t cur_level = maybe_layer->layerId();
27245 if (!isBatchedAtLevel(self, cur_level)) {
27246 return at::_ops::linalg_matrix_power::call(self, n);
27247 }
27248 Tensor self_value;
27249 optional<int64_t> self_bdim;
27250 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27251 auto results = batch_rule(self_value, self_bdim, n);
27252 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27253}
27254template <typename batch_rule_t, batch_rule_t batch_rule>
27255at::Tensor linalg_matrix_rank_atol_rtol_tensor_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
27256 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27257 auto maybe_layer = maybeCurrentDynamicLayer();
27258 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27259 int64_t cur_level = maybe_layer->layerId();
27260 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
27261 return at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input, atol, rtol, hermitian);
27262 }
27263 Tensor input_value;
27264 optional<int64_t> input_bdim;
27265 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
27266 optional<Tensor> atol_value;
27267 optional<int64_t> atol_bdim;
27268 if (atol) {
27269 std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
27270 }
27271 optional<Tensor> rtol_value;
27272 optional<int64_t> rtol_bdim;
27273 if (rtol) {
27274 std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
27275 }
27276 auto results = batch_rule(input_value, input_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
27277 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27278}
27279template <typename batch_rule_t, batch_rule_t batch_rule>
27280at::Tensor linalg_matrix_rank_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
27281 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27282 auto maybe_layer = maybeCurrentDynamicLayer();
27283 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27284 int64_t cur_level = maybe_layer->layerId();
27285 if (!isBatchedAtLevel(self, cur_level)) {
27286 return at::_ops::linalg_matrix_rank_atol_rtol_float::call(self, atol, rtol, hermitian);
27287 }
27288 Tensor self_value;
27289 optional<int64_t> self_bdim;
27290 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27291 auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
27292 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27293}
27294template <typename batch_rule_t, batch_rule_t batch_rule>
27295at::Tensor linalg_matrix_rank_generated_plumbing(const at::Tensor & self, double tol, bool hermitian) {
27296 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27297 auto maybe_layer = maybeCurrentDynamicLayer();
27298 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27299 int64_t cur_level = maybe_layer->layerId();
27300 if (!isBatchedAtLevel(self, cur_level)) {
27301 return at::_ops::linalg_matrix_rank::call(self, tol, hermitian);
27302 }
27303 Tensor self_value;
27304 optional<int64_t> self_bdim;
27305 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27306 auto results = batch_rule(self_value, self_bdim, tol, hermitian);
27307 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27308}
27309template <typename batch_rule_t, batch_rule_t batch_rule>
27310at::Tensor linalg_matrix_rank_tol_tensor_generated_plumbing(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
27311 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27312 auto maybe_layer = maybeCurrentDynamicLayer();
27313 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27314 int64_t cur_level = maybe_layer->layerId();
27315 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tol, cur_level)) {
27316 return at::_ops::linalg_matrix_rank_tol_tensor::call(input, tol, hermitian);
27317 }
27318 Tensor input_value;
27319 optional<int64_t> input_bdim;
27320 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
27321 Tensor tol_value;
27322 optional<int64_t> tol_bdim;
27323 std::tie(tol_value, tol_bdim) = unwrapTensorAtLevel(tol, cur_level);
27324 auto results = batch_rule(input_value, input_bdim, tol_value, tol_bdim, hermitian);
27325 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27326}
27327template <typename batch_rule_t, batch_rule_t batch_rule>
27328at::Tensor linalg_multi_dot_generated_plumbing(at::TensorList tensors) {
27329 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27330 auto maybe_layer = maybeCurrentDynamicLayer();
27331 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27332 int64_t cur_level = maybe_layer->layerId();
27333 if (!isBatchedAtLevel(tensors, cur_level)) {
27334 return at::_ops::linalg_multi_dot::call(tensors);
27335 }
27336
27337 auto results = batch_rule(tensors);
27338 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27339}
27340template <typename batch_rule_t, batch_rule_t batch_rule>
27341at::Tensor nested_to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
27342 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27343 auto maybe_layer = maybeCurrentDynamicLayer();
27344 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27345 int64_t cur_level = maybe_layer->layerId();
27346 if (!isBatchedAtLevel(self, cur_level)) {
27347 return at::_ops::nested_to_padded_tensor::call(self, padding, output_size);
27348 }
27349 Tensor self_value;
27350 optional<int64_t> self_bdim;
27351 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27352 auto results = batch_rule(self_value, self_bdim, padding, output_size);
27353 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27354}
27355template <typename batch_rule_t, batch_rule_t batch_rule>
27356at::Tensor _test_serialization_subcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
27357 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27358 auto maybe_layer = maybeCurrentDynamicLayer();
27359 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27360 int64_t cur_level = maybe_layer->layerId();
27361 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
27362 return at::_ops::_test_serialization_subcmul::call(self, other, alpha);
27363 }
27364 Tensor self_value;
27365 optional<int64_t> self_bdim;
27366 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27367 Tensor other_value;
27368 optional<int64_t> other_bdim;
27369 std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
27370 auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
27371 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27372}
27373template <typename batch_rule_t, batch_rule_t batch_rule>
27374at::Tensor _test_optional_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
27375 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27376 auto maybe_layer = maybeCurrentDynamicLayer();
27377 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27378 int64_t cur_level = maybe_layer->layerId();
27379 if (!isBatchedAtLevel(values, cur_level)) {
27380 return at::_ops::_test_optional_intlist::call(values, addends);
27381 }
27382 Tensor values_value;
27383 optional<int64_t> values_bdim;
27384 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
27385 auto results = batch_rule(values_value, values_bdim, addends);
27386 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27387}
27388template <typename batch_rule_t, batch_rule_t batch_rule>
27389at::Tensor _test_optional_filled_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
27390 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27391 auto maybe_layer = maybeCurrentDynamicLayer();
27392 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27393 int64_t cur_level = maybe_layer->layerId();
27394 if (!isBatchedAtLevel(values, cur_level)) {
27395 return at::_ops::_test_optional_filled_intlist::call(values, addends);
27396 }
27397 Tensor values_value;
27398 optional<int64_t> values_bdim;
27399 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
27400 auto results = batch_rule(values_value, values_bdim, addends);
27401 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27402}
27403template <typename batch_rule_t, batch_rule_t batch_rule>
27404at::Tensor _test_optional_floatlist_generated_plumbing(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
27405 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27406 auto maybe_layer = maybeCurrentDynamicLayer();
27407 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27408 int64_t cur_level = maybe_layer->layerId();
27409 if (!isBatchedAtLevel(values, cur_level)) {
27410 return at::_ops::_test_optional_floatlist::call(values, addends);
27411 }
27412 Tensor values_value;
27413 optional<int64_t> values_bdim;
27414 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
27415 auto results = batch_rule(values_value, values_bdim, addends);
27416 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27417}
27418template <typename batch_rule_t, batch_rule_t batch_rule>
27419at::Tensor _test_string_default_generated_plumbing(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
27420 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27421 auto maybe_layer = maybeCurrentDynamicLayer();
27422 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27423 int64_t cur_level = maybe_layer->layerId();
27424 if (!isBatchedAtLevel(dummy, cur_level)) {
27425 return at::_ops::_test_string_default::call(dummy, a, b);
27426 }
27427 Tensor dummy_value;
27428 optional<int64_t> dummy_bdim;
27429 std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
27430 auto results = batch_rule(dummy_value, dummy_bdim, a, b);
27431 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27432}
27433template <typename batch_rule_t, batch_rule_t batch_rule>
27434at::Tensor _test_ambiguous_defaults_a_generated_plumbing(const at::Tensor & dummy, int64_t a, int64_t b) {
27435 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27436 auto maybe_layer = maybeCurrentDynamicLayer();
27437 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27438 int64_t cur_level = maybe_layer->layerId();
27439 if (!isBatchedAtLevel(dummy, cur_level)) {
27440 return at::_ops::_test_ambiguous_defaults_a::call(dummy, a, b);
27441 }
27442 Tensor dummy_value;
27443 optional<int64_t> dummy_bdim;
27444 std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
27445 auto results = batch_rule(dummy_value, dummy_bdim, a, b);
27446 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27447}
27448template <typename batch_rule_t, batch_rule_t batch_rule>
27449at::Tensor _test_ambiguous_defaults_b_generated_plumbing(const at::Tensor & dummy, int64_t a, c10::string_view b) {
27450 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27451 auto maybe_layer = maybeCurrentDynamicLayer();
27452 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27453 int64_t cur_level = maybe_layer->layerId();
27454 if (!isBatchedAtLevel(dummy, cur_level)) {
27455 return at::_ops::_test_ambiguous_defaults_b::call(dummy, a, b);
27456 }
27457 Tensor dummy_value;
27458 optional<int64_t> dummy_bdim;
27459 std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
27460 auto results = batch_rule(dummy_value, dummy_bdim, a, b);
27461 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27462}
27463template <typename batch_rule_t, batch_rule_t batch_rule>
27464at::Tensor _test_warn_in_autograd_generated_plumbing(const at::Tensor & self) {
27465 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27466 auto maybe_layer = maybeCurrentDynamicLayer();
27467 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27468 int64_t cur_level = maybe_layer->layerId();
27469 if (!isBatchedAtLevel(self, cur_level)) {
27470 return at::_ops::_test_warn_in_autograd::call(self);
27471 }
27472 Tensor self_value;
27473 optional<int64_t> self_bdim;
27474 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27475 auto results = batch_rule(self_value, self_bdim);
27476 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27477}
27478template <typename batch_rule_t, batch_rule_t batch_rule>
27479at::Tensor _test_autograd_multiple_dispatch_fullcoverage_generated_plumbing(const at::Tensor & self) {
27480 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27481 auto maybe_layer = maybeCurrentDynamicLayer();
27482 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27483 int64_t cur_level = maybe_layer->layerId();
27484 if (!isBatchedAtLevel(self, cur_level)) {
27485 return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self);
27486 }
27487 Tensor self_value;
27488 optional<int64_t> self_bdim;
27489 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27490 auto results = batch_rule(self_value, self_bdim);
27491 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27492}
27493template <typename batch_rule_t, batch_rule_t batch_rule>
27494at::Tensor _test_autograd_multiple_dispatch_ntonly_generated_plumbing(const at::Tensor & self, bool b) {
27495 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27496 auto maybe_layer = maybeCurrentDynamicLayer();
27497 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27498 int64_t cur_level = maybe_layer->layerId();
27499 if (!isBatchedAtLevel(self, cur_level)) {
27500 return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b);
27501 }
27502 Tensor self_value;
27503 optional<int64_t> self_bdim;
27504 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27505 auto results = batch_rule(self_value, self_bdim, b);
27506 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27507}
27508template <typename batch_rule_t, batch_rule_t batch_rule>
27509at::Tensor _test_autograd_multiple_dispatch_view_generated_plumbing(const at::Tensor & self) {
27510 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27511 auto maybe_layer = maybeCurrentDynamicLayer();
27512 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27513 int64_t cur_level = maybe_layer->layerId();
27514 if (!isBatchedAtLevel(self, cur_level)) {
27515 return at::_ops::_test_autograd_multiple_dispatch_view::call(self);
27516 }
27517 Tensor self_value;
27518 optional<int64_t> self_bdim;
27519 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27520 auto results = batch_rule(self_value, self_bdim);
27521 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27522}
27523template <typename batch_rule_t, batch_rule_t batch_rule>
27524at::Tensor _test_autograd_multiple_dispatch_view_copy_generated_plumbing(const at::Tensor & self) {
27525 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27526 auto maybe_layer = maybeCurrentDynamicLayer();
27527 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27528 int64_t cur_level = maybe_layer->layerId();
27529 if (!isBatchedAtLevel(self, cur_level)) {
27530 return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self);
27531 }
27532 Tensor self_value;
27533 optional<int64_t> self_bdim;
27534 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27535 auto results = batch_rule(self_value, self_bdim);
27536 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27537}
27538template <typename batch_rule_t, batch_rule_t batch_rule>
27539at::Tensor segment_reduce_generated_plumbing(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial) {
27540 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27541 auto maybe_layer = maybeCurrentDynamicLayer();
27542 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27543 int64_t cur_level = maybe_layer->layerId();
27544 if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
27545 return at::_ops::segment_reduce::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
27546 }
27547 Tensor data_value;
27548 optional<int64_t> data_bdim;
27549 std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
27550 optional<Tensor> lengths_value;
27551 optional<int64_t> lengths_bdim;
27552 if (lengths) {
27553 std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
27554 }
27555 optional<Tensor> indices_value;
27556 optional<int64_t> indices_bdim;
27557 if (indices) {
27558 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices.value(), cur_level);
27559 }
27560 optional<Tensor> offsets_value;
27561 optional<int64_t> offsets_bdim;
27562 if (offsets) {
27563 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
27564 }
27565 auto results = batch_rule(data_value, data_bdim, reduce, lengths_value, lengths_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, axis, unsafe, initial);
27566 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27567}
27568template <typename batch_rule_t, batch_rule_t batch_rule>
27569at::Tensor _segment_reduce_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial) {
27570 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27571 auto maybe_layer = maybeCurrentDynamicLayer();
27572 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27573 int64_t cur_level = maybe_layer->layerId();
27574 if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
27575 return at::_ops::_segment_reduce_backward::call(grad, output, data, reduce, lengths, offsets, axis, initial);
27576 }
27577 Tensor grad_value;
27578 optional<int64_t> grad_bdim;
27579 std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
27580 Tensor output_value;
27581 optional<int64_t> output_bdim;
27582 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
27583 Tensor data_value;
27584 optional<int64_t> data_bdim;
27585 std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
27586 optional<Tensor> lengths_value;
27587 optional<int64_t> lengths_bdim;
27588 if (lengths) {
27589 std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
27590 }
27591 optional<Tensor> offsets_value;
27592 optional<int64_t> offsets_bdim;
27593 if (offsets) {
27594 std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
27595 }
27596 auto results = batch_rule(grad_value, grad_bdim, output_value, output_bdim, data_value, data_bdim, reduce, lengths_value, lengths_bdim, offsets_value, offsets_bdim, axis, initial);
27597 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27598}
27599template <typename batch_rule_t, batch_rule_t batch_rule>
27600at::Tensor pad_sequence_generated_plumbing(at::TensorList sequences, bool batch_first, double padding_value) {
27601 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27602 auto maybe_layer = maybeCurrentDynamicLayer();
27603 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27604 int64_t cur_level = maybe_layer->layerId();
27605 if (!isBatchedAtLevel(sequences, cur_level)) {
27606 return at::_ops::pad_sequence::call(sequences, batch_first, padding_value);
27607 }
27608
27609 auto results = batch_rule(sequences, batch_first, padding_value);
27610 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27611}
27612template <typename batch_rule_t, batch_rule_t batch_rule>
27613at::Tensor flatten_dense_tensors_generated_plumbing(at::TensorList tensors) {
27614 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27615 auto maybe_layer = maybeCurrentDynamicLayer();
27616 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27617 int64_t cur_level = maybe_layer->layerId();
27618 if (!isBatchedAtLevel(tensors, cur_level)) {
27619 return at::_ops::flatten_dense_tensors::call(tensors);
27620 }
27621
27622 auto results = batch_rule(tensors);
27623 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27624}
27625template <typename batch_rule_t, batch_rule_t batch_rule>
27626::std::vector<at::Tensor> unflatten_dense_tensors_generated_plumbing(const at::Tensor & flat, at::TensorList tensors) {
27627 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27628 auto maybe_layer = maybeCurrentDynamicLayer();
27629 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27630 int64_t cur_level = maybe_layer->layerId();
27631 if (!isBatchedAtLevel(flat, cur_level) && !isBatchedAtLevel(tensors, cur_level)) {
27632 return at::_ops::unflatten_dense_tensors::call(flat, tensors);
27633 }
27634 Tensor flat_value;
27635 optional<int64_t> flat_bdim;
27636 std::tie(flat_value, flat_bdim) = unwrapTensorAtLevel(flat, cur_level);
27637 auto results = batch_rule(flat_value, flat_bdim, tensors);
27638 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
27639}
27640template <typename batch_rule_t, batch_rule_t batch_rule>
27641at::Tensor _nested_tensor_from_tensor_list_generated_plumbing(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
27642 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27643 auto maybe_layer = maybeCurrentDynamicLayer();
27644 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27645 int64_t cur_level = maybe_layer->layerId();
27646 if (!isBatchedAtLevel(list, cur_level)) {
27647 return at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory);
27648 }
27649
27650 auto results = batch_rule(list, dtype, layout, device, pin_memory);
27651 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27652}
27653template <typename batch_rule_t, batch_rule_t batch_rule>
27654at::Tensor _fw_primal_copy_generated_plumbing(const at::Tensor & self, int64_t level) {
27655 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27656 auto maybe_layer = maybeCurrentDynamicLayer();
27657 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27658 int64_t cur_level = maybe_layer->layerId();
27659 if (!isBatchedAtLevel(self, cur_level)) {
27660 return at::_ops::_fw_primal_copy::call(self, level);
27661 }
27662 Tensor self_value;
27663 optional<int64_t> self_bdim;
27664 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27665 auto results = batch_rule(self_value, self_bdim, level);
27666 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27667}
27668template <typename batch_rule_t, batch_rule_t batch_rule>
27669at::Tensor _make_dual_copy_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
27670 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27671 auto maybe_layer = maybeCurrentDynamicLayer();
27672 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27673 int64_t cur_level = maybe_layer->layerId();
27674 if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
27675 return at::_ops::_make_dual_copy::call(primal, tangent, level);
27676 }
27677 Tensor primal_value;
27678 optional<int64_t> primal_bdim;
27679 std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level);
27680 Tensor tangent_value;
27681 optional<int64_t> tangent_bdim;
27682 std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level);
27683 auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
27684 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27685}
27686template <typename batch_rule_t, batch_rule_t batch_rule>
27687at::Tensor view_as_real_copy_generated_plumbing(const at::Tensor & self) {
27688 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27689 auto maybe_layer = maybeCurrentDynamicLayer();
27690 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27691 int64_t cur_level = maybe_layer->layerId();
27692 if (!isBatchedAtLevel(self, cur_level)) {
27693 return at::_ops::view_as_real_copy::call(self);
27694 }
27695 Tensor self_value;
27696 optional<int64_t> self_bdim;
27697 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27698 auto results = batch_rule(self_value, self_bdim);
27699 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27700}
27701template <typename batch_rule_t, batch_rule_t batch_rule>
27702at::Tensor view_as_complex_copy_generated_plumbing(const at::Tensor & self) {
27703 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27704 auto maybe_layer = maybeCurrentDynamicLayer();
27705 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27706 int64_t cur_level = maybe_layer->layerId();
27707 if (!isBatchedAtLevel(self, cur_level)) {
27708 return at::_ops::view_as_complex_copy::call(self);
27709 }
27710 Tensor self_value;
27711 optional<int64_t> self_bdim;
27712 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27713 auto results = batch_rule(self_value, self_bdim);
27714 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27715}
27716template <typename batch_rule_t, batch_rule_t batch_rule>
27717at::Tensor _conj_copy_generated_plumbing(const at::Tensor & self) {
27718 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27719 auto maybe_layer = maybeCurrentDynamicLayer();
27720 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27721 int64_t cur_level = maybe_layer->layerId();
27722 if (!isBatchedAtLevel(self, cur_level)) {
27723 return at::_ops::_conj_copy::call(self);
27724 }
27725 Tensor self_value;
27726 optional<int64_t> self_bdim;
27727 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27728 auto results = batch_rule(self_value, self_bdim);
27729 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27730}
27731template <typename batch_rule_t, batch_rule_t batch_rule>
27732at::Tensor _neg_view_copy_generated_plumbing(const at::Tensor & self) {
27733 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27734 auto maybe_layer = maybeCurrentDynamicLayer();
27735 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27736 int64_t cur_level = maybe_layer->layerId();
27737 if (!isBatchedAtLevel(self, cur_level)) {
27738 return at::_ops::_neg_view_copy::call(self);
27739 }
27740 Tensor self_value;
27741 optional<int64_t> self_bdim;
27742 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27743 auto results = batch_rule(self_value, self_bdim);
27744 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27745}
27746template <typename batch_rule_t, batch_rule_t batch_rule>
27747at::Tensor as_strided_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
27748 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27749 auto maybe_layer = maybeCurrentDynamicLayer();
27750 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27751 int64_t cur_level = maybe_layer->layerId();
27752 if (!isBatchedAtLevel(self, cur_level)) {
27753 return at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
27754 }
27755 Tensor self_value;
27756 optional<int64_t> self_bdim;
27757 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27758 auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
27759 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27760}
27761template <typename batch_rule_t, batch_rule_t batch_rule>
27762at::Tensor _sparse_broadcast_to_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
27763 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27764 auto maybe_layer = maybeCurrentDynamicLayer();
27765 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27766 int64_t cur_level = maybe_layer->layerId();
27767 if (!isBatchedAtLevel(self, cur_level)) {
27768 return at::_ops::_sparse_broadcast_to_copy::call(self, size);
27769 }
27770 Tensor self_value;
27771 optional<int64_t> self_bdim;
27772 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27773 auto results = batch_rule(self_value, self_bdim, size);
27774 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27775}
27776template <typename batch_rule_t, batch_rule_t batch_rule>
27777at::Tensor diagonal_copy_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
27778 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27779 auto maybe_layer = maybeCurrentDynamicLayer();
27780 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27781 int64_t cur_level = maybe_layer->layerId();
27782 if (!isBatchedAtLevel(self, cur_level)) {
27783 return at::_ops::diagonal_copy::call(self, offset, dim1, dim2);
27784 }
27785 Tensor self_value;
27786 optional<int64_t> self_bdim;
27787 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27788 auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
27789 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27790}
27791template <typename batch_rule_t, batch_rule_t batch_rule>
27792at::Tensor expand_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
27793 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27794 auto maybe_layer = maybeCurrentDynamicLayer();
27795 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27796 int64_t cur_level = maybe_layer->layerId();
27797 if (!isBatchedAtLevel(self, cur_level)) {
27798 return at::_ops::expand_copy::call(self, size, implicit);
27799 }
27800 Tensor self_value;
27801 optional<int64_t> self_bdim;
27802 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27803 auto results = batch_rule(self_value, self_bdim, size, implicit);
27804 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27805}
27806template <typename batch_rule_t, batch_rule_t batch_rule>
27807at::Tensor permute_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
27808 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27809 auto maybe_layer = maybeCurrentDynamicLayer();
27810 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27811 int64_t cur_level = maybe_layer->layerId();
27812 if (!isBatchedAtLevel(self, cur_level)) {
27813 return at::_ops::permute_copy::call(self, dims);
27814 }
27815 Tensor self_value;
27816 optional<int64_t> self_bdim;
27817 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27818 auto results = batch_rule(self_value, self_bdim, dims);
27819 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27820}
27821template <typename batch_rule_t, batch_rule_t batch_rule>
27822at::Tensor _reshape_alias_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
27823 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27824 auto maybe_layer = maybeCurrentDynamicLayer();
27825 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27826 int64_t cur_level = maybe_layer->layerId();
27827 if (!isBatchedAtLevel(self, cur_level)) {
27828 return at::_ops::_reshape_alias_copy::call(self, size, stride);
27829 }
27830 Tensor self_value;
27831 optional<int64_t> self_bdim;
27832 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27833 auto results = batch_rule(self_value, self_bdim, size, stride);
27834 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27835}
27836template <typename batch_rule_t, batch_rule_t batch_rule>
27837at::Tensor select_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
27838 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27839 auto maybe_layer = maybeCurrentDynamicLayer();
27840 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27841 int64_t cur_level = maybe_layer->layerId();
27842 if (!isBatchedAtLevel(self, cur_level)) {
27843 return at::_ops::select_copy_int::call(self, dim, index);
27844 }
27845 Tensor self_value;
27846 optional<int64_t> self_bdim;
27847 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27848 auto results = batch_rule(self_value, self_bdim, dim, index);
27849 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27850}
27851template <typename batch_rule_t, batch_rule_t batch_rule>
27852at::Tensor detach_copy_generated_plumbing(const at::Tensor & self) {
27853 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27854 auto maybe_layer = maybeCurrentDynamicLayer();
27855 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27856 int64_t cur_level = maybe_layer->layerId();
27857 if (!isBatchedAtLevel(self, cur_level)) {
27858 return at::_ops::detach_copy::call(self);
27859 }
27860 Tensor self_value;
27861 optional<int64_t> self_bdim;
27862 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27863 auto results = batch_rule(self_value, self_bdim);
27864 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27865}
27866template <typename batch_rule_t, batch_rule_t batch_rule>
27867at::Tensor slice_copy_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
27868 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27869 auto maybe_layer = maybeCurrentDynamicLayer();
27870 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27871 int64_t cur_level = maybe_layer->layerId();
27872 if (!isBatchedAtLevel(self, cur_level)) {
27873 return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
27874 }
27875 Tensor self_value;
27876 optional<int64_t> self_bdim;
27877 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27878 auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
27879 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27880}
27881template <typename batch_rule_t, batch_rule_t batch_rule>
27882::std::vector<at::Tensor> split_copy_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
27883 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27884 auto maybe_layer = maybeCurrentDynamicLayer();
27885 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27886 int64_t cur_level = maybe_layer->layerId();
27887 if (!isBatchedAtLevel(self, cur_level)) {
27888 return at::_ops::split_copy_Tensor::call(self, split_size, dim);
27889 }
27890 Tensor self_value;
27891 optional<int64_t> self_bdim;
27892 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27893 auto results = batch_rule(self_value, self_bdim, split_size, dim);
27894 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
27895}
27896template <typename batch_rule_t, batch_rule_t batch_rule>
27897::std::vector<at::Tensor> split_with_sizes_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
27898 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27899 auto maybe_layer = maybeCurrentDynamicLayer();
27900 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27901 int64_t cur_level = maybe_layer->layerId();
27902 if (!isBatchedAtLevel(self, cur_level)) {
27903 return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim);
27904 }
27905 Tensor self_value;
27906 optional<int64_t> self_bdim;
27907 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27908 auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
27909 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
27910}
27911template <typename batch_rule_t, batch_rule_t batch_rule>
27912at::Tensor squeeze_copy_generated_plumbing(const at::Tensor & self) {
27913 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27914 auto maybe_layer = maybeCurrentDynamicLayer();
27915 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27916 int64_t cur_level = maybe_layer->layerId();
27917 if (!isBatchedAtLevel(self, cur_level)) {
27918 return at::_ops::squeeze_copy::call(self);
27919 }
27920 Tensor self_value;
27921 optional<int64_t> self_bdim;
27922 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27923 auto results = batch_rule(self_value, self_bdim);
27924 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27925}
27926template <typename batch_rule_t, batch_rule_t batch_rule>
27927at::Tensor squeeze_copy_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
27928 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27929 auto maybe_layer = maybeCurrentDynamicLayer();
27930 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27931 int64_t cur_level = maybe_layer->layerId();
27932 if (!isBatchedAtLevel(self, cur_level)) {
27933 return at::_ops::squeeze_copy_dim::call(self, dim);
27934 }
27935 Tensor self_value;
27936 optional<int64_t> self_bdim;
27937 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27938 auto results = batch_rule(self_value, self_bdim, dim);
27939 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27940}
27941template <typename batch_rule_t, batch_rule_t batch_rule>
27942at::Tensor squeeze_copy_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
27943 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27944 auto maybe_layer = maybeCurrentDynamicLayer();
27945 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27946 int64_t cur_level = maybe_layer->layerId();
27947 if (!isBatchedAtLevel(self, cur_level)) {
27948 return at::_ops::squeeze_copy_dims::call(self, dim);
27949 }
27950 Tensor self_value;
27951 optional<int64_t> self_bdim;
27952 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27953 auto results = batch_rule(self_value, self_bdim, dim);
27954 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27955}
27956template <typename batch_rule_t, batch_rule_t batch_rule>
27957at::Tensor t_copy_generated_plumbing(const at::Tensor & self) {
27958 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27959 auto maybe_layer = maybeCurrentDynamicLayer();
27960 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27961 int64_t cur_level = maybe_layer->layerId();
27962 if (!isBatchedAtLevel(self, cur_level)) {
27963 return at::_ops::t_copy::call(self);
27964 }
27965 Tensor self_value;
27966 optional<int64_t> self_bdim;
27967 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27968 auto results = batch_rule(self_value, self_bdim);
27969 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27970}
27971template <typename batch_rule_t, batch_rule_t batch_rule>
27972at::Tensor transpose_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
27973 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27974 auto maybe_layer = maybeCurrentDynamicLayer();
27975 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27976 int64_t cur_level = maybe_layer->layerId();
27977 if (!isBatchedAtLevel(self, cur_level)) {
27978 return at::_ops::transpose_copy_int::call(self, dim0, dim1);
27979 }
27980 Tensor self_value;
27981 optional<int64_t> self_bdim;
27982 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27983 auto results = batch_rule(self_value, self_bdim, dim0, dim1);
27984 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
27985}
27986template <typename batch_rule_t, batch_rule_t batch_rule>
27987at::Tensor unsqueeze_copy_generated_plumbing(const at::Tensor & self, int64_t dim) {
27988 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
27989 auto maybe_layer = maybeCurrentDynamicLayer();
27990 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
27991 int64_t cur_level = maybe_layer->layerId();
27992 if (!isBatchedAtLevel(self, cur_level)) {
27993 return at::_ops::unsqueeze_copy::call(self, dim);
27994 }
27995 Tensor self_value;
27996 optional<int64_t> self_bdim;
27997 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
27998 auto results = batch_rule(self_value, self_bdim, dim);
27999 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28000}
28001template <typename batch_rule_t, batch_rule_t batch_rule>
28002at::Tensor _indices_copy_generated_plumbing(const at::Tensor & self) {
28003 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28004 auto maybe_layer = maybeCurrentDynamicLayer();
28005 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28006 int64_t cur_level = maybe_layer->layerId();
28007 if (!isBatchedAtLevel(self, cur_level)) {
28008 return at::_ops::_indices_copy::call(self);
28009 }
28010 Tensor self_value;
28011 optional<int64_t> self_bdim;
28012 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28013 auto results = batch_rule(self_value, self_bdim);
28014 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28015}
28016template <typename batch_rule_t, batch_rule_t batch_rule>
28017at::Tensor _values_copy_generated_plumbing(const at::Tensor & self) {
28018 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28019 auto maybe_layer = maybeCurrentDynamicLayer();
28020 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28021 int64_t cur_level = maybe_layer->layerId();
28022 if (!isBatchedAtLevel(self, cur_level)) {
28023 return at::_ops::_values_copy::call(self);
28024 }
28025 Tensor self_value;
28026 optional<int64_t> self_bdim;
28027 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28028 auto results = batch_rule(self_value, self_bdim);
28029 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28030}
28031template <typename batch_rule_t, batch_rule_t batch_rule>
28032at::Tensor indices_copy_generated_plumbing(const at::Tensor & self) {
28033 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28034 auto maybe_layer = maybeCurrentDynamicLayer();
28035 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28036 int64_t cur_level = maybe_layer->layerId();
28037 if (!isBatchedAtLevel(self, cur_level)) {
28038 return at::_ops::indices_copy::call(self);
28039 }
28040 Tensor self_value;
28041 optional<int64_t> self_bdim;
28042 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28043 auto results = batch_rule(self_value, self_bdim);
28044 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28045}
28046template <typename batch_rule_t, batch_rule_t batch_rule>
28047at::Tensor values_copy_generated_plumbing(const at::Tensor & self) {
28048 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28049 auto maybe_layer = maybeCurrentDynamicLayer();
28050 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28051 int64_t cur_level = maybe_layer->layerId();
28052 if (!isBatchedAtLevel(self, cur_level)) {
28053 return at::_ops::values_copy::call(self);
28054 }
28055 Tensor self_value;
28056 optional<int64_t> self_bdim;
28057 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28058 auto results = batch_rule(self_value, self_bdim);
28059 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28060}
28061template <typename batch_rule_t, batch_rule_t batch_rule>
28062at::Tensor crow_indices_copy_generated_plumbing(const at::Tensor & self) {
28063 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28064 auto maybe_layer = maybeCurrentDynamicLayer();
28065 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28066 int64_t cur_level = maybe_layer->layerId();
28067 if (!isBatchedAtLevel(self, cur_level)) {
28068 return at::_ops::crow_indices_copy::call(self);
28069 }
28070 Tensor self_value;
28071 optional<int64_t> self_bdim;
28072 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28073 auto results = batch_rule(self_value, self_bdim);
28074 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28075}
28076template <typename batch_rule_t, batch_rule_t batch_rule>
28077at::Tensor col_indices_copy_generated_plumbing(const at::Tensor & self) {
28078 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28079 auto maybe_layer = maybeCurrentDynamicLayer();
28080 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28081 int64_t cur_level = maybe_layer->layerId();
28082 if (!isBatchedAtLevel(self, cur_level)) {
28083 return at::_ops::col_indices_copy::call(self);
28084 }
28085 Tensor self_value;
28086 optional<int64_t> self_bdim;
28087 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28088 auto results = batch_rule(self_value, self_bdim);
28089 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28090}
28091template <typename batch_rule_t, batch_rule_t batch_rule>
28092at::Tensor ccol_indices_copy_generated_plumbing(const at::Tensor & self) {
28093 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28094 auto maybe_layer = maybeCurrentDynamicLayer();
28095 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28096 int64_t cur_level = maybe_layer->layerId();
28097 if (!isBatchedAtLevel(self, cur_level)) {
28098 return at::_ops::ccol_indices_copy::call(self);
28099 }
28100 Tensor self_value;
28101 optional<int64_t> self_bdim;
28102 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28103 auto results = batch_rule(self_value, self_bdim);
28104 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28105}
28106template <typename batch_rule_t, batch_rule_t batch_rule>
28107at::Tensor row_indices_copy_generated_plumbing(const at::Tensor & self) {
28108 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28109 auto maybe_layer = maybeCurrentDynamicLayer();
28110 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28111 int64_t cur_level = maybe_layer->layerId();
28112 if (!isBatchedAtLevel(self, cur_level)) {
28113 return at::_ops::row_indices_copy::call(self);
28114 }
28115 Tensor self_value;
28116 optional<int64_t> self_bdim;
28117 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28118 auto results = batch_rule(self_value, self_bdim);
28119 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28120}
28121template <typename batch_rule_t, batch_rule_t batch_rule>
28122::std::vector<at::Tensor> unbind_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
28123 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28124 auto maybe_layer = maybeCurrentDynamicLayer();
28125 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28126 int64_t cur_level = maybe_layer->layerId();
28127 if (!isBatchedAtLevel(self, cur_level)) {
28128 return at::_ops::unbind_copy_int::call(self, dim);
28129 }
28130 Tensor self_value;
28131 optional<int64_t> self_bdim;
28132 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28133 auto results = batch_rule(self_value, self_bdim, dim);
28134 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
28135}
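// Ops such as unbind_copy.int return a vector of tensors; the plumbing then
// re-wraps every element at the current level via makeBatchedVector, with all
// elements sharing the single batch dim returned by the rule. A minimal sketch
// of a compatible batch rule is given below for illustration only (the rule
// name and the use of moveBatchDimToFront are assumptions, not the batching
// rule functorch actually registers, and `dim` is assumed to already be
// normalized to a non-negative index):
//
//   std::tuple<std::vector<at::Tensor>, c10::optional<int64_t>>
//   unbind_copy_int_batch_rule(const at::Tensor& self,
//                              c10::optional<int64_t> self_bdim, int64_t dim) {
//     auto self_ = moveBatchDimToFront(self, self_bdim);
//     // With the batch dim at the front, logical dim `dim` becomes `dim + 1`.
//     auto pieces = at::unbind_copy(self_, self_bdim.has_value() ? dim + 1 : dim);
//     return std::make_tuple(std::move(pieces),
//                            self_bdim.has_value() ? c10::optional<int64_t>(0)
//                                                  : c10::nullopt);
//   }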
28136template <typename batch_rule_t, batch_rule_t batch_rule>
28137void unbind_copy_int_out_generated_plumbing(const at::Tensor & self, int64_t dim, at::TensorList out) {
28138 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28139 auto maybe_layer = maybeCurrentDynamicLayer();
28140 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
28141 int64_t cur_level = maybe_layer->layerId();
28142 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
28143 return at::_ops::unbind_copy_int_out::call(self, dim, out);
28144 }
28145 Tensor self_value;
28146 optional<int64_t> self_bdim;
28147 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28148 batch_rule(self_value, self_bdim, dim, out);
28149}
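// The *_out overloads return void: the plumbing checks both the inputs and the
// `out` TensorList for batching ("gen_vmap_plumbing_no_returns"), unwraps the
// regular tensor arguments, and hands the TensorList through to the batch rule
// unchanged, so the rule itself is responsible for writing into `out`.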
28150template <typename batch_rule_t, batch_rule_t batch_rule>
28151void split_copy_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
28152 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28153 auto maybe_layer = maybeCurrentDynamicLayer();
28154 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
28155 int64_t cur_level = maybe_layer->layerId();
28156 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
28157 return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
28158 }
28159 Tensor self_value;
28160 optional<int64_t> self_bdim;
28161 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28162 batch_rule(self_value, self_bdim, split_size, dim, out);
28163}
28164template <typename batch_rule_t, batch_rule_t batch_rule>
28165void split_with_sizes_copy_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
28166 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28167 auto maybe_layer = maybeCurrentDynamicLayer();
28168 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
28169 int64_t cur_level = maybe_layer->layerId();
28170 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
28171 return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
28172 }
28173 Tensor self_value;
28174 optional<int64_t> self_bdim;
28175 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28176 batch_rule(self_value, self_bdim, split_sizes, dim, out);
28177}
28178template <typename batch_rule_t, batch_rule_t batch_rule>
28179at::Tensor view_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
28180 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28181 auto maybe_layer = maybeCurrentDynamicLayer();
28182 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28183 int64_t cur_level = maybe_layer->layerId();
28184 if (!isBatchedAtLevel(self, cur_level)) {
28185 return at::_ops::view_copy::call(self, size);
28186 }
28187 Tensor self_value;
28188 optional<int64_t> self_bdim;
28189 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28190 auto results = batch_rule(self_value, self_bdim, size);
28191 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28192}
28193template <typename batch_rule_t, batch_rule_t batch_rule>
28194at::Tensor view_copy_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
28195 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28196 auto maybe_layer = maybeCurrentDynamicLayer();
28197 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28198 int64_t cur_level = maybe_layer->layerId();
28199 if (!isBatchedAtLevel(self, cur_level)) {
28200 return at::_ops::view_copy_dtype::call(self, dtype);
28201 }
28202 Tensor self_value;
28203 optional<int64_t> self_bdim;
28204 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28205 auto results = batch_rule(self_value, self_bdim, dtype);
28206 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28207}
28208template <typename batch_rule_t, batch_rule_t batch_rule>
28209at::Tensor unfold_copy_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
28210 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28211 auto maybe_layer = maybeCurrentDynamicLayer();
28212 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28213 int64_t cur_level = maybe_layer->layerId();
28214 if (!isBatchedAtLevel(self, cur_level)) {
28215 return at::_ops::unfold_copy::call(self, dimension, size, step);
28216 }
28217 Tensor self_value;
28218 optional<int64_t> self_bdim;
28219 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28220 auto results = batch_rule(self_value, self_bdim, dimension, size, step);
28221 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28222}
28223template <typename batch_rule_t, batch_rule_t batch_rule>
28224at::Tensor alias_copy_generated_plumbing(const at::Tensor & self) {
28225 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28226 auto maybe_layer = maybeCurrentDynamicLayer();
28227 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28228 int64_t cur_level = maybe_layer->layerId();
28229 if (!isBatchedAtLevel(self, cur_level)) {
28230 return at::_ops::alias_copy::call(self);
28231 }
28232 Tensor self_value;
28233 optional<int64_t> self_bdim;
28234 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28235 auto results = batch_rule(self_value, self_bdim);
28236 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28237}
28238template <typename batch_rule_t, batch_rule_t batch_rule>
28239at::Tensor to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {
28240 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28241 auto maybe_layer = maybeCurrentDynamicLayer();
28242 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28243 int64_t cur_level = maybe_layer->layerId();
28244 if (!isBatchedAtLevel(self, cur_level)) {
28245 return at::_ops::to_padded_tensor::call(self, padding, output_size);
28246 }
28247 Tensor self_value;
28248 optional<int64_t> self_bdim;
28249 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28250 auto results = batch_rule(self_value, self_bdim, padding, output_size);
28251 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28252}
28253template <typename batch_rule_t, batch_rule_t batch_rule>
28254at::Tensor _nested_tensor_softmax_with_shape_generated_plumbing(const at::Tensor & self, const at::Tensor & query) {
28255 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28256 auto maybe_layer = maybeCurrentDynamicLayer();
28257 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28258 int64_t cur_level = maybe_layer->layerId();
28259 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(query, cur_level)) {
28260 return at::_ops::_nested_tensor_softmax_with_shape::call(self, query);
28261 }
28262 Tensor self_value;
28263 optional<int64_t> self_bdim;
28264 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28265 Tensor query_value;
28266 optional<int64_t> query_bdim;
28267 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28268 auto results = batch_rule(self_value, self_bdim, query_value, query_bdim);
28269 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28270}
28271template <typename batch_rule_t, batch_rule_t batch_rule>
28272at::Tensor _transformer_encoder_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type) {
28273 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28274 auto maybe_layer = maybeCurrentDynamicLayer();
28275 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28276 int64_t cur_level = maybe_layer->layerId();
28277 if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
28278 return at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
28279 }
28280 Tensor src_value;
28281 optional<int64_t> src_bdim;
28282 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
28283 Tensor qkv_weight_value;
28284 optional<int64_t> qkv_weight_bdim;
28285 std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
28286 Tensor qkv_bias_value;
28287 optional<int64_t> qkv_bias_bdim;
28288 std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
28289 Tensor proj_weight_value;
28290 optional<int64_t> proj_weight_bdim;
28291 std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
28292 Tensor proj_bias_value;
28293 optional<int64_t> proj_bias_bdim;
28294 std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
28295 Tensor norm_weight_1_value;
28296 optional<int64_t> norm_weight_1_bdim;
28297 std::tie(norm_weight_1_value, norm_weight_1_bdim) = unwrapTensorAtLevel(norm_weight_1, cur_level);
28298 Tensor norm_bias_1_value;
28299 optional<int64_t> norm_bias_1_bdim;
28300 std::tie(norm_bias_1_value, norm_bias_1_bdim) = unwrapTensorAtLevel(norm_bias_1, cur_level);
28301 Tensor norm_weight_2_value;
28302 optional<int64_t> norm_weight_2_bdim;
28303 std::tie(norm_weight_2_value, norm_weight_2_bdim) = unwrapTensorAtLevel(norm_weight_2, cur_level);
28304 Tensor norm_bias_2_value;
28305 optional<int64_t> norm_bias_2_bdim;
28306 std::tie(norm_bias_2_value, norm_bias_2_bdim) = unwrapTensorAtLevel(norm_bias_2, cur_level);
28307 Tensor ffn_weight_1_value;
28308 optional<int64_t> ffn_weight_1_bdim;
28309 std::tie(ffn_weight_1_value, ffn_weight_1_bdim) = unwrapTensorAtLevel(ffn_weight_1, cur_level);
28310 Tensor ffn_bias_1_value;
28311 optional<int64_t> ffn_bias_1_bdim;
28312 std::tie(ffn_bias_1_value, ffn_bias_1_bdim) = unwrapTensorAtLevel(ffn_bias_1, cur_level);
28313 Tensor ffn_weight_2_value;
28314 optional<int64_t> ffn_weight_2_bdim;
28315 std::tie(ffn_weight_2_value, ffn_weight_2_bdim) = unwrapTensorAtLevel(ffn_weight_2, cur_level);
28316 Tensor ffn_bias_2_value;
28317 optional<int64_t> ffn_bias_2_bdim;
28318 std::tie(ffn_bias_2_value, ffn_bias_2_bdim) = unwrapTensorAtLevel(ffn_bias_2, cur_level);
28319 optional<Tensor> mask_value;
28320 optional<int64_t> mask_bdim;
28321 if (mask) {
28322 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
28323 }
28324 auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, mask_type);
28325 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28326}
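// Optional tensor arguments (such as `mask` above) follow the same unwrapping
// scheme but stay wrapped in c10::optional: the plumbing only unwraps when the
// optional is engaged and otherwise passes an empty value/bdim pair through,
// so batch rules must accept optional<Tensor> / optional<int64_t> for these
// parameters. Non-tensor arguments (ints, bools, doubles) are forwarded as-is.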
28327template <typename batch_rule_t, batch_rule_t batch_rule>
28328::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type) {
28329 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28330 auto maybe_layer = maybeCurrentDynamicLayer();
28331 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28332 int64_t cur_level = maybe_layer->layerId();
28333 if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
28334 return at::_ops::_native_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
28335 }
28336 Tensor query_value;
28337 optional<int64_t> query_bdim;
28338 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28339 Tensor key_value;
28340 optional<int64_t> key_bdim;
28341 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28342 Tensor value_value;
28343 optional<int64_t> value_bdim;
28344 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28345 Tensor qkv_weight_value;
28346 optional<int64_t> qkv_weight_bdim;
28347 std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
28348 Tensor qkv_bias_value;
28349 optional<int64_t> qkv_bias_bdim;
28350 std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
28351 Tensor proj_weight_value;
28352 optional<int64_t> proj_weight_bdim;
28353 std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
28354 Tensor proj_bias_value;
28355 optional<int64_t> proj_bias_bdim;
28356 std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
28357 optional<Tensor> mask_value;
28358 optional<int64_t> mask_bdim;
28359 if (mask) {
28360 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
28361 }
28362 auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, need_weights, average_attn_weights, mask_type);
28363 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
28364}
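// Multi-output ops receive their results from the batch rule as one flat tuple
// that interleaves values and batch dims, i.e. (out0, out0_bdim, out1,
// out1_bdim, ...); the plumbing re-wraps each (value, bdim) pair independently
// with makeBatched before packing the public return tuple, as in the return
// statement above.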
28365template <typename batch_rule_t, batch_rule_t batch_rule>
28366at::Tensor scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal) {
28367 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28368 auto maybe_layer = maybeCurrentDynamicLayer();
28369 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28370 int64_t cur_level = maybe_layer->layerId();
28371 if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
28372 return at::_ops::scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, is_causal);
28373 }
28374 Tensor query_value;
28375 optional<int64_t> query_bdim;
28376 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28377 Tensor key_value;
28378 optional<int64_t> key_bdim;
28379 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28380 Tensor value_value;
28381 optional<int64_t> value_bdim;
28382 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28383 optional<Tensor> attn_mask_value;
28384 optional<int64_t> attn_mask_bdim;
28385 if (attn_mask) {
28386 std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
28387 }
28388 auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal);
28389 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28390}
28391template <typename batch_rule_t, batch_rule_t batch_rule>
28392::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal) {
28393 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28394 auto maybe_layer = maybeCurrentDynamicLayer();
28395 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28396 int64_t cur_level = maybe_layer->layerId();
28397 if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
28398 return at::_ops::_scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
28399 }
28400 Tensor query_value;
28401 optional<int64_t> query_bdim;
28402 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28403 Tensor key_value;
28404 optional<int64_t> key_bdim;
28405 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28406 Tensor value_value;
28407 optional<int64_t> value_bdim;
28408 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28409 optional<Tensor> attn_mask_value;
28410 optional<int64_t> attn_mask_bdim;
28411 if (attn_mask) {
28412 std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
28413 }
28414 auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, need_attn_weights, is_causal);
28415 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
28416}
28417template <typename batch_rule_t, batch_rule_t batch_rule>
28418::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask) {
28419 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28420 auto maybe_layer = maybeCurrentDynamicLayer();
28421 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28422 int64_t cur_level = maybe_layer->layerId();
28423 if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level) && !isBatchedAtLevel(dropout_mask, cur_level)) {
28424 return at::_ops::_scaled_dot_product_attention_math::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask);
28425 }
28426 Tensor query_value;
28427 optional<int64_t> query_bdim;
28428 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28429 Tensor key_value;
28430 optional<int64_t> key_bdim;
28431 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28432 Tensor value_value;
28433 optional<int64_t> value_bdim;
28434 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28435 optional<Tensor> attn_mask_value;
28436 optional<int64_t> attn_mask_bdim;
28437 if (attn_mask) {
28438 std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
28439 }
28440 optional<Tensor> dropout_mask_value;
28441 optional<int64_t> dropout_mask_bdim;
28442 if (dropout_mask) {
28443 std::tie(dropout_mask_value, dropout_mask_bdim) = unwrapTensorAtLevel(dropout_mask.value(), cur_level);
28444 }
28445 auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, dropout_mask_value, dropout_mask_bdim);
28446 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
28447}
28448template <typename batch_rule_t, batch_rule_t batch_rule>
28449::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {
28450 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28451 auto maybe_layer = maybeCurrentDynamicLayer();
28452 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28453 int64_t cur_level = maybe_layer->layerId();
28454 if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) {
28455 return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
28456 }
28457 Tensor grad_out_value;
28458 optional<int64_t> grad_out_bdim;
28459 std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
28460 Tensor query_value;
28461 optional<int64_t> query_bdim;
28462 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28463 Tensor key_value;
28464 optional<int64_t> key_bdim;
28465 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28466 Tensor value_value;
28467 optional<int64_t> value_bdim;
28468 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28469 Tensor out_value;
28470 optional<int64_t> out_bdim;
28471 std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
28472 Tensor logsumexp_value;
28473 optional<int64_t> logsumexp_bdim;
28474 std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
28475 Tensor cum_seq_q_value;
28476 optional<int64_t> cum_seq_q_bdim;
28477 std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level);
28478 Tensor cum_seq_k_value;
28479 optional<int64_t> cum_seq_k_bdim;
28480 std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level);
28481 auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
28482 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
28483}
28484template <typename batch_rule_t, batch_rule_t batch_rule>
28485::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, bool compute_log_sumexp, bool is_causal) {
28486 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28487 auto maybe_layer = maybeCurrentDynamicLayer();
28488 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28489 int64_t cur_level = maybe_layer->layerId();
28490 if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level)) {
28491 return at::_ops::_scaled_dot_product_efficient_attention::call(query, key, value, compute_log_sumexp, is_causal);
28492 }
28493 Tensor query_value;
28494 optional<int64_t> query_bdim;
28495 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28496 Tensor key_value;
28497 optional<int64_t> key_bdim;
28498 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28499 Tensor value_value;
28500 optional<int64_t> value_bdim;
28501 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28502 auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, compute_log_sumexp, is_causal);
28503 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
28504}
28505template <typename batch_rule_t, batch_rule_t batch_rule>
28506::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {
28507 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28508 auto maybe_layer = maybeCurrentDynamicLayer();
28509 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28510 int64_t cur_level = maybe_layer->layerId();
28511 if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level)) {
28512 return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
28513 }
28514 Tensor grad_out__value;
28515 optional<int64_t> grad_out__bdim;
28516 std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level);
28517 Tensor query_value;
28518 optional<int64_t> query_bdim;
28519 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28520 Tensor key_value;
28521 optional<int64_t> key_bdim;
28522 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28523 Tensor value_value;
28524 optional<int64_t> value_bdim;
28525 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28526 Tensor out_value;
28527 optional<int64_t> out_bdim;
28528 std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
28529 Tensor logsumexp_value;
28530 optional<int64_t> logsumexp_bdim;
28531 std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
28532 auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, is_causal, chunk_grad_outputs);
28533 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
28534}
28535template <typename batch_rule_t, batch_rule_t batch_rule>
28536::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, int64_t philox_seed, int64_t philox_offset) {
28537 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28538 auto maybe_layer = maybeCurrentDynamicLayer();
28539 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28540 int64_t cur_level = maybe_layer->layerId();
28541 if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) {
28542 return at::_ops::_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
28543 }
28544 Tensor grad_out_value;
28545 optional<int64_t> grad_out_bdim;
28546 std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
28547 Tensor query_value;
28548 optional<int64_t> query_bdim;
28549 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28550 Tensor key_value;
28551 optional<int64_t> key_bdim;
28552 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28553 Tensor value_value;
28554 optional<int64_t> value_bdim;
28555 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28556 Tensor out_value;
28557 optional<int64_t> out_bdim;
28558 std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
28559 Tensor logsumexp_value;
28560 optional<int64_t> logsumexp_bdim;
28561 std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
28562 Tensor cum_seq_q_value;
28563 optional<int64_t> cum_seq_q_bdim;
28564 std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level);
28565 Tensor cum_seq_k_value;
28566 optional<int64_t> cum_seq_k_bdim;
28567 std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level);
28568 auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset);
28569 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
28570}
28571template <typename batch_rule_t, batch_rule_t batch_rule>
28572::std::tuple<at::Tensor,at::Tensor> _efficient_attention_forward_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::optional<int64_t> max_seqlen_q, bool compute_log_sumexp, bool causal) {
28573 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28574 auto maybe_layer = maybeCurrentDynamicLayer();
28575 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28576 int64_t cur_level = maybe_layer->layerId();
28577 if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(cu_seqlens_q, cur_level) && !isBatchedAtLevel(cu_seqlens_k, cur_level)) {
28578 return at::_ops::_efficient_attention_forward::call(query, key, value, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, compute_log_sumexp, causal);
28579 }
28580 Tensor query_value;
28581 optional<int64_t> query_bdim;
28582 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28583 Tensor key_value;
28584 optional<int64_t> key_bdim;
28585 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28586 Tensor value_value;
28587 optional<int64_t> value_bdim;
28588 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28589 optional<Tensor> cu_seqlens_q_value;
28590 optional<int64_t> cu_seqlens_q_bdim;
28591 if (cu_seqlens_q) {
28592 std::tie(cu_seqlens_q_value, cu_seqlens_q_bdim) = unwrapTensorAtLevel(cu_seqlens_q.value(), cur_level);
28593 }
28594 optional<Tensor> cu_seqlens_k_value;
28595 optional<int64_t> cu_seqlens_k_bdim;
28596 if (cu_seqlens_k) {
28597 std::tie(cu_seqlens_k_value, cu_seqlens_k_bdim) = unwrapTensorAtLevel(cu_seqlens_k.value(), cur_level);
28598 }
28599 auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, cu_seqlens_q_value, cu_seqlens_q_bdim, cu_seqlens_k_value, cu_seqlens_k_bdim, max_seqlen_q, compute_log_sumexp, causal);
28600 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
28601}
28602template <typename batch_rule_t, batch_rule_t batch_rule>
28603::std::tuple<at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, bool is_causal, bool chunk_grad_outputs) {
28604 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28605 auto maybe_layer = maybeCurrentDynamicLayer();
28606 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28607 int64_t cur_level = maybe_layer->layerId();
28608 if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level)) {
28609 return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, out, logsumexp, is_causal, chunk_grad_outputs);
28610 }
28611 Tensor grad_out__value;
28612 optional<int64_t> grad_out__bdim;
28613 std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level);
28614 Tensor query_value;
28615 optional<int64_t> query_bdim;
28616 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28617 Tensor key_value;
28618 optional<int64_t> key_bdim;
28619 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28620 Tensor value_value;
28621 optional<int64_t> value_bdim;
28622 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28623 Tensor out_value;
28624 optional<int64_t> out_bdim;
28625 std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
28626 Tensor logsumexp_value;
28627 optional<int64_t> logsumexp_bdim;
28628 std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
28629 auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, is_causal, chunk_grad_outputs);
28630 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
28631}
28632template <typename batch_rule_t, batch_rule_t batch_rule>
28633at::Tensor _triton_scaled_dot_attention_generated_plumbing(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {
28634 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28635 auto maybe_layer = maybeCurrentDynamicLayer();
28636 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28637 int64_t cur_level = maybe_layer->layerId();
28638 if (!isBatchedAtLevel(q, cur_level) && !isBatchedAtLevel(k, cur_level) && !isBatchedAtLevel(v, cur_level)) {
28639 return at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p);
28640 }
28641 Tensor q_value;
28642 optional<int64_t> q_bdim;
28643 std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
28644 Tensor k_value;
28645 optional<int64_t> k_bdim;
28646 std::tie(k_value, k_bdim) = unwrapTensorAtLevel(k, cur_level);
28647 Tensor v_value;
28648 optional<int64_t> v_bdim;
28649 std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
28650 auto results = batch_rule(q_value, q_bdim, k_value, k_bdim, v_value, v_bdim, dropout_p);
28651 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28652}
28653template <typename batch_rule_t, batch_rule_t batch_rule>
28654at::Tensor _triton_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask) {
28655 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28656 auto maybe_layer = maybeCurrentDynamicLayer();
28657 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28658 int64_t cur_level = maybe_layer->layerId();
28659 if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
28660 return at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
28661 }
28662 Tensor query_value;
28663 optional<int64_t> query_bdim;
28664 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28665 Tensor key_value;
28666 optional<int64_t> key_bdim;
28667 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28668 Tensor value_value;
28669 optional<int64_t> value_bdim;
28670 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28671 Tensor qkv_weight_value;
28672 optional<int64_t> qkv_weight_bdim;
28673 std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
28674 Tensor qkv_bias_value;
28675 optional<int64_t> qkv_bias_bdim;
28676 std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
28677 Tensor proj_weight_value;
28678 optional<int64_t> proj_weight_bdim;
28679 std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
28680 Tensor proj_bias_value;
28681 optional<int64_t> proj_bias_bdim;
28682 std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
28683 optional<Tensor> mask_value;
28684 optional<int64_t> mask_bdim;
28685 if (mask) {
28686 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
28687 }
28688 auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim);
28689 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28690}
28691template <typename batch_rule_t, batch_rule_t batch_rule>
28692at::Tensor special_airy_ai_generated_plumbing(const at::Tensor & x) {
28693 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28694 auto maybe_layer = maybeCurrentDynamicLayer();
28695 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28696 int64_t cur_level = maybe_layer->layerId();
28697 if (!isBatchedAtLevel(x, cur_level)) {
28698 return at::_ops::special_airy_ai::call(x);
28699 }
28700 Tensor x_value;
28701 optional<int64_t> x_bdim;
28702 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
28703 auto results = batch_rule(x_value, x_bdim);
28704 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28705}
28706template <typename batch_rule_t, batch_rule_t batch_rule>
28707::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transformer_decoder_only_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value) {
28708 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28709 auto maybe_layer = maybeCurrentDynamicLayer();
28710 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28711 int64_t cur_level = maybe_layer->layerId();
28712 if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(incr_key, cur_level) && !isBatchedAtLevel(incr_value, cur_level)) {
28713 return at::_ops::_transformer_decoder_only_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
28714 }
28715 Tensor src_value;
28716 optional<int64_t> src_bdim;
28717 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
28718 Tensor qkv_weight_value;
28719 optional<int64_t> qkv_weight_bdim;
28720 std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
28721 Tensor qkv_bias_value;
28722 optional<int64_t> qkv_bias_bdim;
28723 std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
28724 Tensor proj_weight_value;
28725 optional<int64_t> proj_weight_bdim;
28726 std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
28727 Tensor proj_bias_value;
28728 optional<int64_t> proj_bias_bdim;
28729 std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
28730 Tensor norm_weight_1_value;
28731 optional<int64_t> norm_weight_1_bdim;
28732 std::tie(norm_weight_1_value, norm_weight_1_bdim) = unwrapTensorAtLevel(norm_weight_1, cur_level);
28733 Tensor norm_bias_1_value;
28734 optional<int64_t> norm_bias_1_bdim;
28735 std::tie(norm_bias_1_value, norm_bias_1_bdim) = unwrapTensorAtLevel(norm_bias_1, cur_level);
28736 Tensor norm_weight_2_value;
28737 optional<int64_t> norm_weight_2_bdim;
28738 std::tie(norm_weight_2_value, norm_weight_2_bdim) = unwrapTensorAtLevel(norm_weight_2, cur_level);
28739 Tensor norm_bias_2_value;
28740 optional<int64_t> norm_bias_2_bdim;
28741 std::tie(norm_bias_2_value, norm_bias_2_bdim) = unwrapTensorAtLevel(norm_bias_2, cur_level);
28742 Tensor ffn_weight_1_value;
28743 optional<int64_t> ffn_weight_1_bdim;
28744 std::tie(ffn_weight_1_value, ffn_weight_1_bdim) = unwrapTensorAtLevel(ffn_weight_1, cur_level);
28745 Tensor ffn_bias_1_value;
28746 optional<int64_t> ffn_bias_1_bdim;
28747 std::tie(ffn_bias_1_value, ffn_bias_1_bdim) = unwrapTensorAtLevel(ffn_bias_1, cur_level);
28748 Tensor ffn_weight_2_value;
28749 optional<int64_t> ffn_weight_2_bdim;
28750 std::tie(ffn_weight_2_value, ffn_weight_2_bdim) = unwrapTensorAtLevel(ffn_weight_2, cur_level);
28751 Tensor ffn_bias_2_value;
28752 optional<int64_t> ffn_bias_2_bdim;
28753 std::tie(ffn_bias_2_value, ffn_bias_2_bdim) = unwrapTensorAtLevel(ffn_bias_2, cur_level);
28754 optional<Tensor> mask_value;
28755 optional<int64_t> mask_bdim;
28756 if (mask) {
28757 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
28758 }
28759 optional<Tensor> incr_key_value;
28760 optional<int64_t> incr_key_bdim;
28761 if (incr_key) {
28762 std::tie(incr_key_value, incr_key_bdim) = unwrapTensorAtLevel(incr_key.value(), cur_level);
28763 }
28764 optional<Tensor> incr_value_value;
28765 optional<int64_t> incr_value_bdim;
28766 if (incr_value) {
28767 std::tie(incr_value_value, incr_value_bdim) = unwrapTensorAtLevel(incr_value.value(), cur_level);
28768 }
28769 auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, incr_key_value, incr_key_bdim, incr_value_value, incr_value_bdim);
28770 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
28771}
28772template <typename batch_rule_t, batch_rule_t batch_rule>
28773::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_decoder_only_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights) {
28774 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28775 auto maybe_layer = maybeCurrentDynamicLayer();
28776 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28777 int64_t cur_level = maybe_layer->layerId();
28778 if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(incr_key, cur_level) && !isBatchedAtLevel(incr_value, cur_level)) {
28779 return at::_ops::_native_decoder_only_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights);
28780 }
28781 Tensor query_value;
28782 optional<int64_t> query_bdim;
28783 std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
28784 Tensor key_value;
28785 optional<int64_t> key_bdim;
28786 std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
28787 Tensor value_value;
28788 optional<int64_t> value_bdim;
28789 std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
28790 Tensor qkv_weight_value;
28791 optional<int64_t> qkv_weight_bdim;
28792 std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
28793 Tensor qkv_bias_value;
28794 optional<int64_t> qkv_bias_bdim;
28795 std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
28796 Tensor proj_weight_value;
28797 optional<int64_t> proj_weight_bdim;
28798 std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
28799 Tensor proj_bias_value;
28800 optional<int64_t> proj_bias_bdim;
28801 std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
28802 optional<Tensor> mask_value;
28803 optional<int64_t> mask_bdim;
28804 if (mask) {
28805 std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
28806 }
28807 optional<Tensor> incr_key_value;
28808 optional<int64_t> incr_key_bdim;
28809 if (incr_key) {
28810 std::tie(incr_key_value, incr_key_bdim) = unwrapTensorAtLevel(incr_key.value(), cur_level);
28811 }
28812 optional<Tensor> incr_value_value;
28813 optional<int64_t> incr_value_bdim;
28814 if (incr_value) {
28815 std::tie(incr_value_value, incr_value_bdim) = unwrapTensorAtLevel(incr_value.value(), cur_level);
28816 }
28817 auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, incr_key_value, incr_key_bdim, incr_value_value, incr_value_bdim, need_weights, average_attn_weights);
28818 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
28819}
28820template <typename batch_rule_t, batch_rule_t batch_rule>
28821at::Tensor special_bessel_j0_generated_plumbing(const at::Tensor & self) {
28822 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28823 auto maybe_layer = maybeCurrentDynamicLayer();
28824 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28825 int64_t cur_level = maybe_layer->layerId();
28826 if (!isBatchedAtLevel(self, cur_level)) {
28827 return at::_ops::special_bessel_j0::call(self);
28828 }
28829 Tensor self_value;
28830 optional<int64_t> self_bdim;
28831 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28832 auto results = batch_rule(self_value, self_bdim);
28833 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28834}
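// Editor's note: a batch rule consumed by a unary wrapper such as
// special_bessel_j0_generated_plumbing takes (Tensor, optional<int64_t> bdim)
// and returns the same pair. A minimal sketch, assuming a pointwise op where
// the batch dimension can simply be carried through (the name below is
// illustrative, not one of the rules actually registered by functorch):
//
//   std::tuple<at::Tensor, c10::optional<int64_t>>
//   unary_pointwise_batch_rule_sketch(const at::Tensor& self,
//                                     c10::optional<int64_t> self_bdim) {
//     // The op is applied to the physical tensor; a pointwise op preserves
//     // the shape, so the batch dimension index remains valid.
//     return std::make_tuple(at::special_bessel_j0(self), self_bdim);
//   }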
28835template <typename batch_rule_t, batch_rule_t batch_rule>
28836at::Tensor special_bessel_j1_generated_plumbing(const at::Tensor & self) {
28837 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28838 auto maybe_layer = maybeCurrentDynamicLayer();
28839 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28840 int64_t cur_level = maybe_layer->layerId();
28841 if (!isBatchedAtLevel(self, cur_level)) {
28842 return at::_ops::special_bessel_j1::call(self);
28843 }
28844 Tensor self_value;
28845 optional<int64_t> self_bdim;
28846 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28847 auto results = batch_rule(self_value, self_bdim);
28848 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28849}
28850template <typename batch_rule_t, batch_rule_t batch_rule>
28851at::Tensor special_bessel_y0_generated_plumbing(const at::Tensor & self) {
28852 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28853 auto maybe_layer = maybeCurrentDynamicLayer();
28854 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28855 int64_t cur_level = maybe_layer->layerId();
28856 if (!isBatchedAtLevel(self, cur_level)) {
28857 return at::_ops::special_bessel_y0::call(self);
28858 }
28859 Tensor self_value;
28860 optional<int64_t> self_bdim;
28861 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28862 auto results = batch_rule(self_value, self_bdim);
28863 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28864}
28865template <typename batch_rule_t, batch_rule_t batch_rule>
28866at::Tensor special_bessel_y1_generated_plumbing(const at::Tensor & self) {
28867 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28868 auto maybe_layer = maybeCurrentDynamicLayer();
28869 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28870 int64_t cur_level = maybe_layer->layerId();
28871 if (!isBatchedAtLevel(self, cur_level)) {
28872 return at::_ops::special_bessel_y1::call(self);
28873 }
28874 Tensor self_value;
28875 optional<int64_t> self_bdim;
28876 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
28877 auto results = batch_rule(self_value, self_bdim);
28878 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28879}
28880template <typename batch_rule_t, batch_rule_t batch_rule>
28881at::Tensor special_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
28882 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28883 auto maybe_layer = maybeCurrentDynamicLayer();
28884 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28885 int64_t cur_level = maybe_layer->layerId();
28886 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
28887 return at::_ops::special_chebyshev_polynomial_t::call(x, n);
28888 }
28889 Tensor x_value;
28890 optional<int64_t> x_bdim;
28891 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
28892 Tensor n_value;
28893 optional<int64_t> n_bdim;
28894 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
28895 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
28896 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28897}
28898template <typename batch_rule_t, batch_rule_t batch_rule>
28899at::Tensor special_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
28900 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28901 auto maybe_layer = maybeCurrentDynamicLayer();
28902 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28903 int64_t cur_level = maybe_layer->layerId();
28904 if (!isBatchedAtLevel(n, cur_level)) {
28905 return at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n);
28906 }
28907 Tensor n_value;
28908 optional<int64_t> n_bdim;
28909 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
28910 auto results = batch_rule(x, n_value, n_bdim);
28911 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28912}
28913template <typename batch_rule_t, batch_rule_t batch_rule>
28914at::Tensor special_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
28915 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28916 auto maybe_layer = maybeCurrentDynamicLayer();
28917 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28918 int64_t cur_level = maybe_layer->layerId();
28919 if (!isBatchedAtLevel(x, cur_level)) {
28920 return at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x, n);
28921 }
28922 Tensor x_value;
28923 optional<int64_t> x_bdim;
28924 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
28925 auto results = batch_rule(x_value, x_bdim, n);
28926 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28927}
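// Editor's note: the three overloads above illustrate how mixed signatures are
// plumbed. The Tensor/Tensor form unwraps both x and n; the x_scalar and
// n_scalar forms unwrap only the Tensor operand and forward the at::Scalar to
// the batch rule unchanged, so only Tensor arguments ever contribute a bdim.
// For the Tensor/Tensor case the batch rule must still reconcile the two batch
// dimensions (e.g. move each present bdim to the front and align ranks before
// applying the op); that logic lives in the batch-rule implementations, not in
// this generated plumbing.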
28928template <typename batch_rule_t, batch_rule_t batch_rule>
28929at::Tensor special_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
28930 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28931 auto maybe_layer = maybeCurrentDynamicLayer();
28932 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28933 int64_t cur_level = maybe_layer->layerId();
28934 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
28935 return at::_ops::special_chebyshev_polynomial_u::call(x, n);
28936 }
28937 Tensor x_value;
28938 optional<int64_t> x_bdim;
28939 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
28940 Tensor n_value;
28941 optional<int64_t> n_bdim;
28942 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
28943 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
28944 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28945}
28946template <typename batch_rule_t, batch_rule_t batch_rule>
28947at::Tensor special_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
28948 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28949 auto maybe_layer = maybeCurrentDynamicLayer();
28950 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28951 int64_t cur_level = maybe_layer->layerId();
28952 if (!isBatchedAtLevel(n, cur_level)) {
28953 return at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n);
28954 }
28955 Tensor n_value;
28956 optional<int64_t> n_bdim;
28957 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
28958 auto results = batch_rule(x, n_value, n_bdim);
28959 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28960}
28961template <typename batch_rule_t, batch_rule_t batch_rule>
28962at::Tensor special_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
28963 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28964 auto maybe_layer = maybeCurrentDynamicLayer();
28965 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28966 int64_t cur_level = maybe_layer->layerId();
28967 if (!isBatchedAtLevel(x, cur_level)) {
28968 return at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x, n);
28969 }
28970 Tensor x_value;
28971 optional<int64_t> x_bdim;
28972 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
28973 auto results = batch_rule(x_value, x_bdim, n);
28974 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28975}
28976template <typename batch_rule_t, batch_rule_t batch_rule>
28977at::Tensor special_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
28978 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28979 auto maybe_layer = maybeCurrentDynamicLayer();
28980 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28981 int64_t cur_level = maybe_layer->layerId();
28982 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
28983 return at::_ops::special_chebyshev_polynomial_v::call(x, n);
28984 }
28985 Tensor x_value;
28986 optional<int64_t> x_bdim;
28987 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
28988 Tensor n_value;
28989 optional<int64_t> n_bdim;
28990 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
28991 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
28992 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
28993}
28994template <typename batch_rule_t, batch_rule_t batch_rule>
28995at::Tensor special_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
28996 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
28997 auto maybe_layer = maybeCurrentDynamicLayer();
28998 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
28999 int64_t cur_level = maybe_layer->layerId();
29000 if (!isBatchedAtLevel(n, cur_level)) {
29001 return at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n);
29002 }
29003 Tensor n_value;
29004 optional<int64_t> n_bdim;
29005 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29006 auto results = batch_rule(x, n_value, n_bdim);
29007 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29008}
29009template <typename batch_rule_t, batch_rule_t batch_rule>
29010at::Tensor special_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29011 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29012 auto maybe_layer = maybeCurrentDynamicLayer();
29013 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29014 int64_t cur_level = maybe_layer->layerId();
29015 if (!isBatchedAtLevel(x, cur_level)) {
29016 return at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x, n);
29017 }
29018 Tensor x_value;
29019 optional<int64_t> x_bdim;
29020 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29021 auto results = batch_rule(x_value, x_bdim, n);
29022 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29023}
29024template <typename batch_rule_t, batch_rule_t batch_rule>
29025at::Tensor special_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
29026 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29027 auto maybe_layer = maybeCurrentDynamicLayer();
29028 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29029 int64_t cur_level = maybe_layer->layerId();
29030 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
29031 return at::_ops::special_chebyshev_polynomial_w::call(x, n);
29032 }
29033 Tensor x_value;
29034 optional<int64_t> x_bdim;
29035 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29036 Tensor n_value;
29037 optional<int64_t> n_bdim;
29038 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29039 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
29040 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29041}
29042template <typename batch_rule_t, batch_rule_t batch_rule>
29043at::Tensor special_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
29044 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29045 auto maybe_layer = maybeCurrentDynamicLayer();
29046 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29047 int64_t cur_level = maybe_layer->layerId();
29048 if (!isBatchedAtLevel(n, cur_level)) {
29049 return at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n);
29050 }
29051 Tensor n_value;
29052 optional<int64_t> n_bdim;
29053 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29054 auto results = batch_rule(x, n_value, n_bdim);
29055 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29056}
29057template <typename batch_rule_t, batch_rule_t batch_rule>
29058at::Tensor special_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29059 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29060 auto maybe_layer = maybeCurrentDynamicLayer();
29061 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29062 int64_t cur_level = maybe_layer->layerId();
29063 if (!isBatchedAtLevel(x, cur_level)) {
29064 return at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x, n);
29065 }
29066 Tensor x_value;
29067 optional<int64_t> x_bdim;
29068 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29069 auto results = batch_rule(x_value, x_bdim, n);
29070 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29071}
29072template <typename batch_rule_t, batch_rule_t batch_rule>
29073at::Tensor special_hermite_polynomial_h_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
29074 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29075 auto maybe_layer = maybeCurrentDynamicLayer();
29076 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29077 int64_t cur_level = maybe_layer->layerId();
29078 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
29079 return at::_ops::special_hermite_polynomial_h::call(x, n);
29080 }
29081 Tensor x_value;
29082 optional<int64_t> x_bdim;
29083 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29084 Tensor n_value;
29085 optional<int64_t> n_bdim;
29086 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29087 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
29088 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29089}
29090template <typename batch_rule_t, batch_rule_t batch_rule>
29091at::Tensor special_hermite_polynomial_h_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
29092 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29093 auto maybe_layer = maybeCurrentDynamicLayer();
29094 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29095 int64_t cur_level = maybe_layer->layerId();
29096 if (!isBatchedAtLevel(n, cur_level)) {
29097 return at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n);
29098 }
29099 Tensor n_value;
29100 optional<int64_t> n_bdim;
29101 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29102 auto results = batch_rule(x, n_value, n_bdim);
29103 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29104}
29105template <typename batch_rule_t, batch_rule_t batch_rule>
29106at::Tensor special_hermite_polynomial_h_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29107 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29108 auto maybe_layer = maybeCurrentDynamicLayer();
29109 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29110 int64_t cur_level = maybe_layer->layerId();
29111 if (!isBatchedAtLevel(x, cur_level)) {
29112 return at::_ops::special_hermite_polynomial_h_n_scalar::call(x, n);
29113 }
29114 Tensor x_value;
29115 optional<int64_t> x_bdim;
29116 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29117 auto results = batch_rule(x_value, x_bdim, n);
29118 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29119}
29120template <typename batch_rule_t, batch_rule_t batch_rule>
29121at::Tensor special_hermite_polynomial_he_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
29122 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29123 auto maybe_layer = maybeCurrentDynamicLayer();
29124 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29125 int64_t cur_level = maybe_layer->layerId();
29126 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
29127 return at::_ops::special_hermite_polynomial_he::call(x, n);
29128 }
29129 Tensor x_value;
29130 optional<int64_t> x_bdim;
29131 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29132 Tensor n_value;
29133 optional<int64_t> n_bdim;
29134 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29135 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
29136 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29137}
29138template <typename batch_rule_t, batch_rule_t batch_rule>
29139at::Tensor special_hermite_polynomial_he_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
29140 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29141 auto maybe_layer = maybeCurrentDynamicLayer();
29142 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29143 int64_t cur_level = maybe_layer->layerId();
29144 if (!isBatchedAtLevel(n, cur_level)) {
29145 return at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n);
29146 }
29147 Tensor n_value;
29148 optional<int64_t> n_bdim;
29149 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29150 auto results = batch_rule(x, n_value, n_bdim);
29151 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29152}
29153template <typename batch_rule_t, batch_rule_t batch_rule>
29154at::Tensor special_hermite_polynomial_he_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29155 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29156 auto maybe_layer = maybeCurrentDynamicLayer();
29157 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29158 int64_t cur_level = maybe_layer->layerId();
29159 if (!isBatchedAtLevel(x, cur_level)) {
29160 return at::_ops::special_hermite_polynomial_he_n_scalar::call(x, n);
29161 }
29162 Tensor x_value;
29163 optional<int64_t> x_bdim;
29164 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29165 auto results = batch_rule(x_value, x_bdim, n);
29166 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29167}
29168template <typename batch_rule_t, batch_rule_t batch_rule>
29169at::Tensor special_laguerre_polynomial_l_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
29170 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29171 auto maybe_layer = maybeCurrentDynamicLayer();
29172 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29173 int64_t cur_level = maybe_layer->layerId();
29174 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
29175 return at::_ops::special_laguerre_polynomial_l::call(x, n);
29176 }
29177 Tensor x_value;
29178 optional<int64_t> x_bdim;
29179 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29180 Tensor n_value;
29181 optional<int64_t> n_bdim;
29182 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29183 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
29184 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29185}
29186template <typename batch_rule_t, batch_rule_t batch_rule>
29187at::Tensor special_laguerre_polynomial_l_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
29188 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29189 auto maybe_layer = maybeCurrentDynamicLayer();
29190 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29191 int64_t cur_level = maybe_layer->layerId();
29192 if (!isBatchedAtLevel(n, cur_level)) {
29193 return at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n);
29194 }
29195 Tensor n_value;
29196 optional<int64_t> n_bdim;
29197 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29198 auto results = batch_rule(x, n_value, n_bdim);
29199 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29200}
29201template <typename batch_rule_t, batch_rule_t batch_rule>
29202at::Tensor special_laguerre_polynomial_l_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29203 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29204 auto maybe_layer = maybeCurrentDynamicLayer();
29205 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29206 int64_t cur_level = maybe_layer->layerId();
29207 if (!isBatchedAtLevel(x, cur_level)) {
29208 return at::_ops::special_laguerre_polynomial_l_n_scalar::call(x, n);
29209 }
29210 Tensor x_value;
29211 optional<int64_t> x_bdim;
29212 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29213 auto results = batch_rule(x_value, x_bdim, n);
29214 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29215}
29216template <typename batch_rule_t, batch_rule_t batch_rule>
29217at::Tensor special_legendre_polynomial_p_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
29218 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29219 auto maybe_layer = maybeCurrentDynamicLayer();
29220 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29221 int64_t cur_level = maybe_layer->layerId();
29222 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
29223 return at::_ops::special_legendre_polynomial_p::call(x, n);
29224 }
29225 Tensor x_value;
29226 optional<int64_t> x_bdim;
29227 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29228 Tensor n_value;
29229 optional<int64_t> n_bdim;
29230 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29231 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
29232 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29233}
29234template <typename batch_rule_t, batch_rule_t batch_rule>
29235at::Tensor special_legendre_polynomial_p_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
29236 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29237 auto maybe_layer = maybeCurrentDynamicLayer();
29238 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29239 int64_t cur_level = maybe_layer->layerId();
29240 if (!isBatchedAtLevel(n, cur_level)) {
29241 return at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n);
29242 }
29243 Tensor n_value;
29244 optional<int64_t> n_bdim;
29245 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29246 auto results = batch_rule(x, n_value, n_bdim);
29247 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29248}
29249template <typename batch_rule_t, batch_rule_t batch_rule>
29250at::Tensor special_legendre_polynomial_p_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29251 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29252 auto maybe_layer = maybeCurrentDynamicLayer();
29253 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29254 int64_t cur_level = maybe_layer->layerId();
29255 if (!isBatchedAtLevel(x, cur_level)) {
29256 return at::_ops::special_legendre_polynomial_p_n_scalar::call(x, n);
29257 }
29258 Tensor x_value;
29259 optional<int64_t> x_bdim;
29260 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29261 auto results = batch_rule(x_value, x_bdim, n);
29262 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29263}
29264template <typename batch_rule_t, batch_rule_t batch_rule>
29265at::Tensor special_modified_bessel_i0_generated_plumbing(const at::Tensor & self) {
29266 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29267 auto maybe_layer = maybeCurrentDynamicLayer();
29268 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29269 int64_t cur_level = maybe_layer->layerId();
29270 if (!isBatchedAtLevel(self, cur_level)) {
29271 return at::_ops::special_modified_bessel_i0::call(self);
29272 }
29273 Tensor self_value;
29274 optional<int64_t> self_bdim;
29275 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29276 auto results = batch_rule(self_value, self_bdim);
29277 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29278}
29279template <typename batch_rule_t, batch_rule_t batch_rule>
29280at::Tensor special_modified_bessel_i1_generated_plumbing(const at::Tensor & self) {
29281 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29282 auto maybe_layer = maybeCurrentDynamicLayer();
29283 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29284 int64_t cur_level = maybe_layer->layerId();
29285 if (!isBatchedAtLevel(self, cur_level)) {
29286 return at::_ops::special_modified_bessel_i1::call(self);
29287 }
29288 Tensor self_value;
29289 optional<int64_t> self_bdim;
29290 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29291 auto results = batch_rule(self_value, self_bdim);
29292 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29293}
29294template <typename batch_rule_t, batch_rule_t batch_rule>
29295at::Tensor special_modified_bessel_k0_generated_plumbing(const at::Tensor & self) {
29296 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29297 auto maybe_layer = maybeCurrentDynamicLayer();
29298 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29299 int64_t cur_level = maybe_layer->layerId();
29300 if (!isBatchedAtLevel(self, cur_level)) {
29301 return at::_ops::special_modified_bessel_k0::call(self);
29302 }
29303 Tensor self_value;
29304 optional<int64_t> self_bdim;
29305 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29306 auto results = batch_rule(self_value, self_bdim);
29307 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29308}
29309template <typename batch_rule_t, batch_rule_t batch_rule>
29310at::Tensor special_modified_bessel_k1_generated_plumbing(const at::Tensor & self) {
29311 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29312 auto maybe_layer = maybeCurrentDynamicLayer();
29313 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29314 int64_t cur_level = maybe_layer->layerId();
29315 if (!isBatchedAtLevel(self, cur_level)) {
29316 return at::_ops::special_modified_bessel_k1::call(self);
29317 }
29318 Tensor self_value;
29319 optional<int64_t> self_bdim;
29320 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29321 auto results = batch_rule(self_value, self_bdim);
29322 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29323}
29324template <typename batch_rule_t, batch_rule_t batch_rule>
29325at::Tensor special_scaled_modified_bessel_k0_generated_plumbing(const at::Tensor & x) {
29326 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29327 auto maybe_layer = maybeCurrentDynamicLayer();
29328 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29329 int64_t cur_level = maybe_layer->layerId();
29330 if (!isBatchedAtLevel(x, cur_level)) {
29331 return at::_ops::special_scaled_modified_bessel_k0::call(x);
29332 }
29333 Tensor x_value;
29334 optional<int64_t> x_bdim;
29335 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29336 auto results = batch_rule(x_value, x_bdim);
29337 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29338}
29339template <typename batch_rule_t, batch_rule_t batch_rule>
29340at::Tensor special_scaled_modified_bessel_k1_generated_plumbing(const at::Tensor & x) {
29341 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29342 auto maybe_layer = maybeCurrentDynamicLayer();
29343 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29344 int64_t cur_level = maybe_layer->layerId();
29345 if (!isBatchedAtLevel(x, cur_level)) {
29346 return at::_ops::special_scaled_modified_bessel_k1::call(x);
29347 }
29348 Tensor x_value;
29349 optional<int64_t> x_bdim;
29350 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29351 auto results = batch_rule(x_value, x_bdim);
29352 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29353}
29354template <typename batch_rule_t, batch_rule_t batch_rule>
29355at::Tensor special_shifted_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
29356 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29357 auto maybe_layer = maybeCurrentDynamicLayer();
29358 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29359 int64_t cur_level = maybe_layer->layerId();
29360 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
29361 return at::_ops::special_shifted_chebyshev_polynomial_t::call(x, n);
29362 }
29363 Tensor x_value;
29364 optional<int64_t> x_bdim;
29365 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29366 Tensor n_value;
29367 optional<int64_t> n_bdim;
29368 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29369 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
29370 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29371}
29372template <typename batch_rule_t, batch_rule_t batch_rule>
29373at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
29374 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29375 auto maybe_layer = maybeCurrentDynamicLayer();
29376 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29377 int64_t cur_level = maybe_layer->layerId();
29378 if (!isBatchedAtLevel(n, cur_level)) {
29379 return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n);
29380 }
29381 Tensor n_value;
29382 optional<int64_t> n_bdim;
29383 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29384 auto results = batch_rule(x, n_value, n_bdim);
29385 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29386}
29387template <typename batch_rule_t, batch_rule_t batch_rule>
29388at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29389 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29390 auto maybe_layer = maybeCurrentDynamicLayer();
29391 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29392 int64_t cur_level = maybe_layer->layerId();
29393 if (!isBatchedAtLevel(x, cur_level)) {
29394 return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x, n);
29395 }
29396 Tensor x_value;
29397 optional<int64_t> x_bdim;
29398 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29399 auto results = batch_rule(x_value, x_bdim, n);
29400 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29401}
29402template <typename batch_rule_t, batch_rule_t batch_rule>
29403at::Tensor special_shifted_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
29404 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29405 auto maybe_layer = maybeCurrentDynamicLayer();
29406 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29407 int64_t cur_level = maybe_layer->layerId();
29408 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
29409 return at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n);
29410 }
29411 Tensor x_value;
29412 optional<int64_t> x_bdim;
29413 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29414 Tensor n_value;
29415 optional<int64_t> n_bdim;
29416 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29417 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
29418 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29419}
29420template <typename batch_rule_t, batch_rule_t batch_rule>
29421at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
29422 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29423 auto maybe_layer = maybeCurrentDynamicLayer();
29424 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29425 int64_t cur_level = maybe_layer->layerId();
29426 if (!isBatchedAtLevel(n, cur_level)) {
29427 return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n);
29428 }
29429 Tensor n_value;
29430 optional<int64_t> n_bdim;
29431 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29432 auto results = batch_rule(x, n_value, n_bdim);
29433 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29434}
29435template <typename batch_rule_t, batch_rule_t batch_rule>
29436at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29437 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29438 auto maybe_layer = maybeCurrentDynamicLayer();
29439 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29440 int64_t cur_level = maybe_layer->layerId();
29441 if (!isBatchedAtLevel(x, cur_level)) {
29442 return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x, n);
29443 }
29444 Tensor x_value;
29445 optional<int64_t> x_bdim;
29446 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29447 auto results = batch_rule(x_value, x_bdim, n);
29448 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29449}
29450template <typename batch_rule_t, batch_rule_t batch_rule>
29451at::Tensor special_shifted_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
29452 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29453 auto maybe_layer = maybeCurrentDynamicLayer();
29454 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29455 int64_t cur_level = maybe_layer->layerId();
29456 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
29457 return at::_ops::special_shifted_chebyshev_polynomial_v::call(x, n);
29458 }
29459 Tensor x_value;
29460 optional<int64_t> x_bdim;
29461 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29462 Tensor n_value;
29463 optional<int64_t> n_bdim;
29464 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29465 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
29466 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29467}
29468template <typename batch_rule_t, batch_rule_t batch_rule>
29469at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
29470 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29471 auto maybe_layer = maybeCurrentDynamicLayer();
29472 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29473 int64_t cur_level = maybe_layer->layerId();
29474 if (!isBatchedAtLevel(n, cur_level)) {
29475 return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n);
29476 }
29477 Tensor n_value;
29478 optional<int64_t> n_bdim;
29479 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29480 auto results = batch_rule(x, n_value, n_bdim);
29481 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29482}
29483template <typename batch_rule_t, batch_rule_t batch_rule>
29484at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29485 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29486 auto maybe_layer = maybeCurrentDynamicLayer();
29487 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29488 int64_t cur_level = maybe_layer->layerId();
29489 if (!isBatchedAtLevel(x, cur_level)) {
29490 return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x, n);
29491 }
29492 Tensor x_value;
29493 optional<int64_t> x_bdim;
29494 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29495 auto results = batch_rule(x_value, x_bdim, n);
29496 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29497}
29498template <typename batch_rule_t, batch_rule_t batch_rule>
29499at::Tensor special_shifted_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
29500 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29501 auto maybe_layer = maybeCurrentDynamicLayer();
29502 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29503 int64_t cur_level = maybe_layer->layerId();
29504 if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
29505 return at::_ops::special_shifted_chebyshev_polynomial_w::call(x, n);
29506 }
29507 Tensor x_value;
29508 optional<int64_t> x_bdim;
29509 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29510 Tensor n_value;
29511 optional<int64_t> n_bdim;
29512 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29513 auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
29514 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29515}
29516template <typename batch_rule_t, batch_rule_t batch_rule>
29517at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
29518 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29519 auto maybe_layer = maybeCurrentDynamicLayer();
29520 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29521 int64_t cur_level = maybe_layer->layerId();
29522 if (!isBatchedAtLevel(n, cur_level)) {
29523 return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n);
29524 }
29525 Tensor n_value;
29526 optional<int64_t> n_bdim;
29527 std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
29528 auto results = batch_rule(x, n_value, n_bdim);
29529 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29530}
29531template <typename batch_rule_t, batch_rule_t batch_rule>
29532at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
29533 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29534 auto maybe_layer = maybeCurrentDynamicLayer();
29535 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29536 int64_t cur_level = maybe_layer->layerId();
29537 if (!isBatchedAtLevel(x, cur_level)) {
29538 return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x, n);
29539 }
29540 Tensor x_value;
29541 optional<int64_t> x_bdim;
29542 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29543 auto results = batch_rule(x_value, x_bdim, n);
29544 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29545}
29546template <typename batch_rule_t, batch_rule_t batch_rule>
29547at::Tensor special_spherical_bessel_j0_generated_plumbing(const at::Tensor & x) {
29548 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29549 auto maybe_layer = maybeCurrentDynamicLayer();
29550 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29551 int64_t cur_level = maybe_layer->layerId();
29552 if (!isBatchedAtLevel(x, cur_level)) {
29553 return at::_ops::special_spherical_bessel_j0::call(x);
29554 }
29555 Tensor x_value;
29556 optional<int64_t> x_bdim;
29557 std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
29558 auto results = batch_rule(x_value, x_bdim);
29559 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29560}
29561template <typename batch_rule_t, batch_rule_t batch_rule>
29562at::Tensor _foobar_generated_plumbing(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
29563 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29564 auto maybe_layer = maybeCurrentDynamicLayer();
29565 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29566 int64_t cur_level = maybe_layer->layerId();
29567 if (!isBatchedAtLevel(self, cur_level)) {
29568 return at::_ops::_foobar::call(self, arg1, arg2, arg3);
29569 }
29570 Tensor self_value;
29571 optional<int64_t> self_bdim;
29572 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29573 auto results = batch_rule(self_value, self_bdim, arg1, arg2, arg3);
29574 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29575}
29576template <typename batch_rule_t, batch_rule_t batch_rule>
29577void _fused_adam__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
29578 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29579 auto maybe_layer = maybeCurrentDynamicLayer();
29580 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
29581 int64_t cur_level = maybe_layer->layerId();
29582 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
29583 return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
29584 }
29585 optional<Tensor> grad_scale_value;
29586 optional<int64_t> grad_scale_bdim;
29587 if (grad_scale) {
29588 std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
29589 }
29590 optional<Tensor> found_inf_value;
29591 optional<int64_t> found_inf_bdim;
29592 if (found_inf) {
29593 std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
29594 }
29595 batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
29596}
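// Editor's note: ops that return void, such as the fused optimizer update
// above, are generated against the "gen_vmap_plumbing_no_returns" template:
// there is nothing to re-wrap, so the wrapper only unwraps the optional
// grad_scale / found_inf tensors and otherwise passes the TensorList arguments
// straight through for the batch rule to handle.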
29597template <typename batch_rule_t, batch_rule_t batch_rule>
29598void _fused_adamw__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
29599 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29600 auto maybe_layer = maybeCurrentDynamicLayer();
29601 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
29602 int64_t cur_level = maybe_layer->layerId();
29603 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
29604 return at::_ops::_fused_adamw_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
29605 }
29606 optional<Tensor> grad_scale_value;
29607 optional<int64_t> grad_scale_bdim;
29608 if (grad_scale) {
29609 std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
29610 }
29611 optional<Tensor> found_inf_value;
29612 optional<int64_t> found_inf_bdim;
29613 if (found_inf) {
29614 std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
29615 }
29616 batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
29617}
29618template <typename batch_rule_t, batch_rule_t batch_rule>
29619void _cudnn_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
29620 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29621 auto maybe_layer = maybeCurrentDynamicLayer();
29622 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
29623 int64_t cur_level = maybe_layer->layerId();
29624 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
29625 return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
29626 }
29627 Tensor input_value;
29628 optional<int64_t> input_bdim;
29629 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
29630 Tensor weight_buf_value;
29631 optional<int64_t> weight_buf_bdim;
29632 std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
29633 Tensor hx_value;
29634 optional<int64_t> hx_bdim;
29635 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
29636 Tensor output_value;
29637 optional<int64_t> output_bdim;
29638 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
29639 Tensor reserve_value;
29640 optional<int64_t> reserve_bdim;
29641 std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
29642 Tensor out0_value;
29643 optional<int64_t> out0_bdim;
29644 std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
29645 Tensor out1_value;
29646 optional<int64_t> out1_bdim;
29647 std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level);
29648 Tensor out2_value;
29649 optional<int64_t> out2_bdim;
29650 std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level);
29651 optional<Tensor> cx_value;
29652 optional<int64_t> cx_bdim;
29653 if (cx) {
29654 std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
29655 }
29656 optional<Tensor> grad_output_value;
29657 optional<int64_t> grad_output_bdim;
29658 if (grad_output) {
29659 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
29660 }
29661 optional<Tensor> grad_hy_value;
29662 optional<int64_t> grad_hy_bdim;
29663 if (grad_hy) {
29664 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
29665 }
29666 optional<Tensor> grad_cy_value;
29667 optional<int64_t> grad_cy_bdim;
29668 if (grad_cy) {
29669 std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
29670 }
29671 optional<Tensor> dropout_state_value;
29672 optional<int64_t> dropout_state_bdim;
29673 if (dropout_state) {
29674 std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
29675 }
29676 batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
29677}
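// Editor's note: out-variant plumbing follows the same no-returns pattern.
// The preallocated out0/out1/out2 tensors are unwrapped like any other input
// and their value/bdim pairs forwarded, while the TensorList out3 is passed
// through as-is, leaving it to the batch rule to fill the outputs.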
29678template <typename batch_rule_t, batch_rule_t batch_rule>
29679at::Tensor bernoulli_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
29680 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29681 auto maybe_layer = maybeCurrentDynamicLayer();
29682 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29683 int64_t cur_level = maybe_layer->layerId();
29684 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
29685 return at::_ops::bernoulli_Tensor::call(self, p, generator);
29686 }
29687 Tensor self_value;
29688 optional<int64_t> self_bdim;
29689 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29690 Tensor p_value;
29691 optional<int64_t> p_bdim;
29692 std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level);
29693 auto results = batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
29694 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29695}
29696template <typename batch_rule_t, batch_rule_t batch_rule>
29697at::Tensor embedding_renorm_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
29698 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29699 auto maybe_layer = maybeCurrentDynamicLayer();
29700 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29701 int64_t cur_level = maybe_layer->layerId();
29702 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
29703 return at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type);
29704 }
29705 Tensor self_value;
29706 optional<int64_t> self_bdim;
29707 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29708 Tensor indices_value;
29709 optional<int64_t> indices_bdim;
29710 std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
29711 auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
29712 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29713}
29714template <typename batch_rule_t, batch_rule_t batch_rule>
29715at::Tensor resize_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
29716 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29717 auto maybe_layer = maybeCurrentDynamicLayer();
29718 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29719 int64_t cur_level = maybe_layer->layerId();
29720 if (!isBatchedAtLevel(self, cur_level)) {
29721 return at::_ops::resize::call(self, size, memory_format);
29722 }
29723 Tensor self_value;
29724 optional<int64_t> self_bdim;
29725 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29726 auto results = batch_rule(self_value, self_bdim, size, memory_format);
29727 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29728}
29729template <typename batch_rule_t, batch_rule_t batch_rule>
29730at::Tensor _resize_output_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, at::Device device) {
29731 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29732 auto maybe_layer = maybeCurrentDynamicLayer();
29733 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29734 int64_t cur_level = maybe_layer->layerId();
29735 if (!isBatchedAtLevel(self, cur_level)) {
29736 return at::_ops::_resize_output::call(self, size, device);
29737 }
29738 Tensor self_value;
29739 optional<int64_t> self_bdim;
29740 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29741 auto results = batch_rule(self_value, self_bdim, size, device);
29742 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29743}
29744template <typename batch_rule_t, batch_rule_t batch_rule>
29745at::Tensor _index_put_impl_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
29746 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29747 auto maybe_layer = maybeCurrentDynamicLayer();
29748 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29749 int64_t cur_level = maybe_layer->layerId();
29750 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
29751 return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe);
29752 }
29753 Tensor self_value;
29754 optional<int64_t> self_bdim;
29755 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29756 Tensor values_value;
29757 optional<int64_t> values_bdim;
29758 std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
29759 auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
29760 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29761}
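// Editor's note: arguments typed c10::List<c10::optional<at::Tensor>>, like
// `indices` above, are treated the same way as TensorList: the plumbing checks
// them with isBatchedAtLevel but forwards the list unmodified, leaving
// per-element unwrapping to the batch rule.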
29762template <typename batch_rule_t, batch_rule_t batch_rule>
29763void miopen_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
29764 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29765 auto maybe_layer = maybeCurrentDynamicLayer();
29766 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
29767 int64_t cur_level = maybe_layer->layerId();
29768 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
29769 return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
29770 }
29771 Tensor input_value;
29772 optional<int64_t> input_bdim;
29773 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
29774 Tensor weight_buf_value;
29775 optional<int64_t> weight_buf_bdim;
29776 std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
29777 Tensor hx_value;
29778 optional<int64_t> hx_bdim;
29779 std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
29780 Tensor output_value;
29781 optional<int64_t> output_bdim;
29782 std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
29783 Tensor reserve_value;
29784 optional<int64_t> reserve_bdim;
29785 std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
29786 Tensor out0_value;
29787 optional<int64_t> out0_bdim;
29788 std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
29789 Tensor out1_value;
29790 optional<int64_t> out1_bdim;
29791 std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level);
29792 Tensor out2_value;
29793 optional<int64_t> out2_bdim;
29794 std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level);
29795 optional<Tensor> cx_value;
29796 optional<int64_t> cx_bdim;
29797 if (cx) {
29798 std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
29799 }
29800 optional<Tensor> grad_output_value;
29801 optional<int64_t> grad_output_bdim;
29802 if (grad_output) {
29803 std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
29804 }
29805 optional<Tensor> grad_hy_value;
29806 optional<int64_t> grad_hy_bdim;
29807 if (grad_hy) {
29808 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
29809 }
29810 optional<Tensor> grad_cy_value;
29811 optional<int64_t> grad_cy_bdim;
29812 if (grad_cy) {
29813 std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
29814 }
29815 optional<Tensor> dropout_state_value;
29816 optional<int64_t> dropout_state_bdim;
29817 if (dropout_state) {
29818 std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
29819 }
29820 batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
29821}
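// Multi-output functional plumbing: the batch rule returns an interleaved
// tuple of (Tensor, optional<int64_t> bdim) pairs, and each pair is re-wrapped
// into a batched tensor at cur_level via makeBatched. Optional tensor
// arguments (here weight and bias) are only unwrapped when they hold a value.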
29822template <typename batch_rule_t, batch_rule_t batch_rule>
29823::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
29824 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29825 auto maybe_layer = maybeCurrentDynamicLayer();
29826 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29827 int64_t cur_level = maybe_layer->layerId();
29828 if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
29829 return at::_ops::_native_batch_norm_legit_functional::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
29830 }
29831 Tensor input_value;
29832 optional<int64_t> input_bdim;
29833 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
29834 Tensor running_mean_value;
29835 optional<int64_t> running_mean_bdim;
29836 std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean, cur_level);
29837 Tensor running_var_value;
29838 optional<int64_t> running_var_bdim;
29839 std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var, cur_level);
29840 optional<Tensor> weight_value;
29841 optional<int64_t> weight_bdim;
29842 if (weight) {
29843 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
29844 }
29845 optional<Tensor> bias_value;
29846 optional<int64_t> bias_bdim;
29847 if (bias) {
29848 std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
29849 }
29850 auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
29851 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
29852}
29853template <typename batch_rule_t, batch_rule_t batch_rule>
29854void unsafe_split_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
29855 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29856 auto maybe_layer = maybeCurrentDynamicLayer();
29857 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
29858 int64_t cur_level = maybe_layer->layerId();
29859 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
29860 return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
29861 }
29862 Tensor self_value;
29863 optional<int64_t> self_bdim;
29864 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29865 batch_rule(self_value, self_bdim, split_size, dim, out);
29866}
29867template <typename batch_rule_t, batch_rule_t batch_rule>
29868void unsafe_split_with_sizes_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
29869 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29870 auto maybe_layer = maybeCurrentDynamicLayer();
29871 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
29872 int64_t cur_level = maybe_layer->layerId();
29873 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
29874 return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out);
29875 }
29876 Tensor self_value;
29877 optional<int64_t> self_bdim;
29878 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29879 batch_rule(self_value, self_bdim, split_sizes, dim, out);
29880}
29881template <typename batch_rule_t, batch_rule_t batch_rule>
29882at::Tensor resize_as_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
29883 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29884 auto maybe_layer = maybeCurrentDynamicLayer();
29885 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29886 int64_t cur_level = maybe_layer->layerId();
29887 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
29888 return at::_ops::resize_as::call(self, the_template, memory_format);
29889 }
29890 Tensor self_value;
29891 optional<int64_t> self_bdim;
29892 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29893 Tensor the_template_value;
29894 optional<int64_t> the_template_bdim;
29895 std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
29896 auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format);
29897 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29898}
29899template <typename batch_rule_t, batch_rule_t batch_rule>
29900at::Tensor resize_as_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
29901 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29902 auto maybe_layer = maybeCurrentDynamicLayer();
29903 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29904 int64_t cur_level = maybe_layer->layerId();
29905 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
29906 return at::_ops::resize_as_sparse::call(self, the_template);
29907 }
29908 Tensor self_value;
29909 optional<int64_t> self_bdim;
29910 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29911 Tensor the_template_value;
29912 optional<int64_t> the_template_bdim;
29913 std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
29914 auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
29915 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29916}
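// The wrappers that follow cover the functional (non in-place) variants of
// ops such as zero, sparse_resize, random and uniform: scalar and generator
// arguments are forwarded untouched, and only the tensor operands are
// unwrapped and re-wrapped around the batch rule.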
29917template <typename batch_rule_t, batch_rule_t batch_rule>
29918at::Tensor zero_generated_plumbing(const at::Tensor & self) {
29919 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29920 auto maybe_layer = maybeCurrentDynamicLayer();
29921 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29922 int64_t cur_level = maybe_layer->layerId();
29923 if (!isBatchedAtLevel(self, cur_level)) {
29924 return at::_ops::zero::call(self);
29925 }
29926 Tensor self_value;
29927 optional<int64_t> self_bdim;
29928 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29929 auto results = batch_rule(self_value, self_bdim);
29930 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29931}
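// A minimal sketch of how one of these templates is meant to be instantiated,
// assuming a hypothetical batch rule with the conventional
// (value, bdim) -> (value, bdim) shape (the real rules live in the
// BatchRules*.cpp files):
//
//   std::tuple<at::Tensor, c10::optional<int64_t>>
//   zero_batch_rule(const at::Tensor& self, c10::optional<int64_t> self_bdim) {
//     return std::make_tuple(at::zeros_like(self), self_bdim);
//   }
//
//   // zero_generated_plumbing<decltype(&zero_batch_rule), &zero_batch_rule>(t)
//   // unwraps t, applies the rule, and re-wraps the result at the same level.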
29932template <typename batch_rule_t, batch_rule_t batch_rule>
29933at::Tensor sparse_resize_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
29934 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29935 auto maybe_layer = maybeCurrentDynamicLayer();
29936 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29937 int64_t cur_level = maybe_layer->layerId();
29938 if (!isBatchedAtLevel(self, cur_level)) {
29939 return at::_ops::sparse_resize::call(self, size, sparse_dim, dense_dim);
29940 }
29941 Tensor self_value;
29942 optional<int64_t> self_bdim;
29943 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29944 auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
29945 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29946}
29947template <typename batch_rule_t, batch_rule_t batch_rule>
29948at::Tensor sparse_resize_and_clear_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
29949 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29950 auto maybe_layer = maybeCurrentDynamicLayer();
29951 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29952 int64_t cur_level = maybe_layer->layerId();
29953 if (!isBatchedAtLevel(self, cur_level)) {
29954 return at::_ops::sparse_resize_and_clear::call(self, size, sparse_dim, dense_dim);
29955 }
29956 Tensor self_value;
29957 optional<int64_t> self_bdim;
29958 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29959 auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
29960 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29961}
29962template <typename batch_rule_t, batch_rule_t batch_rule>
29963at::Tensor _coalesced_generated_plumbing(const at::Tensor & self, bool coalesced) {
29964 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29965 auto maybe_layer = maybeCurrentDynamicLayer();
29966 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29967 int64_t cur_level = maybe_layer->layerId();
29968 if (!isBatchedAtLevel(self, cur_level)) {
29969 return at::_ops::_coalesced::call(self, coalesced);
29970 }
29971 Tensor self_value;
29972 optional<int64_t> self_bdim;
29973 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29974 auto results = batch_rule(self_value, self_bdim, coalesced);
29975 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29976}
29977template <typename batch_rule_t, batch_rule_t batch_rule>
29978at::Tensor copy_sparse_to_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
29979 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29980 auto maybe_layer = maybeCurrentDynamicLayer();
29981 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
29982 int64_t cur_level = maybe_layer->layerId();
29983 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
29984 return at::_ops::copy_sparse_to_sparse::call(self, src, non_blocking);
29985 }
29986 Tensor self_value;
29987 optional<int64_t> self_bdim;
29988 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
29989 Tensor src_value;
29990 optional<int64_t> src_bdim;
29991 std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
29992 auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
29993 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
29994}
29995template <typename batch_rule_t, batch_rule_t batch_rule>
29996void quantize_per_tensor_tensors_out_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
29997 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
29998 auto maybe_layer = maybeCurrentDynamicLayer();
29999 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30000 int64_t cur_level = maybe_layer->layerId();
30001 if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30002 return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out);
30003 }
30004 Tensor scales_value;
30005 optional<int64_t> scales_bdim;
30006 std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
30007 Tensor zero_points_value;
30008 optional<int64_t> zero_points_bdim;
30009 std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
30010 batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype, out);
30011}
30012template <typename batch_rule_t, batch_rule_t batch_rule>
30013void dequantize_tensors_out_generated_plumbing(at::TensorList tensors, at::TensorList out) {
30014 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30015 auto maybe_layer = maybeCurrentDynamicLayer();
30016 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30017 int64_t cur_level = maybe_layer->layerId();
30018 if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30019 return at::_ops::dequantize_tensors_out::call(tensors, out);
30020 }
30021
30022 batch_rule(tensors, out);
30023}
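// When every argument is a TensorList, Scalar, or other non-Tensor value
// (as in dequantize_tensors_out above), the generated unwrap block is empty:
// the lists are forwarded as-is, leaving any per-tensor unwrapping to the
// batch rule itself.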
30024template <typename batch_rule_t, batch_rule_t batch_rule>
30025::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional_generated_plumbing(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
30026 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30027 auto maybe_layer = maybeCurrentDynamicLayer();
30028 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30029 int64_t cur_level = maybe_layer->layerId();
30030 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(observer_on, cur_level) && !isBatchedAtLevel(fake_quant_on, cur_level) && !isBatchedAtLevel(running_min, cur_level) && !isBatchedAtLevel(running_max, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
30031 return at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
30032 }
30033 Tensor self_value;
30034 optional<int64_t> self_bdim;
30035 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30036 Tensor observer_on_value;
30037 optional<int64_t> observer_on_bdim;
30038 std::tie(observer_on_value, observer_on_bdim) = unwrapTensorAtLevel(observer_on, cur_level);
30039 Tensor fake_quant_on_value;
30040 optional<int64_t> fake_quant_on_bdim;
30041 std::tie(fake_quant_on_value, fake_quant_on_bdim) = unwrapTensorAtLevel(fake_quant_on, cur_level);
30042 Tensor running_min_value;
30043 optional<int64_t> running_min_bdim;
30044 std::tie(running_min_value, running_min_bdim) = unwrapTensorAtLevel(running_min, cur_level);
30045 Tensor running_max_value;
30046 optional<int64_t> running_max_bdim;
30047 std::tie(running_max_value, running_max_bdim) = unwrapTensorAtLevel(running_max, cur_level);
30048 Tensor scale_value;
30049 optional<int64_t> scale_bdim;
30050 std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
30051 Tensor zero_point_value;
30052 optional<int64_t> zero_point_bdim;
30053 std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
30054 auto results = batch_rule(self_value, self_bdim, observer_on_value, observer_on_bdim, fake_quant_on_value, fake_quant_on_bdim, running_min_value, running_min_bdim, running_max_value, running_max_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
30055 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
30056}
30057template <typename batch_rule_t, batch_rule_t batch_rule>
30058void lstm_mps_backward_out_generated_plumbing(const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
30059 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30060 auto maybe_layer = maybeCurrentDynamicLayer();
30061 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30062 int64_t cur_level = maybe_layer->layerId();
30063 if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level)) {
30064 return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
30065 }
30066 Tensor grad_y_value;
30067 optional<int64_t> grad_y_bdim;
30068 std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y, cur_level);
30069 Tensor z_state_value;
30070 optional<int64_t> z_state_bdim;
30071 std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level);
30072 Tensor cell_state_fwd_value;
30073 optional<int64_t> cell_state_fwd_bdim;
30074 std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level);
30075 Tensor input_value;
30076 optional<int64_t> input_bdim;
30077 std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
30078 Tensor out0_value;
30079 optional<int64_t> out0_bdim;
30080 std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
30081 optional<Tensor> grad_hy_value;
30082 optional<int64_t> grad_hy_bdim;
30083 if (grad_hy) {
30084 std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
30085 }
30086 optional<Tensor> grad_cy_value;
30087 optional<int64_t> grad_cy_bdim;
30088 if (grad_cy) {
30089 std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
30090 }
30091 batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_value, out0_bdim, out1, out2);
30092}
30093template <typename batch_rule_t, batch_rule_t batch_rule>
30094at::Tensor set_source_Storage_generated_plumbing(const at::Tensor & self, at::Storage source) {
30095 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30096 auto maybe_layer = maybeCurrentDynamicLayer();
30097 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30098 int64_t cur_level = maybe_layer->layerId();
30099 if (!isBatchedAtLevel(self, cur_level)) {
30100 return at::_ops::set_source_Storage::call(self, source);
30101 }
30102 Tensor self_value;
30103 optional<int64_t> self_bdim;
30104 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30105 auto results = batch_rule(self_value, self_bdim, source);
30106 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30107}
30108template <typename batch_rule_t, batch_rule_t batch_rule>
30109at::Tensor set_source_Storage_storage_offset_generated_plumbing(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
30110 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30111 auto maybe_layer = maybeCurrentDynamicLayer();
30112 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30113 int64_t cur_level = maybe_layer->layerId();
30114 if (!isBatchedAtLevel(self, cur_level)) {
30115 return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
30116 }
30117 Tensor self_value;
30118 optional<int64_t> self_bdim;
30119 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30120 auto results = batch_rule(self_value, self_bdim, source, storage_offset, size, stride);
30121 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30122}
30123template <typename batch_rule_t, batch_rule_t batch_rule>
30124at::Tensor set_source_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & source) {
30125 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30126 auto maybe_layer = maybeCurrentDynamicLayer();
30127 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30128 int64_t cur_level = maybe_layer->layerId();
30129 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
30130 return at::_ops::set_source_Tensor::call(self, source);
30131 }
30132 Tensor self_value;
30133 optional<int64_t> self_bdim;
30134 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30135 Tensor source_value;
30136 optional<int64_t> source_bdim;
30137 std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
30138 auto results = batch_rule(self_value, self_bdim, source_value, source_bdim);
30139 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30140}
30141template <typename batch_rule_t, batch_rule_t batch_rule>
30142at::Tensor set_generated_plumbing(const at::Tensor & self) {
30143 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30144 auto maybe_layer = maybeCurrentDynamicLayer();
30145 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30146 int64_t cur_level = maybe_layer->layerId();
30147 if (!isBatchedAtLevel(self, cur_level)) {
30148 return at::_ops::set::call(self);
30149 }
30150 Tensor self_value;
30151 optional<int64_t> self_bdim;
30152 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30153 auto results = batch_rule(self_value, self_bdim);
30154 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30155}
30156template <typename batch_rule_t, batch_rule_t batch_rule>
30157at::Tensor random_from_generated_plumbing(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
30158 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30159 auto maybe_layer = maybeCurrentDynamicLayer();
30160 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30161 int64_t cur_level = maybe_layer->layerId();
30162 if (!isBatchedAtLevel(self, cur_level)) {
30163 return at::_ops::random_from::call(self, from, to, generator);
30164 }
30165 Tensor self_value;
30166 optional<int64_t> self_bdim;
30167 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30168 auto results = batch_rule(self_value, self_bdim, from, to, generator);
30169 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30170}
30171template <typename batch_rule_t, batch_rule_t batch_rule>
30172at::Tensor random_to_generated_plumbing(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
30173 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30174 auto maybe_layer = maybeCurrentDynamicLayer();
30175 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30176 int64_t cur_level = maybe_layer->layerId();
30177 if (!isBatchedAtLevel(self, cur_level)) {
30178 return at::_ops::random_to::call(self, to, generator);
30179 }
30180 Tensor self_value;
30181 optional<int64_t> self_bdim;
30182 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30183 auto results = batch_rule(self_value, self_bdim, to, generator);
30184 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30185}
30186template <typename batch_rule_t, batch_rule_t batch_rule>
30187at::Tensor random_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
30188 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30189 auto maybe_layer = maybeCurrentDynamicLayer();
30190 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30191 int64_t cur_level = maybe_layer->layerId();
30192 if (!isBatchedAtLevel(self, cur_level)) {
30193 return at::_ops::random::call(self, generator);
30194 }
30195 Tensor self_value;
30196 optional<int64_t> self_bdim;
30197 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30198 auto results = batch_rule(self_value, self_bdim, generator);
30199 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30200}
30201template <typename batch_rule_t, batch_rule_t batch_rule>
30202at::Tensor uniform_generated_plumbing(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
30203 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30204 auto maybe_layer = maybeCurrentDynamicLayer();
30205 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30206 int64_t cur_level = maybe_layer->layerId();
30207 if (!isBatchedAtLevel(self, cur_level)) {
30208 return at::_ops::uniform::call(self, from, to, generator);
30209 }
30210 Tensor self_value;
30211 optional<int64_t> self_bdim;
30212 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30213 auto results = batch_rule(self_value, self_bdim, from, to, generator);
30214 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30215}
30216template <typename batch_rule_t, batch_rule_t batch_rule>
30217at::Tensor cauchy_generated_plumbing(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
30218 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30219 auto maybe_layer = maybeCurrentDynamicLayer();
30220 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30221 int64_t cur_level = maybe_layer->layerId();
30222 if (!isBatchedAtLevel(self, cur_level)) {
30223 return at::_ops::cauchy::call(self, median, sigma, generator);
30224 }
30225 Tensor self_value;
30226 optional<int64_t> self_bdim;
30227 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30228 auto results = batch_rule(self_value, self_bdim, median, sigma, generator);
30229 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30230}
30231template <typename batch_rule_t, batch_rule_t batch_rule>
30232at::Tensor log_normal_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
30233 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30234 auto maybe_layer = maybeCurrentDynamicLayer();
30235 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30236 int64_t cur_level = maybe_layer->layerId();
30237 if (!isBatchedAtLevel(self, cur_level)) {
30238 return at::_ops::log_normal::call(self, mean, std, generator);
30239 }
30240 Tensor self_value;
30241 optional<int64_t> self_bdim;
30242 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30243 auto results = batch_rule(self_value, self_bdim, mean, std, generator);
30244 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30245}
30246template <typename batch_rule_t, batch_rule_t batch_rule>
30247at::Tensor exponential_generated_plumbing(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
30248 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30249 auto maybe_layer = maybeCurrentDynamicLayer();
30250 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30251 int64_t cur_level = maybe_layer->layerId();
30252 if (!isBatchedAtLevel(self, cur_level)) {
30253 return at::_ops::exponential::call(self, lambd, generator);
30254 }
30255 Tensor self_value;
30256 optional<int64_t> self_bdim;
30257 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30258 auto results = batch_rule(self_value, self_bdim, lambd, generator);
30259 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30260}
30261template <typename batch_rule_t, batch_rule_t batch_rule>
30262at::Tensor geometric_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
30263 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30264 auto maybe_layer = maybeCurrentDynamicLayer();
30265 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30266 int64_t cur_level = maybe_layer->layerId();
30267 if (!isBatchedAtLevel(self, cur_level)) {
30268 return at::_ops::geometric::call(self, p, generator);
30269 }
30270 Tensor self_value;
30271 optional<int64_t> self_bdim;
30272 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30273 auto results = batch_rule(self_value, self_bdim, p, generator);
30274 return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
30275}
30276template <typename batch_rule_t, batch_rule_t batch_rule>
30277void _histogramdd_bin_edges_out_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {
30278 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30279 auto maybe_layer = maybeCurrentDynamicLayer();
30280 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30281 int64_t cur_level = maybe_layer->layerId();
30282 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30283 return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out);
30284 }
30285 Tensor self_value;
30286 optional<int64_t> self_bdim;
30287 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30288 optional<Tensor> weight_value;
30289 optional<int64_t> weight_bdim;
30290 if (weight) {
30291 std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
30292 }
30293 batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density, out);
30294}
30295template <typename batch_rule_t, batch_rule_t batch_rule>
30296void _amp_foreach_non_finite_check_and_unscale_out_generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
30297 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30298 auto maybe_layer = maybeCurrentDynamicLayer();
30299 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30300 int64_t cur_level = maybe_layer->layerId();
30301 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30302 return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
30303 }
30304 Tensor found_inf_value;
30305 optional<int64_t> found_inf_bdim;
30306 std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
30307 Tensor inv_scale_value;
30308 optional<int64_t> inv_scale_bdim;
30309 std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
30310 batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim, out);
30311}
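// For ops whose batch rule produces a std::vector<Tensor>, the generated code
// re-wraps the whole vector with makeBatchedVector, while single Tensor
// results still go through makeBatched -- see the tuple handling in the
// wrapper below.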
30312template <typename batch_rule_t, batch_rule_t batch_rule>
30313::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale_generated_plumbing(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
30314 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30315 auto maybe_layer = maybeCurrentDynamicLayer();
30316 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30317 int64_t cur_level = maybe_layer->layerId();
30318 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
30319 return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale);
30320 }
30321 Tensor found_inf_value;
30322 optional<int64_t> found_inf_bdim;
30323 std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
30324 Tensor inv_scale_value;
30325 optional<int64_t> inv_scale_bdim;
30326 std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
30327 auto results = batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
30328 return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
30329}
30330template <typename batch_rule_t, batch_rule_t batch_rule>
30331::std::tuple<at::Tensor,at::Tensor> _amp_update_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
30332 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30333 auto maybe_layer = maybeCurrentDynamicLayer();
30334 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30335 int64_t cur_level = maybe_layer->layerId();
30336 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(growth_tracker, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
30337 return at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
30338 }
30339 Tensor self_value;
30340 optional<int64_t> self_bdim;
30341 std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
30342 Tensor growth_tracker_value;
30343 optional<int64_t> growth_tracker_bdim;
30344 std::tie(growth_tracker_value, growth_tracker_bdim) = unwrapTensorAtLevel(growth_tracker, cur_level);
30345 Tensor found_inf_value;
30346 optional<int64_t> found_inf_bdim;
30347 std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
30348 auto results = batch_rule(self_value, self_bdim, growth_tracker_value, growth_tracker_bdim, found_inf_value, found_inf_bdim, scale_growth_factor, scale_backoff_factor, growth_interval);
30349 return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
30350}
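// The run of _foreach_* out-variant wrappers below follows one shape: check
// whether any of the TensorLists is batched at the current level, fall through
// to the plain ATen op if not, and otherwise forward the lists and scalars
// directly to the batch rule.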
30351template <typename batch_rule_t, batch_rule_t batch_rule>
30352void _foreach_add_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
30353 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30354 auto maybe_layer = maybeCurrentDynamicLayer();
30355 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30356 int64_t cur_level = maybe_layer->layerId();
30357 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30358 return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
30359 }
30360
30361 batch_rule(self, scalar, out);
30362}
30363template <typename batch_rule_t, batch_rule_t batch_rule>
30364void _foreach_sub_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
30365 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30366 auto maybe_layer = maybeCurrentDynamicLayer();
30367 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30368 int64_t cur_level = maybe_layer->layerId();
30369 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30370 return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
30371 }
30372
30373 batch_rule(self, scalar, out);
30374}
30375template <typename batch_rule_t, batch_rule_t batch_rule>
30376void _foreach_mul_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
30377 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30378 auto maybe_layer = maybeCurrentDynamicLayer();
30379 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30380 int64_t cur_level = maybe_layer->layerId();
30381 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30382 return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
30383 }
30384
30385 batch_rule(self, scalar, out);
30386}
30387template <typename batch_rule_t, batch_rule_t batch_rule>
30388void _foreach_div_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
30389 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30390 auto maybe_layer = maybeCurrentDynamicLayer();
30391 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30392 int64_t cur_level = maybe_layer->layerId();
30393 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30394 return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
30395 }
30396
30397 batch_rule(self, scalar, out);
30398}
30399template <typename batch_rule_t, batch_rule_t batch_rule>
30400void _foreach_clamp_min_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
30401 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30402 auto maybe_layer = maybeCurrentDynamicLayer();
30403 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30404 int64_t cur_level = maybe_layer->layerId();
30405 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30406 return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
30407 }
30408
30409 batch_rule(self, scalar, out);
30410}
30411template <typename batch_rule_t, batch_rule_t batch_rule>
30412void _foreach_clamp_max_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
30413 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30414 auto maybe_layer = maybeCurrentDynamicLayer();
30415 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30416 int64_t cur_level = maybe_layer->layerId();
30417 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30418 return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
30419 }
30420
30421 batch_rule(self, scalar, out);
30422}
30423template <typename batch_rule_t, batch_rule_t batch_rule>
30424void _foreach_maximum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
30425 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30426 auto maybe_layer = maybeCurrentDynamicLayer();
30427 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30428 int64_t cur_level = maybe_layer->layerId();
30429 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30430 return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out);
30431 }
30432
30433 batch_rule(self, scalar, out);
30434}
30435template <typename batch_rule_t, batch_rule_t batch_rule>
30436void _foreach_minimum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
30437 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30438 auto maybe_layer = maybeCurrentDynamicLayer();
30439 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30440 int64_t cur_level = maybe_layer->layerId();
30441 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30442 return at::_ops::_foreach_minimum_Scalar_out::call(self, scalar, out);
30443 }
30444
30445 batch_rule(self, scalar, out);
30446}
30447template <typename batch_rule_t, batch_rule_t batch_rule>
30448void _foreach_add_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
30449 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30450 auto maybe_layer = maybeCurrentDynamicLayer();
30451 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30452 int64_t cur_level = maybe_layer->layerId();
30453 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30454 return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
30455 }
30456
30457 batch_rule(self, other, alpha, out);
30458}
30459template <typename batch_rule_t, batch_rule_t batch_rule>
30460void _foreach_sub_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
30461 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30462 auto maybe_layer = maybeCurrentDynamicLayer();
30463 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30464 int64_t cur_level = maybe_layer->layerId();
30465 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30466 return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
30467 }
30468
30469 batch_rule(self, other, alpha, out);
30470}
30471template <typename batch_rule_t, batch_rule_t batch_rule>
30472void _foreach_mul_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
30473 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30474 auto maybe_layer = maybeCurrentDynamicLayer();
30475 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30476 int64_t cur_level = maybe_layer->layerId();
30477 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30478 return at::_ops::_foreach_mul_List_out::call(self, other, out);
30479 }
30480
30481 batch_rule(self, other, out);
30482}
30483template <typename batch_rule_t, batch_rule_t batch_rule>
30484void _foreach_div_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
30485 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30486 auto maybe_layer = maybeCurrentDynamicLayer();
30487 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30488 int64_t cur_level = maybe_layer->layerId();
30489 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30490 return at::_ops::_foreach_div_List_out::call(self, other, out);
30491 }
30492
30493 batch_rule(self, other, out);
30494}
30495template <typename batch_rule_t, batch_rule_t batch_rule>
30496void _foreach_clamp_min_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
30497 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30498 auto maybe_layer = maybeCurrentDynamicLayer();
30499 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30500 int64_t cur_level = maybe_layer->layerId();
30501 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30502 return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
30503 }
30504
30505 batch_rule(self, other, out);
30506}
30507template <typename batch_rule_t, batch_rule_t batch_rule>
30508void _foreach_clamp_max_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
30509 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30510 auto maybe_layer = maybeCurrentDynamicLayer();
30511 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30512 int64_t cur_level = maybe_layer->layerId();
30513 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30514 return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
30515 }
30516
30517 batch_rule(self, other, out);
30518}
30519template <typename batch_rule_t, batch_rule_t batch_rule>
30520void _foreach_maximum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
30521 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30522 auto maybe_layer = maybeCurrentDynamicLayer();
30523 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30524 int64_t cur_level = maybe_layer->layerId();
30525 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30526 return at::_ops::_foreach_maximum_List_out::call(self, other, out);
30527 }
30528
30529 batch_rule(self, other, out);
30530}
30531template <typename batch_rule_t, batch_rule_t batch_rule>
30532void _foreach_minimum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
30533 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30534 auto maybe_layer = maybeCurrentDynamicLayer();
30535 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30536 int64_t cur_level = maybe_layer->layerId();
30537 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30538 return at::_ops::_foreach_minimum_List_out::call(self, other, out);
30539 }
30540
30541 batch_rule(self, other, out);
30542}
30543template <typename batch_rule_t, batch_rule_t batch_rule>
30544void _foreach_add_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
30545 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30546 auto maybe_layer = maybeCurrentDynamicLayer();
30547 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30548 int64_t cur_level = maybe_layer->layerId();
30549 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30550 return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
30551 }
30552
30553 batch_rule(self, scalars, out);
30554}
30555template <typename batch_rule_t, batch_rule_t batch_rule>
30556void _foreach_sub_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
30557 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30558 auto maybe_layer = maybeCurrentDynamicLayer();
30559 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30560 int64_t cur_level = maybe_layer->layerId();
30561 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30562 return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
30563 }
30564
30565 batch_rule(self, scalars, out);
30566}
30567template <typename batch_rule_t, batch_rule_t batch_rule>
30568void _foreach_div_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
30569 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30570 auto maybe_layer = maybeCurrentDynamicLayer();
30571 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30572 int64_t cur_level = maybe_layer->layerId();
30573 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30574 return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
30575 }
30576
30577 batch_rule(self, scalars, out);
30578}
30579template <typename batch_rule_t, batch_rule_t batch_rule>
30580void _foreach_mul_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
30581 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30582 auto maybe_layer = maybeCurrentDynamicLayer();
30583 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30584 int64_t cur_level = maybe_layer->layerId();
30585 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30586 return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
30587 }
30588
30589 batch_rule(self, scalars, out);
30590}
30591template <typename batch_rule_t, batch_rule_t batch_rule>
30592void _foreach_clamp_min_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
30593 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30594 auto maybe_layer = maybeCurrentDynamicLayer();
30595 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30596 int64_t cur_level = maybe_layer->layerId();
30597 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30598 return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
30599 }
30600
30601 batch_rule(self, scalars, out);
30602}
30603template <typename batch_rule_t, batch_rule_t batch_rule>
30604void _foreach_clamp_max_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
30605 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30606 auto maybe_layer = maybeCurrentDynamicLayer();
30607 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30608 int64_t cur_level = maybe_layer->layerId();
30609 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30610 return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
30611 }
30612
30613 batch_rule(self, scalars, out);
30614}
30615template <typename batch_rule_t, batch_rule_t batch_rule>
30616void _foreach_maximum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
30617 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30618 auto maybe_layer = maybeCurrentDynamicLayer();
30619 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30620 int64_t cur_level = maybe_layer->layerId();
30621 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30622 return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out);
30623 }
30624
30625 batch_rule(self, scalars, out);
30626}
30627template <typename batch_rule_t, batch_rule_t batch_rule>
30628void _foreach_minimum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
30629 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30630 auto maybe_layer = maybeCurrentDynamicLayer();
30631 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30632 int64_t cur_level = maybe_layer->layerId();
30633 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30634 return at::_ops::_foreach_minimum_ScalarList_out::call(self, scalars, out);
30635 }
30636
30637 batch_rule(self, scalars, out);
30638}
30639template <typename batch_rule_t, batch_rule_t batch_rule>
30640void _foreach_exp_out_generated_plumbing(at::TensorList self, at::TensorList out) {
30641 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30642 auto maybe_layer = maybeCurrentDynamicLayer();
30643 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30644 int64_t cur_level = maybe_layer->layerId();
30645 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30646 return at::_ops::_foreach_exp_out::call(self, out);
30647 }
30648
30649 batch_rule(self, out);
30650}
30651template <typename batch_rule_t, batch_rule_t batch_rule>
30652void _foreach_zero_out_generated_plumbing(at::TensorList self, at::TensorList out) {
30653 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30654 auto maybe_layer = maybeCurrentDynamicLayer();
30655 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
30656 int64_t cur_level = maybe_layer->layerId();
30657 if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
30658 return at::_ops::_foreach_zero_out::call(self, out);
30659 }
30660
30661 batch_rule(self, out);
30662}
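// Unlike the out variants above, _foreach_zero below has a functional form
// that returns std::vector<Tensor>; its results are re-wrapped via
// makeBatchedVector.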
30663template <typename batch_rule_t, batch_rule_t batch_rule>
30664::std::vector<at::Tensor> _foreach_zero_generated_plumbing(at::TensorList self) {
30665 c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
30666 auto maybe_layer = maybeCurrentDynamicLayer();
30667 vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
30668 int64_t cur_level = maybe_layer->layerId();
30669 if (!isBatchedAtLevel(self, cur_level)) {
30670 return at::_ops::_foreach_zero::call(self);
30671 }
30672
30673 auto results = batch_rule(self);
30674 return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
30675}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sqrt_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sqrt_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_abs_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_abs_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_acos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_acos_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_asin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_asin_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_atan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_atan_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_ceil_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_ceil_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_cos_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_cosh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_cosh_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erf_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_erf_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_erfc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_erfc_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_expm1_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_expm1_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_floor_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_floor_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log10_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log10_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log1p_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log1p_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_log2_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_log2_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_neg_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_neg_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_tan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_tan_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_tanh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_tanh_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sin_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sinh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sinh_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_round_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_round_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lgamma_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lgamma_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_frac_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_frac_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_reciprocal_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_reciprocal_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_sigmoid_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_sigmoid_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_trunc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_trunc_out::call(self, out);
  }

  batch_rule(self, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcdiv_Scalar_out::call(self, tensor1, tensor2, value, out);
  }

  batch_rule(self, tensor1, tensor2, value, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out);
  }

  batch_rule(self, tensor1, tensor2, value, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcdiv_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
  }

  batch_rule(self, tensor1, tensor2, scalars, out);
}
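// For the Tensor-scalar overloads below, the `scalars` tensor may itself be batched, so it is
// unwrapped into a value/bdim pair before being handed to the batch rule.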
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcdiv_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcdiv_Tensor_out::call(self, tensor1, tensor2, scalars, out);
  }
  Tensor scalars_value;
  optional<int64_t> scalars_bdim;
  std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
  }

  batch_rule(self, tensor1, tensor2, scalars, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_addcmul_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out);
  }
  Tensor scalars_value;
  optional<int64_t> scalars_bdim;
  std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
  batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_norm_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & ord, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_norm_Scalar_out::call(self, ord, out);
  }

  batch_rule(self, ord, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lerp_List_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
  }

  batch_rule(self, tensors1, weights, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _foreach_lerp_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
  }

  batch_rule(self, tensors1, weight, out);
}
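// The fused optimizer plumbings below additionally unwrap the optional grad_scale and
// found_inf tensors when they are present.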
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adam_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
}
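// The functional variants below re-wrap each of the five result vectors with
// makeBatchedVector at the current level before returning them.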
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adamw_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_fused_adamw_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
}

}} // namespace at::functorch