1 | #pragma once |
2 | |
3 | // NB: Must be at the top of file to avoid including the deprecated "math.h". |
4 | // https://stackoverflow.com/questions/6563810/m-pi-works-with-math-h-but-not-with-cmath-in-visual-studio |
5 | #ifdef _MSC_VER |
6 | #ifndef _USE_MATH_DEFINES |
7 | #define _USE_MATH_DEFINES |
8 | #endif |
9 | #include <cmath> |
10 | #endif |
11 | |
12 | #include <ATen/ATen.h> |
13 | #include <torch/csrc/autograd/generated/Functions.h> |
14 | |
15 | namespace torch { |
16 | namespace autograd { |
17 | namespace generated { |
18 | namespace details { |
19 | |
// Shared error-message text used when a cuDNN double backward is requested.
// Declared here, defined in the corresponding .cpp (the `extern` guarantees a
// single out-of-line definition). NOTE(review): exact wording lives in the
// defining translation unit — confirm there before relying on its content.
extern const char* kCudnnDoubleBackwardMsg;
21 | |
22 | // A simple way to imperatively compute index ranges for slots |
23 | // that have been flattened |
24 | struct IndexRangeGenerator { |
25 | IndexRange range(size_t range_size) { |
26 | i += range_size; |
27 | return {i - range_size, i}; |
28 | } |
29 | size_t size() { |
30 | return i; |
31 | } |
32 | |
33 | private: |
34 | size_t i = 0; |
35 | }; |
36 | |
// Helpers that unwrap a c10::optional<Tensor> into a plain Tensor for use in
// backward/forward-AD formulas. NOTE(review): judging by the names, these
// return the forward gradient, the primal value, and the tensor itself
// respectively, presumably yielding an undefined Tensor when the optional is
// empty — confirm against the definitions in the .cpp.
Tensor toNonOptFwGrad(const c10::optional<Tensor>& t);
Tensor toNonOptPrimal(const c10::optional<Tensor>& t);
Tensor toNonOptTensor(const c10::optional<Tensor>& t);
40 | |
41 | Tensor apply_loss_reduction(const Tensor& unreduced, int64_t reduction); |
42 | bool any_variable_defined(const variable_list& variables); |
43 | void copy_range(variable_list& out, IndexRange range, const at::Tensor& t); |
44 | void copy_range( |
45 | variable_list& out, |
46 | IndexRange range, |
47 | at::ArrayRef<at::Tensor> t); |
48 | at::Tensor copysign_tensor_self_backward( |
49 | const Tensor& grad, |
50 | const Tensor& self, |
51 | const Tensor& result); |
52 | at::Tensor not_implemented(const char* name, const char* reason = "" ); |
53 | std::vector<Tensor> not_implemented_list( |
54 | const char* name, |
55 | const char* reason = "" ); |
56 | at::Tensor handle_r_to_c(ScalarType self_st, Tensor gradient_result); |
57 | at::Tensor maybe_multiply(const at::Tensor& t, const at::Scalar& s); |
58 | int64_t _safe_size(IntArrayRef sizes, IntArrayRef dim); |
59 | Tensor restore_reduced_dims( |
60 | const Tensor& output, |
61 | IntArrayRef dims, |
62 | bool keepdim); |
63 | Tensor scale_grad_by_count( |
64 | const Tensor& grad, |
65 | const Tensor& mask, |
66 | IntArrayRef dims); |
67 | at::Tensor norm_backward( |
68 | const at::Tensor& grad, |
69 | const at::Tensor& self, |
70 | const optional<at::Scalar>& p_, |
71 | const at::Tensor& norm); |
72 | at::Tensor norm_backward( |
73 | at::Tensor grad, |
74 | const at::Tensor& self, |
75 | const optional<at::Scalar>& p_, |
76 | at::Tensor norm, |
77 | at::IntArrayRef dim, |
78 | bool keepdim); |
79 | Tensor norm_jvp( |
80 | const Tensor& self_p, |
81 | const Tensor& self_t, |
82 | const optional<Scalar>& p_, |
83 | Tensor norm, |
84 | IntArrayRef dim, |
85 | bool keepdim); |
86 | Tensor norm_jvp( |
87 | const Tensor& grad, |
88 | const Tensor& self, |
89 | const optional<Scalar>& p_, |
90 | Tensor norm); |
91 | Tensor _nested_from_padded_backward( |
92 | const Tensor& grad, |
93 | const Tensor& input, |
94 | const bool do_transform_0213); |
95 | Tensor linalg_vector_norm_jvp( |
96 | const Tensor& self_p, |
97 | const Tensor& self_t, |
98 | const Scalar& scalar_ord, |
99 | Tensor norm, |
100 | const at::OptionalIntArrayRef& opt_dim, |
101 | bool keepdim); |
102 | at::Tensor linalg_vector_norm_backward( |
103 | at::Tensor grad, |
104 | const at::Tensor& self, |
105 | const at::Scalar& ord, |
106 | at::Tensor norm, |
107 | const at::OptionalIntArrayRef& opt_dim, |
108 | bool keepdim); |
109 | at::Tensor pow_backward( |
110 | at::Tensor grad, |
111 | const at::Tensor& self, |
112 | const at::Scalar& exponent_); |
113 | at::Tensor pow_backward_self( |
114 | at::Tensor grad, |
115 | const at::Tensor& self, |
116 | const at::Tensor& exponent); |
117 | at::Tensor pow_backward_exponent( |
118 | at::Tensor grad, |
119 | const at::Tensor& self, |
120 | const at::Tensor& exponent, |
121 | at::Tensor result); |
122 | at::Tensor pow_backward_exponent( |
123 | at::Tensor grad, |
124 | const at::Scalar& base, |
125 | const at::Tensor& exponent, |
126 | at::Tensor result); |
127 | at::Tensor angle_backward(at::Tensor grad, const at::Tensor& self); |
128 | template <typename T> |
129 | at::Tensor mul_tensor_backward(Tensor grad, T other, ScalarType self_st); |
130 | template <typename T> |
131 | at::Tensor div_tensor_self_backward(Tensor grad, T other, ScalarType self_st); |
132 | at::Tensor div_tensor_other_backward(Tensor grad, Tensor self, Tensor other); |
133 | template <typename T> |
134 | at::Tensor div_tensor_self_backward( |
135 | Tensor grad, |
136 | T other, |
137 | ScalarType self_st, |
138 | const c10::optional<c10::string_view>& rounding_mode); |
139 | at::Tensor div_tensor_other_backward( |
140 | Tensor grad, |
141 | Tensor self, |
142 | Tensor other, |
143 | const c10::optional<c10::string_view>& rounding_mode); |
144 | at::Tensor mvlgamma_backward( |
145 | at::Tensor grad, |
146 | const at::Tensor& self, |
147 | int64_t p); |
148 | at::Tensor permute_backwards(const at::Tensor& grad, at::IntArrayRef fwd_dims); |
149 | at::Tensor rad2deg_backward(const at::Tensor& grad); |
150 | at::Tensor deg2rad_backward(const at::Tensor& grad); |
151 | at::Tensor unsqueeze_multiple( |
152 | const at::Tensor& t, |
153 | at::OptionalIntArrayRef opt_dim, |
154 | size_t n_dims); |
155 | at::Tensor sum_backward( |
156 | const at::Tensor& grad, |
157 | at::SymIntArrayRef sizes, |
158 | at::OptionalIntArrayRef opt_dims, |
159 | bool keepdim); |
160 | at::Tensor sum_backward( |
161 | const at::Tensor& grad, |
162 | c10::SymIntArrayRef sizes, |
163 | c10::IntArrayRef dims, |
164 | bool keepdim); |
165 | at::Tensor nansum_backward( |
166 | const at::Tensor& grad, |
167 | const at::Tensor& self, |
168 | at::OptionalIntArrayRef dims, |
169 | bool keepdim); |
170 | std::vector<int64_t> reverse_list(const at::IntArrayRef list); |
171 | at::Tensor reverse_dim(const at::Tensor& t, int64_t dim); |
172 | at::Tensor prod_safe_zeros_backward( |
173 | const at::Tensor& grad, |
174 | const at::Tensor& inp, |
175 | int64_t dim); |
176 | at::Tensor prod_backward( |
177 | const at::Tensor& grad, |
178 | const at::Tensor& input, |
179 | const at::Tensor& result); |
180 | at::Tensor prod_backward( |
181 | at::Tensor grad, |
182 | const at::Tensor& input, |
183 | at::Tensor result, |
184 | int64_t dim, |
185 | bool keepdim); |
186 | at::Tensor solve_jvp( |
187 | const Tensor& X, |
188 | const Tensor& A, |
189 | const Tensor& dA, |
190 | const Tensor& dB); |
191 | at::Tensor solve_backward_self( |
192 | const at::Tensor& grad, |
193 | const at::Tensor& self, |
194 | const at::Tensor& A); |
195 | at::Tensor solve_backward_A( |
196 | const at::Tensor& grad, |
197 | const at::Tensor& self, |
198 | const at::Tensor& A, |
199 | const at::Tensor& solution); |
200 | at::Tensor cumsum_backward(const at::Tensor& grad, int64_t dim); |
201 | at::Tensor logsumexp_backward( |
202 | at::Tensor grad, |
203 | const at::Tensor& self, |
204 | at::Tensor result, |
205 | at::IntArrayRef dim, |
206 | bool keepdim); |
207 | at::Tensor logsumexp_jvp( |
208 | const at::Tensor& self_p, |
209 | const at::Tensor& self_t, |
210 | IntArrayRef dim, |
211 | bool keepdim); |
212 | at::Tensor logcumsumexp_backward( |
213 | at::Tensor grad, |
214 | const at::Tensor& self, |
215 | at::Tensor result, |
216 | int64_t dim); |
217 | at::Tensor unbind_backward(const variable_list& grads, int64_t dim); |
218 | at::Tensor unsqueeze_to(const at::Tensor& self, c10::SymIntArrayRef sym_sizes); |
219 | at::Tensor unsqueeze_to( |
220 | const at::Tensor& self, |
221 | int64_t dim, |
222 | c10::SymIntArrayRef sym_sizes); |
223 | at::Tensor unsqueeze_to( |
224 | const at::Tensor& self, |
225 | IntArrayRef dim, |
226 | c10::SymIntArrayRef sym_sizes); |
227 | std::vector<at::Tensor> cat_tensors_backward( |
228 | const at::Tensor& grad, |
229 | const std::vector<std::vector<c10::SymInt>>& sizes, |
230 | const std::vector<ScalarType>& dtypes, |
231 | int64_t dim); |
232 | std::vector<at::Tensor> stack_tensors_backward( |
233 | const at::Tensor& grad, |
234 | int64_t dim, |
235 | const std::vector<ScalarType>& dtypes); |
236 | std::vector<at::Tensor> block_diag_backward( |
237 | const at::Tensor& grad, |
238 | const std::vector<std::vector<int64_t>>& sizes, |
239 | const std::vector<ScalarType>& dtypes); |
240 | at::Tensor clamp_backward( |
241 | const at::Tensor& grad, |
242 | const at::Tensor& self, |
243 | const optional<at::Scalar>& min, |
244 | const optional<at::Scalar>& max); |
245 | at::Tensor clamp_backward( |
246 | const at::Tensor& grad, |
247 | const at::Tensor& self, |
248 | const at::Tensor& min, |
249 | const at::Tensor& max); |
250 | std::tuple<at::Tensor, at::Tensor> clamp_backward_min_max( |
251 | const at::Tensor& grad, |
252 | const at::Tensor& self, |
253 | const at::Tensor& min, |
254 | const at::Tensor& max, |
255 | const std::array<bool, 2>&); |
256 | at::Tensor clamp_jvp( |
257 | const Tensor& self_p, |
258 | const Tensor& self_t, |
259 | const Tensor& min_p, |
260 | const Tensor& min_t, |
261 | const Tensor& max_p, |
262 | const Tensor& max_t); |
263 | at::SymIntArrayRef strides_or_error( |
264 | const Tensor& input, |
265 | c10::string_view const& input_name); |
266 | at::Tensor mm_mat1_backward( |
267 | const Tensor& grad, |
268 | const Tensor& mat2, |
269 | at::SymIntArrayRef mat1_sizes, |
270 | at::SymIntArrayRef mat1_strides, |
271 | c10::Layout mat1_layout, |
272 | const Scalar& alpha); |
273 | at::Tensor mm_mat2_backward( |
274 | const at::Tensor& grad, |
275 | const at::Tensor& mat1, |
276 | at::SymIntArrayRef sizes, |
277 | at::SymIntArrayRef strides, |
278 | c10::Layout layout, |
279 | const at::Scalar& alpha); |
280 | at::Tensor mm_mat1_sparse_backward( |
281 | const at::Tensor& grad, |
282 | const at::Tensor& mat1, |
283 | const at::Tensor& mat2, |
284 | const at::Scalar& alpha); |
285 | at::Tensor sparse_sparse_matmul_backward( |
286 | const at::Tensor& grad, |
287 | const at::Tensor& mat1, |
288 | const at::Tensor& mat2, |
289 | int64_t grad_order); |
290 | at::Tensor renorm_backward( |
291 | const at::Tensor& grad, |
292 | const at::Tensor& self, |
293 | const at::Scalar& p, |
294 | int64_t dim, |
295 | const at::Scalar& maxnorm); |
296 | at::Tensor repeat_backward( |
297 | at::Tensor grad, |
298 | at::SymIntArrayRef repeats, |
299 | at::SymIntArrayRef input_shape); |
300 | at::Tensor _fused_dropout_backward( |
301 | at::Tensor grad, |
302 | at::Tensor mask, |
303 | double p1m); |
304 | at::Tensor infinitely_differentiable_native_dropout_backward( |
305 | const at::Tensor& grad, |
306 | const at::Tensor& mask, |
307 | double scale); |
308 | at::Tensor native_dropout_double_backward( |
309 | const at::Tensor& ggI, |
310 | const at::Tensor& grad, |
311 | const at::Tensor& mask, |
312 | double scale); |
313 | at::Tensor evenly_distribute_backward( |
314 | at::Tensor grad, |
315 | const at::Tensor& input, |
316 | const at::Tensor& value); |
317 | Tensor sgn_backward(const Tensor& x, const Tensor& gx, const Tensor& sgn); |
318 | Tensor masked_fill_backward(const Tensor& grad, const Tensor& mask); |
319 | at::Tensor var_backward( |
320 | at::Tensor grad, |
321 | const at::Tensor& self, |
322 | at::OptionalIntArrayRef dim, |
323 | c10::optional<int64_t> correction, |
324 | bool keepdim); |
325 | at::Tensor var_jvp( |
326 | const at::Tensor& self_t, |
327 | const at::Tensor& self_p, |
328 | const at::Tensor& result, |
329 | at::OptionalIntArrayRef dim_opt, |
330 | c10::optional<int64_t> correction_opt, |
331 | bool keepdim); |
332 | at::Tensor std_backward( |
333 | const at::Tensor& result, |
334 | const at::Tensor& grad, |
335 | const at::Tensor& self, |
336 | at::OptionalIntArrayRef dim, |
337 | c10::optional<int64_t> correction, |
338 | bool keepdim); |
339 | Tensor mean_backward( |
340 | const Tensor& grad, |
341 | c10::SymIntArrayRef shape, |
342 | at::OptionalIntArrayRef opt_dim, |
343 | c10::SymInt numel, |
344 | bool keepdim); |
345 | Tensor var_mean_backward( |
346 | const Tensor& gvar, |
347 | const Tensor& gmean, |
348 | const Tensor& self, |
349 | at::OptionalIntArrayRef dim_opt, |
350 | c10::optional<int64_t> correction_opt, |
351 | bool keepdim); |
352 | Tensor std_mean_backward( |
353 | const Tensor& gstd, |
354 | const Tensor& gmean, |
355 | const Tensor& self, |
356 | const Tensor& std, |
357 | at::OptionalIntArrayRef dim_opt, |
358 | c10::optional<int64_t> correction_opt, |
359 | bool keepdim); |
360 | at::Tensor masked_scatter_backward( |
361 | const at::Tensor& grad, |
362 | const at::Tensor& mask, |
363 | c10::SymIntArrayRef sizes); |
364 | at::Tensor cholesky_backward( |
365 | const at::Tensor& grad, |
366 | bool upper, |
367 | const at::Tensor& L); |
368 | at::Tensor cholesky_jvp( |
369 | const at::Tensor& input_tangent, |
370 | const at::Tensor& L, |
371 | bool upper); |
372 | at::Tensor cholesky_inverse_backward( |
373 | at::Tensor grad, |
374 | at::Tensor L, |
375 | bool upper, |
376 | at::Tensor inverse); |
377 | at::Tensor cholesky_inverse_jvp( |
378 | const at::Tensor& F, |
379 | const at::Tensor& dF, |
380 | const at::Tensor& X, |
381 | bool upper); |
382 | Tensor pinv_jvp(const Tensor& A, const Tensor& pinvA, const Tensor& dA); |
383 | Tensor pinv_backward(const Tensor& grad, const Tensor& pinvA, const Tensor& A); |
384 | at::Tensor split_with_sizes_backward( |
385 | const std::vector<torch::autograd::Variable>& grads, |
386 | c10::SymIntArrayRef split_sizes, |
387 | int64_t dim, |
388 | c10::SymIntArrayRef sizes, |
389 | const at::TensorOptions& options); |
390 | at::Tensor split_backward( |
391 | const std::vector<torch::autograd::Variable>& grads, |
392 | c10::SymInt split_size, |
393 | int64_t dim, |
394 | c10::SymIntArrayRef sizes, |
395 | const at::TensorOptions& options); |
396 | at::Tensor max_pool_double_backward( |
397 | const at::Tensor& grad, |
398 | const at::Tensor& indices, |
399 | int dim); |
400 | at::Tensor glu_double_backward( |
401 | const at::Tensor& grad, |
402 | const at::Tensor& grad_output, |
403 | const at::Tensor& input, |
404 | int64_t dim); |
405 | at::Tensor glu_double_backward_grad_output( |
406 | const at::Tensor& grad, |
407 | const at::Tensor& input, |
408 | int64_t dim); |
409 | at::Tensor infinitely_differentiable_silu_backward( |
410 | const at::Tensor& grad_output, |
411 | const at::Tensor& input); |
412 | at::Tensor infinitely_differentiable_mish_backward( |
413 | const at::Tensor& grad_output, |
414 | const at::Tensor& input); |
415 | Tensor infinitely_differentiable_logit_backward( |
416 | const Tensor& grad, |
417 | const Tensor& self, |
418 | c10::optional<double> eps); |
419 | Tensor binary_cross_entropy_target_backward( |
420 | const Tensor& grad, |
421 | const Tensor& self, |
422 | const Tensor& target, |
423 | const c10::optional<Tensor>& weight, |
424 | int64_t reduction); |
425 | Tensor binary_cross_entropy_double_backward_target( |
426 | const Tensor& grad, |
427 | const Tensor& grad_output, |
428 | const Tensor& self, |
429 | const Tensor& target, |
430 | const c10::optional<Tensor>& weight, |
431 | int64_t reduction); |
432 | Tensor binary_cross_entropy_with_logits_backward( |
433 | const Tensor& grad, |
434 | const Tensor& input, |
435 | const Tensor& target, |
436 | const c10::optional<Tensor>& weight_opt, |
437 | const c10::optional<Tensor>& pos_weight_opt, |
438 | int64_t reduction); |
439 | at::Tensor binary_cross_entropy_with_logits_target_backward( |
440 | const at::Tensor& grad_output, |
441 | const at::Tensor& self, |
442 | const at::Tensor& target, |
443 | const c10::optional<at::Tensor>& weight, |
444 | const c10::optional<at::Tensor>& pos_weight, |
445 | int64_t reduction); |
446 | at::Tensor log_sigmoid_double_backward( |
447 | const at::Tensor& grad, |
448 | const at::Tensor& input); |
449 | at::Tensor softmax_double_backward( |
450 | const at::Tensor& grad, |
451 | const at::Tensor& grad_output, |
452 | int dim, |
453 | const at::Tensor& output); |
454 | at::Tensor binary_cross_entropy_double_backward( |
455 | const at::Tensor& grad_output, |
456 | const at::Tensor& grad, |
457 | const at::Tensor& input, |
458 | const at::Tensor& target, |
459 | const c10::optional<at::Tensor>& weight, |
460 | int64_t reduction); |
461 | at::Tensor binary_cross_entropy_double_backward_grad_output( |
462 | const at::Tensor& grad, |
463 | const at::Tensor& input, |
464 | const at::Tensor& target, |
465 | const c10::optional<at::Tensor>& weight, |
466 | int64_t reduction); |
467 | at::Tensor smooth_l1_loss_double_backward( |
468 | const at::Tensor& grad, |
469 | const at::Tensor& input, |
470 | const at::Tensor& target, |
471 | int64_t reduction, |
472 | double beta); |
473 | at::Tensor huber_loss_double_backward( |
474 | const at::Tensor& grad, |
475 | const at::Tensor& input, |
476 | const at::Tensor& target, |
477 | int64_t reduction, |
478 | double delta); |
479 | at::Tensor huber_loss_double_backward_grad_output( |
480 | const at::Tensor& grad, |
481 | const at::Tensor& grad_output, |
482 | const at::Tensor& input, |
483 | const at::Tensor& target, |
484 | int64_t reduction, |
485 | double delta); |
486 | at::Tensor mse_loss_double_backward( |
487 | const at::Tensor& grad, |
488 | const at::Tensor& input, |
489 | int64_t reduction); |
490 | at::Tensor soft_margin_loss_double_backward( |
491 | const at::Tensor& grad, |
492 | const at::Tensor& input, |
493 | const at::Tensor& target, |
494 | int64_t reduction); |
495 | at::Tensor soft_margin_loss_double_backward_grad_output( |
496 | const at::Tensor& grad, |
497 | const at::Tensor& grad_output, |
498 | const at::Tensor& input, |
499 | const at::Tensor& target, |
500 | int64_t reduction); |
501 | at::Tensor softplus_double_backward( |
502 | const at::Tensor& grad, |
503 | const at::Tensor& input, |
504 | const at::Scalar& beta, |
505 | const at::Scalar& threshold); |
506 | std::tuple<at::Tensor, at::Tensor> slogdet_jvp( |
507 | const at::Tensor& LU, |
508 | const at::Tensor& pivots, |
509 | const at::Tensor& dA, |
510 | const at::Tensor& sign, |
511 | const bool use_A_T); |
512 | at::Tensor slogdet_backward( |
513 | const at::Tensor& grad_sign, |
514 | const at::Tensor& grad_logabsdet, |
515 | const at::Tensor& A, |
516 | const at::Tensor& signdet, |
517 | const at::Tensor& LU, |
518 | const at::Tensor& pivots); |
519 | at::Tensor log1p_backward(const at::Tensor& grad, const at::Tensor& self); |
520 | at::Tensor sinc_backward(const at::Tensor& grad, const at::Tensor& self); |
521 | at::Tensor sparse_constructor_values_backward( |
522 | const at::Tensor& sparse_grad_out, |
523 | const at::Tensor& indices); |
524 | at::Tensor embedding_dense_double_backward_symint( |
525 | const at::Tensor& grad, |
526 | const at::Tensor& indices, |
527 | c10::SymInt padding_idx); |
528 | at::Tensor index_backward( |
529 | at::Tensor zeros_like_self, |
530 | const torch::List<c10::optional<Tensor>>& indices, |
531 | const at::Tensor& grad); |
532 | at::Tensor _cudnn_ctc_loss_backward( |
533 | const at::Tensor& grad_out, |
534 | const at::Tensor& loss, |
535 | const at::Tensor& raw_grad, |
536 | bool zero_infinity); |
537 | at::Tensor elu_double_backward( |
538 | const Tensor& grad, |
539 | const Tensor& grad_output, |
540 | const Scalar& alpha, |
541 | const Scalar& scale, |
542 | const Scalar& input_scale, |
543 | bool is_result, |
544 | const Tensor& self_or_result); |
545 | |
546 | Tensor svd_backward( |
547 | const Tensor& gU, |
548 | const Tensor& gS, |
549 | const Tensor& gVh, |
550 | const Tensor& U, |
551 | const Tensor& S, |
552 | const Tensor& Vh); |
553 | |
554 | std::tuple<Tensor, Tensor, Tensor> linalg_svd_jvp( |
555 | const Tensor& dA, |
556 | const Tensor& U, |
557 | const Tensor& S, |
558 | const Tensor& Vh, |
559 | const bool full_matrices); |
560 | Tensor slice_backward_wrapper( |
561 | const at::Tensor& grad, |
562 | const c10::SymIntArrayRef& input_sizes, |
563 | int64_t dim, |
564 | c10::optional<c10::SymInt> start, |
565 | c10::optional<c10::SymInt> end, |
566 | c10::SymInt step); |
567 | std::tuple<Tensor, Tensor> linalg_eig_jvp( |
568 | const Tensor& dA, |
569 | const Tensor& L, |
570 | const Tensor& V, |
571 | const bool is_hermitian); |
572 | Tensor linalg_eig_backward( |
573 | const Tensor& gL, |
574 | const Tensor& gV, |
575 | const Tensor& L, |
576 | const Tensor& V, |
577 | const bool is_hermitian, |
578 | const bool symeig_eigenvectors = true); |
579 | Tensor linalg_lstsq_jvp( |
580 | const Tensor& A, |
581 | const Tensor& B, |
582 | const Tensor& dA, |
583 | const Tensor& dB); |
584 | std::tuple<Tensor, Tensor> triangular_solve_backward( |
585 | const Tensor& grad_x, |
586 | const Tensor& grad_m, |
587 | const Tensor& b, |
588 | const Tensor& a, |
589 | const Tensor& x, |
590 | const bool upper, |
591 | const bool transpose, |
592 | const bool unitriangular, |
593 | std::array<bool, 2> output_mask); |
594 | Tensor triangular_solve_jvp( |
595 | const Tensor& X, |
596 | const Tensor& A, |
597 | const Tensor& dA, |
598 | const Tensor& dB, |
599 | const bool upper, |
600 | const bool transpose, |
601 | const bool unitriangular); |
602 | Tensor linalg_solve_triangular_forward_AD( |
603 | const Tensor& A_t, |
604 | const Tensor& B_t, |
605 | const Tensor& A, |
606 | const Tensor& X, |
607 | const bool upper, |
608 | const bool left, |
609 | const bool unitriangular); |
610 | std::tuple<Tensor, Tensor> linalg_solve_triangular_backward( |
611 | const Tensor& grad, |
612 | const Tensor& A, |
613 | const Tensor& X, |
614 | const bool upper, |
615 | const bool left, |
616 | const bool unitriangular, |
617 | std::array<bool, 2> output_mask); |
618 | std::tuple<Tensor, Tensor, Tensor> _trilinear_backward( |
619 | const Tensor& grad_out, |
620 | const Tensor& i1, |
621 | const Tensor& i2, |
622 | const Tensor& i3, |
623 | IntArrayRef expand1, |
624 | IntArrayRef expand2, |
625 | IntArrayRef expand3, |
626 | IntArrayRef sumdim, |
627 | std::array<bool, 3> grad_mask); |
628 | std::tuple<Tensor, Tensor> linalg_qr_jvp( |
629 | const Tensor& dA, |
630 | const Tensor& Q, |
631 | const Tensor& R, |
632 | const c10::string_view mode); |
633 | Tensor linalg_qr_backward( |
634 | const Tensor& gQ, |
635 | const Tensor& gR, |
636 | const Tensor& Q, |
637 | const Tensor& R, |
638 | const c10::string_view mode); |
639 | Tensor linalg_matrix_exp_differential( |
640 | const Tensor& self, |
641 | const Tensor& grad, |
642 | bool adjoint); |
643 | std::tuple<Tensor, Tensor, Tensor> batchnorm_double_backward( |
644 | const Tensor& input, |
645 | const c10::optional<Tensor>& gamma, |
646 | const Tensor& ggI, |
647 | const Tensor& ggG, |
648 | const Tensor& ggB, |
649 | const Tensor& gO, |
650 | const c10::optional<Tensor>& running_mean, |
651 | const c10::optional<Tensor>& running_var, |
652 | bool training, |
653 | double eps, |
654 | const c10::optional<Tensor>& save_mean, |
655 | const c10::optional<Tensor>& save_invstd, |
656 | std::array<bool, 3> output_mask); |
657 | std::tuple<Tensor, Tensor> _euclidean_dist_backward( |
658 | const Tensor& grad, |
659 | const Tensor& x1, |
660 | const Tensor& x2, |
661 | const Tensor& res); |
662 | Tensor fft_backward( |
663 | const Tensor& self, |
664 | const Tensor& grad, |
665 | int64_t signal_ndim, |
666 | bool complex_input, |
667 | bool complex_output, |
668 | bool inverse, |
669 | IntArrayRef checked_signal_sizes, |
670 | int64_t normalization, |
671 | bool onesided, |
672 | IntArrayRef output_sizes); |
673 | Tensor fft_r2c_backward( |
674 | const Tensor& grad, |
675 | at::IntArrayRef dim, |
676 | int64_t normalization, |
677 | bool onesided, |
678 | c10::SymInt last_dim_size); |
679 | Tensor fft_c2r_backward( |
680 | const Tensor& grad, |
681 | IntArrayRef dim, |
682 | int64_t normalization); |
683 | Tensor constant_pad_nd_backward(const Tensor& grad, c10::SymIntArrayRef pad); |
684 | std::tuple<Tensor, Tensor> cholesky_solve_backward( |
685 | const Tensor& grad_x, |
686 | const Tensor& self, |
687 | const Tensor& input2, |
688 | const Tensor& result, |
689 | const bool upper); |
690 | Tensor cholesky_solve_jvp( |
691 | const Tensor& X, |
692 | const Tensor& U, |
693 | const Tensor& dU, |
694 | const Tensor& dB, |
695 | const bool upper); |
696 | std::tuple<Tensor, Tensor, Tensor> |
697 | infinitely_differentiable_native_group_norm_backward( |
698 | const Tensor& dY, |
699 | const Tensor& dmean, |
700 | const Tensor& drstd, |
701 | const Tensor& X, |
702 | const Tensor& mean, |
703 | const Tensor& rstd, |
704 | const c10::optional<Tensor>& gamma, |
705 | c10::SymInt N, |
706 | c10::SymInt C, |
707 | c10::SymInt HxW, |
708 | int64_t group, |
709 | double eps, |
710 | std::array<bool, 3> grad_input_mask); |
711 | Tensor gelu_double_backward( |
712 | const Tensor& ggI, |
713 | const Tensor& gO, |
714 | const Tensor& input, |
715 | c10::string_view approximate); |
716 | Tensor as_strided_backward( |
717 | Tensor grad, |
718 | const TensorGeometry& input_geometry, |
719 | c10::SymIntArrayRef sizes, |
720 | c10::SymIntArrayRef strides, |
721 | optional<c10::SymInt> storage_offset_); |
722 | Tensor as_strided_scatter_backward( |
723 | Tensor grad, |
724 | const TensorGeometry& input_geometry, |
725 | TensorGeometry src_geometry, |
726 | c10::SymIntArrayRef sizes, |
727 | c10::SymIntArrayRef strides, |
728 | optional<c10::SymInt> storage_offset); |
729 | std::tuple<Tensor, Tensor> atan2_backward( |
730 | const Tensor& grad, |
731 | const Tensor& self, |
732 | const Tensor& other, |
733 | std::array<bool, 2> output_mask); |
734 | Tensor amaxamin_jvp( |
735 | const Tensor& x, |
736 | const Tensor& dx, |
737 | const Tensor& result, |
738 | IntArrayRef dim, |
739 | bool keepdim); |
740 | std::tuple<Tensor, Tensor, Tensor> layer_norm_double_backward( |
741 | const Tensor& input, |
742 | const c10::optional<Tensor>& gamma, |
743 | const Tensor& ggI, |
744 | const Tensor& ggG, |
745 | const Tensor& ggB, |
746 | const Tensor& gO, |
747 | const Tensor& save_mean, |
748 | const Tensor& save_invstd, |
749 | c10::SymIntArrayRef normalized_shape, |
750 | std::array<bool, 3> output_mask); |
751 | |
752 | std::tuple<Tensor, Tensor> householder_product_backward( |
753 | const Tensor& grad, |
754 | const Tensor& result, |
755 | const Tensor& input, |
756 | const Tensor& tau, |
757 | const bool flip_order = false); |
758 | Tensor householder_product_jvp( |
759 | const Tensor& dV, |
760 | const Tensor& dtau, |
761 | const Tensor& prod, |
762 | const Tensor& V, |
763 | const Tensor& tau); |
764 | std::tuple<Tensor, Tensor, Tensor> ormqr_backward( |
765 | const Tensor& grad, |
766 | const Tensor& result, |
767 | const Tensor& self, |
768 | const Tensor& tau, |
769 | const Tensor& other, |
770 | bool left, |
771 | bool transpose, |
772 | std::array<bool, 3> grad_output_mask); |
773 | std::tuple<Tensor, Tensor> polar_backward( |
774 | const Tensor& grad, |
775 | const Tensor& result); |
776 | Tensor i1_backward( |
777 | const Tensor& grad, |
778 | const Tensor& self, |
779 | const Tensor& result); |
780 | Tensor i1e_backward( |
781 | const Tensor& grad, |
782 | const Tensor& self, |
783 | const Tensor& result); |
784 | Tensor linalg_lu_solve_LU( |
785 | const Tensor& grad, |
786 | const Tensor& LU, |
787 | const Tensor& pivots, |
788 | const Tensor& X, |
789 | const bool left, |
790 | const bool adjoint); |
791 | Tensor linalg_lu_solve_jvp( |
792 | const Tensor& X, |
793 | const Tensor& LU, |
794 | const Tensor& pivots, |
795 | const Tensor& dLU, |
796 | const Tensor& dB, |
797 | const bool left, |
798 | const bool adjoint); |
799 | std::tuple<Tensor, Tensor> linalg_solve_backward( |
800 | const Tensor& gX, |
801 | const Tensor& X, |
802 | const Tensor& A, |
803 | const Tensor& LU, |
804 | const Tensor& pivots, |
805 | const bool left, |
806 | const bool B_requires_grad); |
807 | Tensor linalg_solve_jvp( |
808 | const Tensor& dA, |
809 | const Tensor& dB, |
810 | const Tensor& X, |
811 | const Tensor& LU, |
812 | const Tensor& pivots, |
813 | const bool left, |
814 | const bool use_A_T); |
815 | Tensor lu_unpack_backward( |
816 | const Tensor& L_grad, |
817 | const Tensor& U_grad, |
818 | const c10::SymInt m, |
819 | const c10::SymInt n); |
820 | |
821 | Tensor linalg_det_backward( |
822 | const Tensor& grad, |
823 | const Tensor& det, |
824 | const Tensor& A, |
825 | const Tensor& LU, |
826 | const Tensor& pivots); |
827 | Tensor linalg_det_jvp( |
828 | const Tensor& dA, |
829 | const Tensor& det, |
830 | const Tensor& LU, |
831 | const Tensor& pivots, |
832 | const bool use_A_T); |
833 | std::tuple<Tensor, Tensor> linalg_lstsq_backward( |
834 | const Tensor& grad, |
835 | const Tensor& A, |
836 | const Tensor& B_, |
837 | const std::array<bool, 2>& grad_input_mask); |
838 | Tensor linalg_lu_backward( |
839 | const Tensor& L_grad, |
840 | const Tensor& U_grad, |
841 | const Tensor& P, |
842 | const Tensor& L, |
843 | const Tensor& U, |
844 | const bool pivot); |
845 | |
846 | std::tuple<Tensor, Tensor> linalg_lu_jvp( |
847 | const Tensor& dA, |
848 | const Tensor& P, |
849 | const Tensor& L, |
850 | const Tensor& U, |
851 | const bool pivot); |
852 | |
853 | Tensor lu_factor_ex_backward( |
854 | const Tensor& grad, |
855 | const Tensor& LU, |
856 | const Tensor& pivs, |
857 | const bool pivot); |
858 | Tensor lu_factor_ex_jvp( |
859 | const Tensor& dX, |
860 | const Tensor& LU, |
861 | const Tensor& pivs, |
862 | const bool pivot); |
863 | |
864 | Tensor batch_norm_jvp( |
865 | const Tensor& input_p, |
866 | const Tensor& input_t, |
867 | const Tensor& weight_p, |
868 | const Tensor& weight_t, |
869 | const Tensor& bias_p, |
870 | const Tensor& bias_t, |
871 | const c10::optional<Tensor>& running_mean, |
872 | const c10::optional<Tensor>& running_var, |
873 | const Tensor& saved_mean, |
874 | const Tensor& saved_invstd, |
875 | bool train, |
876 | double eps); |
877 | |
878 | Tensor layer_norm_jvp( |
879 | const Tensor& input_p, |
880 | const Tensor& input_t, |
881 | const Tensor& weight_p, |
882 | const Tensor& weight_t, |
883 | const Tensor& bias_p, |
884 | const Tensor& bias_t, |
885 | const Tensor& saved_mean, |
886 | const Tensor& saved_invstd, |
887 | c10::SymIntArrayRef normalized_shape); |
888 | |
889 | Tensor group_norm_jvp( |
890 | const Tensor& input_p, |
891 | const Tensor& input_t, |
892 | const Tensor& weight_p, |
893 | const Tensor& weight_t, |
894 | const Tensor& bias_p, |
895 | const Tensor& bias_t, |
896 | const Tensor& saved_mean, |
897 | const Tensor& saved_invstd, |
898 | int64_t groups); |
899 | Tensor group_norm_mean_jvp( |
900 | const Tensor& input_t, |
901 | const Tensor& mean_p, |
902 | int64_t groups); |
903 | Tensor group_norm_invstd_jvp( |
904 | const Tensor& input_p, |
905 | const Tensor& input_t, |
906 | const Tensor& mean_p, |
907 | const Tensor& invstd_p, |
908 | int64_t groups); |
909 | |
910 | Tensor convolution_jvp( |
911 | const Tensor& input_p, |
912 | const Tensor& input_t, |
913 | const Tensor& weight_p, |
914 | const Tensor& weight_t, |
915 | const Tensor& bias_p, |
916 | const Tensor& bias_t, |
917 | IntArrayRef stride, |
918 | at::SymIntArrayRef padding, |
919 | IntArrayRef dilation, |
920 | bool transposed, |
921 | at::SymIntArrayRef output_padding, |
922 | int64_t groups); |
923 | |
924 | Tensor _convolution_jvp( |
925 | const Tensor& input_p, |
926 | const Tensor& input_t, |
927 | const Tensor& weight_p, |
928 | const Tensor& weight_t, |
929 | const Tensor& bias_p, |
930 | const Tensor& bias_t, |
931 | IntArrayRef stride, |
932 | at::SymIntArrayRef padding, |
933 | IntArrayRef dilation, |
934 | bool transposed, |
935 | at::SymIntArrayRef output_padding, |
936 | int64_t groups, |
937 | bool benchmark, |
938 | bool deterministic, |
939 | bool cudnn_enabled, |
940 | bool allow_tf32); |
941 | |
942 | Tensor convolution_backward_jvp_grad_bias( |
943 | const Tensor& grad_out_t, |
944 | const Tensor& grad_bias); |
945 | |
946 | Tensor cat_jvp(at::ITensorListRef tensors, int64_t dim); |
947 | Tensor block_diag_jvp(at::TensorList tensors); |
948 | Tensor stack_jvp(at::TensorList tensors, int64_t dim); |
949 | Tensor cumprod_jvp(Tensor self_t, Tensor self_p, Tensor result, int dim); |
950 | Tensor gather_with_keepdimed_indices( |
951 | const Tensor& input, |
952 | int64_t dim, |
953 | const Tensor& indices, |
954 | bool keepdim); |
955 | Tensor evenly_read_jvp( |
956 | const Tensor& fw_grad, |
957 | const Tensor& input, |
958 | const Tensor& value); |
959 | Tensor warn_backwards(const Tensor& grad_output); |
960 | |
961 | std::tuple<Tensor, Tensor> _cudnn_convolution_backward( |
962 | const at::Tensor& self, |
963 | const at::Tensor& grad_output, |
964 | const at::Tensor& weight, |
965 | at::IntArrayRef padding, |
966 | at::IntArrayRef output_padding, |
967 | at::IntArrayRef stride, |
968 | at::IntArrayRef dilation, |
969 | bool transposed, |
970 | int64_t groups, |
971 | ::std::array<bool, 2> output_mask); |
972 | |
973 | Tensor scatter_reduce_jvp( |
974 | const Tensor& self_p, |
975 | const Tensor& self_t, |
976 | int dim, |
977 | const Tensor& index, |
978 | const Tensor& src_p, |
979 | const Tensor& src_t, |
980 | c10::string_view reduce, |
981 | bool include_self, |
982 | const Tensor& result); |
983 | |
984 | std::tuple<Tensor, Tensor> scatter_reduce_backward( |
985 | const Tensor& grad, |
986 | const Tensor& self, |
987 | int dim, |
988 | const Tensor& index, |
989 | const Tensor& src, |
990 | c10::string_view reduce, |
991 | bool include_self, |
992 | const Tensor& result); |
993 | |
994 | Tensor _to_copy_backward( |
995 | const Tensor& grad, |
996 | const c10::TensorOptions& self_options); |
997 | |
998 | std::tuple<Tensor, Tensor> index_reduce_backward( |
999 | const Tensor& grad, |
1000 | const Tensor& self, |
1001 | int dim, |
1002 | const Tensor& index, |
1003 | const Tensor& source, |
1004 | c10::string_view reduce, |
1005 | bool include_self, |
1006 | const Tensor& result); |
1007 | |
1008 | Tensor take_backward( |
1009 | const Tensor& grad, |
1010 | const Tensor& self, |
1011 | const Tensor& indices); |
1012 | |
1013 | Tensor to_sparse_backward( |
1014 | const Tensor& grad, |
1015 | const c10::Layout self_layout, |
1016 | const c10::OptionalArrayRef<c10::SymInt>& self_blocksize); |
1017 | |
1018 | } // namespace details |
1019 | } // namespace generated |
1020 | } // namespace autograd |
1021 | } // namespace torch |
1022 | |