1 | // This file is MACHINE GENERATED! Do not edit. |
2 | |
3 | #ifndef TENSORFLOW_CC_OPS_LINALG_OPS_H_ |
4 | #define TENSORFLOW_CC_OPS_LINALG_OPS_H_ |
5 | |
6 | // This file is MACHINE GENERATED! Do not edit. |
7 | |
8 | #include "tensorflow/cc/framework/ops.h" |
9 | #include "tensorflow/cc/framework/scope.h" |
10 | #include "tensorflow/core/framework/tensor.h" |
11 | #include "tensorflow/core/framework/tensor_shape.h" |
12 | #include "tensorflow/core/framework/types.h" |
13 | #include "tensorflow/core/lib/gtl/array_slice.h" |
14 | |
15 | namespace tensorflow { |
16 | namespace ops { |
17 | |
18 | /// @defgroup linalg_ops Linalg Ops |
19 | /// @{ |
20 | |
21 | /// Computes the Cholesky decomposition of one or more square matrices. |
22 | /// |
23 | /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions |
24 | /// form square matrices. |
25 | /// |
26 | /// The input has to be symmetric and positive definite. Only the lower-triangular |
27 | /// part of the input will be used for this operation. The upper-triangular part |
28 | /// will not be read. |
29 | /// |
30 | /// The output is a tensor of the same shape as the input |
31 | /// containing the Cholesky decompositions for all input submatrices `[..., :, :]`. |
32 | /// |
33 | /// **Note**: The gradient computation on GPU is faster for large matrices but |
34 | /// not for large batch dimensions when the submatrices are small. In this |
35 | /// case it might be faster to use the CPU. |
36 | /// |
37 | /// Args: |
38 | /// * scope: A Scope object |
39 | /// * input: Shape is `[..., M, M]`. |
40 | /// |
41 | /// Returns: |
42 | /// * `Output`: Shape is `[..., M, M]`. |
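///
/// A minimal C++ usage sketch (assuming `ClientSession` from
/// "tensorflow/cc/client/client_session.h" and `Const` from
/// "tensorflow/cc/ops/const_op.h"):
///
/// ```cpp
/// #include "tensorflow/cc/client/client_session.h"
/// #include "tensorflow/cc/ops/const_op.h"
/// #include "tensorflow/cc/ops/linalg_ops.h"
///
/// void CholeskyExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   // A 2x2 symmetric positive-definite matrix.
///   auto a = Const(root, {{4.f, 2.f}, {2.f, 3.f}});
///   auto chol = Cholesky(root, a);
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({chol}, &outputs));
///   // outputs[0] holds the lower-triangular factor L, with L * transpose(L) == a.
/// }
/// ```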
43 | class Cholesky { |
44 | public: |
45 | Cholesky(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
46 | operator ::tensorflow::Output() const { return output; } |
47 | operator ::tensorflow::Input() const { return output; } |
48 | ::tensorflow::Node* node() const { return output.node(); } |
49 | |
50 | Operation operation; |
51 | ::tensorflow::Output output; |
52 | }; |
53 | |
54 | /// Computes the reverse mode backpropagated gradient of the Cholesky algorithm. |
55 | /// |
56 | /// For an explanation see "Differentiation of the Cholesky algorithm" by |
57 | /// Iain Murray http://arxiv.org/abs/1602.07527. |
58 | /// |
59 | /// Args: |
60 | /// * scope: A Scope object |
61 | /// * l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. |
62 | /// Algorithm depends only on lower triangular part of the innermost matrices of |
63 | /// this tensor. |
64 | /// * grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. |
65 | /// Algorithm depends only on lower triangular part of the innermost matrices of |
66 | /// this tensor. |
67 | /// |
68 | /// Returns: |
69 | /// * `Output`: Symmetrized version of df/dA . Shape is `[..., M, M]` |
70 | class CholeskyGrad { |
71 | public: |
72 | CholeskyGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input l, |
73 | ::tensorflow::Input grad); |
74 | operator ::tensorflow::Output() const { return output; } |
75 | operator ::tensorflow::Input() const { return output; } |
76 | ::tensorflow::Node* node() const { return output.node(); } |
77 | |
78 | Operation operation; |
79 | ::tensorflow::Output output; |
80 | }; |
81 | |
82 | /// Computes the eigen decomposition of one or more square matrices. |
83 | /// |
84 | /// Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in |
85 | /// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues |
86 | /// are sorted in non-decreasing order. |
87 | /// |
88 | /// ```python |
89 | /// # a is a tensor. |
90 | /// # e is a tensor of eigenvalues. |
91 | /// # v is a tensor of eigenvectors. |
92 | /// e, v = eig(a) |
93 | /// e = eig(a, compute_v=False) |
94 | /// ``` |
95 | /// |
96 | /// Args: |
97 | /// * scope: A Scope object |
98 | /// * input: `Tensor` input of shape `[N, N]`. |
99 | /// |
100 | /// Optional attributes (see `Attrs`): |
101 | /// * compute_v: If `True` then eigenvectors will be computed and returned in `v`. |
102 | /// Otherwise, only the eigenvalues will be computed. |
103 | /// |
104 | /// Returns: |
105 | /// * `Output` e: Eigenvalues. Shape is `[N]`. |
106 | /// * `Output` v: Eigenvectors. Shape is `[N, N]`. |
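///
/// A minimal C++ sketch (assuming the same includes and session setup as the
/// `Cholesky` example above); note the required `Tout` argument, which selects a
/// complex output type even for real input:
///
/// ```cpp
/// void EigExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{1.f, 2.f}, {3.f, 4.f}});
///   // Eigenvalues/eigenvectors of a general (non-symmetric) matrix are complex.
///   auto eig = Eig(root, a, DT_COMPLEX64, Eig::ComputeV(true));
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({eig.e, eig.v}, &outputs));  // eigenvalues, eigenvectors
/// }
/// ```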
107 | class Eig { |
108 | public: |
109 | /// Optional attribute setters for Eig |
110 | struct Attrs { |
111 | /// If `True` then eigenvectors will be computed and returned in `v`. |
112 | /// Otherwise, only the eigenvalues will be computed. |
113 | /// |
114 | /// Defaults to true |
115 | TF_MUST_USE_RESULT Attrs ComputeV(bool x) { |
116 | Attrs ret = *this; |
117 | ret.compute_v_ = x; |
118 | return ret; |
119 | } |
120 | |
121 | bool compute_v_ = true; |
122 | }; |
123 | Eig(const ::tensorflow::Scope& scope, ::tensorflow::Input input, DataType Tout); |
124 | Eig(const ::tensorflow::Scope& scope, ::tensorflow::Input input, DataType Tout, |
125 | const Eig::Attrs& attrs); |
126 | |
127 | static Attrs ComputeV(bool x) { |
128 | return Attrs().ComputeV(x); |
129 | } |
130 | |
131 | Operation operation; |
132 | ::tensorflow::Output e; |
133 | ::tensorflow::Output v; |
134 | }; |
135 | |
136 | /// Tensor contraction according to Einstein summation convention. |
137 | /// |
138 | /// Implements generalized Tensor contraction and reduction. Each input Tensor must |
139 | /// have a corresponding input subscript appearing in the comma-separated left-hand |
140 | /// side of the equation. The right-hand side of the equation consists of the |
141 | /// output subscript. The input subscripts and the output subscript should consist |
142 | /// of zero or more named axis labels and at most one ellipsis (`...`). |
143 | /// |
144 | /// The named axis labels may be any single character other than those having |
145 | /// special meaning, namely `,.->`. The behavior of this Op is undefined if it |
146 | /// receives an ill-formatted equation; since the validation is done at |
147 | /// graph-building time, we omit format validation checks at runtime. |
148 | /// |
149 | /// Note: This Op is *not* intended to be called by the user; instead users should |
150 | /// call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`. |
151 | /// |
152 | /// Operations are applied to the input(s) according to the following rules: |
153 | /// |
154 | /// (a) Generalized Diagonals: For input dimensions corresponding to axis labels |
155 | /// appearing more than once in the same input subscript, we take the |
156 | /// generalized (`k`-dimensional) diagonal. |
157 | /// For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the |
158 | /// generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, |
159 | /// `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. |
160 | /// |
161 | /// (b) Reduction: Axes corresponding to labels appearing only in one input |
162 | /// subscript but not in the output subscript are summed over prior to Tensor |
163 | /// contraction. |
164 | /// For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are |
165 | /// the reduction axis labels. |
166 | /// |
167 | /// (c) Batch Dimensions: Axes corresponding to labels appearing in each of the |
168 | /// input subscripts and also in the output subscript make up the batch |
169 | /// dimensions in Tensor contraction. Unnamed axis labels corresponding to |
170 | /// ellipsis (`...`) also correspond to batch dimensions. |
171 | /// For example, for the equation denoting batch matrix multiplication, |
172 | /// `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. |
173 | /// |
174 | /// (d) Contraction: In case of binary einsum, axes corresponding to labels |
175 | /// appearing in two different inputs (and not in the output) are contracted |
176 | /// against each other. |
177 | /// Considering the batch matrix multiplication equation again |
178 | /// (`bij,bjk->bik`), the contracted axis label is `j`. |
179 | /// |
180 | /// (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis |
181 | /// labels, the opposite operation of (a) is applied. For example, in the |
///     equation `i->iii` with input shape `[3]`, the output of shape `[3, 3, 3]`
///     is all zeros, except for the (generalized) diagonal, which is populated
184 | /// with values from the input. |
185 | /// Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is |
186 | /// provided to enable computing the symbolic gradient of `tf.einsum`. |
187 | /// |
188 | /// The output subscripts must contain only labels appearing in at least one of the |
189 | /// input subscripts. Furthermore, all dimensions mapping to the same axis label |
190 | /// must be equal. |
191 | /// |
192 | /// Any of the input and output subscripts may contain at most a single ellipsis |
/// (`...`). The ellipsis is mapped against dimensions not corresponding to any
/// named axis label. If two inputs contain an ellipsis, then they are broadcasted
195 | /// according to standard NumPy broadcasting |
196 | /// [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). |
197 | /// |
198 | /// The broadcasted dimensions are placed in the corresponding location of the |
199 | /// ellipsis in the output subscript. If the broadcasted dimensions are non-empty |
/// and the output subscript does not contain an ellipsis, then an InvalidArgument error
201 | /// is raised. |
202 | /// |
203 | /// @compatibility(numpy) |
204 | /// Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html). |
205 | /// |
206 | /// Comparison with `numpy.einsum`: |
207 | /// |
208 | /// * This Op only supports unary and binary forms of `numpy.einsum`. |
/// * This Op does not support the implicit form (i.e. equations without `->`).
210 | /// * This Op also supports repeated indices in the output subscript, which is not |
211 | /// supported by `numpy.einsum`. |
212 | /// @end_compatibility |
213 | /// |
214 | /// |
215 | /// Args: |
216 | /// * scope: A Scope object |
217 | /// * inputs: List of 1 or 2 Tensors. |
218 | /// * equation: String describing the Einstein Summation operation; in the format of np.einsum. |
219 | /// |
220 | /// Returns: |
221 | /// * `Output`: Output Tensor with shape depending upon `equation`. |
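///
/// Although Python users should go through `tf.einsum`, the class can be
/// constructed directly from C++. A minimal sketch of a binary contraction
/// (plain matrix multiplication), assuming the same includes and session setup
/// as the `Cholesky` example above:
///
/// ```cpp
/// void EinsumExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto x = Const(root, {{1.f, 2.f}, {3.f, 4.f}});  // subscript "ij"
///   auto y = Const(root, {{5.f, 6.f}, {7.f, 8.f}});  // subscript "jk"
///   // Contracts over the shared label `j`; equivalent to matrix multiplication.
///   auto prod = Einsum(root, {x, y}, "ij,jk->ik");
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({prod}, &outputs));  // outputs[0] has shape [2, 2]
/// }
/// ```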
222 | class Einsum { |
223 | public: |
224 | Einsum(const ::tensorflow::Scope& scope, ::tensorflow::InputList inputs, |
225 | StringPiece equation); |
226 | operator ::tensorflow::Output() const { return output; } |
227 | operator ::tensorflow::Input() const { return output; } |
228 | ::tensorflow::Node* node() const { return output.node(); } |
229 | |
230 | Operation operation; |
231 | ::tensorflow::Output output; |
232 | }; |
233 | |
/// Computes the sign and the log of the absolute value of the determinant of
/// one or more square matrices.
237 | /// |
238 | /// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions |
239 | /// form square matrices. The outputs are two tensors containing the signs and |
/// the logs of the absolute values of the determinants for all N input submatrices
241 | /// `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`. |
242 | /// The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU` |
243 | /// is the `LU` decomposition of the input and `P` is the corresponding |
244 | /// permutation matrix. |
245 | /// |
246 | /// Args: |
247 | /// * scope: A Scope object |
248 | /// * input: Shape is `[N, M, M]`. |
249 | /// |
250 | /// Returns: |
/// * `Output` sign: The signs of the determinants of the inputs. Shape is `[N]`.
252 | /// * `Output` log_abs_determinant: The logs of the absolute values of the determinants |
253 | /// of the N input matrices. Shape is `[N]`. |
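///
/// A minimal C++ sketch fetching both outputs (assuming the same includes and
/// session setup as the `Cholesky` example above):
///
/// ```cpp
/// void LogMatrixDeterminantExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   // A batch of one 2x2 matrix, shape [1, 2, 2].
///   auto a = Const(root, {{{2.f, 0.f}, {0.f, 3.f}}});
///   auto slogdet = LogMatrixDeterminant(root, a);
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({slogdet.sign, slogdet.log_abs_determinant}, &outputs));
///   // outputs[0]: sign of each determinant; outputs[1]: log of its absolute value.
/// }
/// ```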
254 | class LogMatrixDeterminant { |
255 | public: |
256 | LogMatrixDeterminant(const ::tensorflow::Scope& scope, ::tensorflow::Input |
257 | input); |
258 | |
259 | Operation operation; |
260 | ::tensorflow::Output sign; |
261 | ::tensorflow::Output log_abs_determinant; |
262 | }; |
263 | |
264 | /// Computes the LU decomposition of one or more square matrices. |
265 | /// |
266 | /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions |
267 | /// form square matrices. |
268 | /// |
269 | /// The input has to be invertible. |
270 | /// |
271 | /// The output consists of two tensors LU and P containing the LU decomposition |
272 | /// of all input submatrices `[..., :, :]`. LU encodes the lower triangular and |
273 | /// upper triangular factors. |
274 | /// |
275 | /// For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of |
276 | /// shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower |
/// triangular part of LU. U is an upper triangular matrix of shape `[M, M]` whose
278 | /// entries correspond to the upper triangular part, including the diagonal, of LU. |
279 | /// |
280 | /// P represents a permutation matrix encoded as a list of indices each between `0` |
281 | /// and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to |
/// P, then L, U and P satisfy `P_mat * input = L * U`.
283 | /// |
284 | /// Args: |
285 | /// * scope: A Scope object |
286 | /// * input: A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of |
287 | /// size `[M, M]`. |
288 | /// |
289 | /// Returns: |
290 | /// * `Output` lu: A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the |
291 | /// lower triangular factor `L` with unit diagonal, and whose upper triangular part |
292 | /// denotes the upper triangular factor `U`. |
293 | /// * `Output` p: Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is |
294 | /// `[..., M]`. |
295 | /// @compatibility(scipy) |
296 | /// Similar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are |
297 | /// packed into a single tensor, the permutation is applied to `input` instead of |
298 | /// the right hand side and the permutation `P` is returned as a list of indices |
299 | /// instead of a permutation matrix. |
300 | /// @end_compatibility |
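///
/// A minimal C++ sketch showing the optional `output_idx_type` attribute
/// (assuming the same includes and session setup as the `Cholesky` example above):
///
/// ```cpp
/// void LuExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{4.f, 3.f}, {6.f, 3.f}});  // invertible 2x2 matrix
///   // Request int64 permutation indices instead of the default int32.
///   auto lu = Lu(root, a, Lu::OutputIdxType(DT_INT64));
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({lu.lu, lu.p}, &outputs));  // packed LU factors, permutation
/// }
/// ```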
301 | class Lu { |
302 | public: |
303 | /// Optional attribute setters for Lu |
304 | struct Attrs { |
305 | /// Defaults to DT_INT32 |
306 | TF_MUST_USE_RESULT Attrs OutputIdxType(DataType x) { |
307 | Attrs ret = *this; |
308 | ret.output_idx_type_ = x; |
309 | return ret; |
310 | } |
311 | |
312 | DataType output_idx_type_ = DT_INT32; |
313 | }; |
314 | Lu(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
315 | Lu(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const |
316 | Lu::Attrs& attrs); |
317 | |
318 | static Attrs OutputIdxType(DataType x) { |
319 | return Attrs().OutputIdxType(x); |
320 | } |
321 | |
322 | Operation operation; |
323 | ::tensorflow::Output lu; |
324 | ::tensorflow::Output p; |
325 | }; |
326 | |
327 | /// Computes the determinant of one or more square matrices. |
328 | /// |
329 | /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions |
330 | /// form square matrices. The output is a tensor containing the determinants |
331 | /// for all input submatrices `[..., :, :]`. |
332 | /// |
333 | /// Args: |
334 | /// * scope: A Scope object |
335 | /// * input: Shape is `[..., M, M]`. |
336 | /// |
337 | /// Returns: |
338 | /// * `Output`: Shape is `[...]`. |
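///
/// A minimal C++ sketch (assuming the same includes and session setup as the
/// `Cholesky` example above):
///
/// ```cpp
/// void MatrixDeterminantExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{1.f, 2.f}, {3.f, 4.f}});
///   auto det = MatrixDeterminant(root, a);
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({det}, &outputs));  // scalar determinant, here -2
/// }
/// ```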
339 | class MatrixDeterminant { |
340 | public: |
341 | MatrixDeterminant(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
342 | operator ::tensorflow::Output() const { return output; } |
343 | operator ::tensorflow::Input() const { return output; } |
344 | ::tensorflow::Node* node() const { return output.node(); } |
345 | |
346 | Operation operation; |
347 | ::tensorflow::Output output; |
348 | }; |
349 | |
350 | /// Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes). |
351 | /// |
352 | /// |
353 | /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions |
354 | /// form square matrices. The output is a tensor of the same shape as the input |
355 | /// containing the inverse for all input submatrices `[..., :, :]`. |
356 | /// |
357 | /// The op uses LU decomposition with partial pivoting to compute the inverses. |
358 | /// |
359 | /// If a matrix is not invertible there is no guarantee what the op does. It |
360 | /// may detect the condition and raise an exception or it may simply return a |
361 | /// garbage result. |
362 | /// |
363 | /// Args: |
364 | /// * scope: A Scope object |
365 | /// * input: Shape is `[..., M, M]`. |
366 | /// |
367 | /// Returns: |
368 | /// * `Output`: Shape is `[..., M, M]`. |
369 | /// |
370 | /// @compatibility(numpy) |
371 | /// Equivalent to np.linalg.inv |
372 | /// @end_compatibility |
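///
/// A minimal C++ sketch showing the optional `adjoint` attribute (assuming the
/// same includes and session setup as the `Cholesky` example above):
///
/// ```cpp
/// void MatrixInverseExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{4.f, 7.f}, {2.f, 6.f}});
///   auto inv = MatrixInverse(root, a);                                   // inverse of a
///   auto inv_adj = MatrixInverse(root, a, MatrixInverse::Adjoint(true)); // inverse of adjoint(a)
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({inv, inv_adj}, &outputs));
/// }
/// ```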
373 | class MatrixInverse { |
374 | public: |
375 | /// Optional attribute setters for MatrixInverse |
376 | struct Attrs { |
377 | /// Defaults to false |
378 | TF_MUST_USE_RESULT Attrs Adjoint(bool x) { |
379 | Attrs ret = *this; |
380 | ret.adjoint_ = x; |
381 | return ret; |
382 | } |
383 | |
384 | bool adjoint_ = false; |
385 | }; |
386 | MatrixInverse(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
387 | MatrixInverse(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
388 | const MatrixInverse::Attrs& attrs); |
389 | operator ::tensorflow::Output() const { return output; } |
390 | operator ::tensorflow::Input() const { return output; } |
391 | ::tensorflow::Node* node() const { return output.node(); } |
392 | |
393 | static Attrs Adjoint(bool x) { |
394 | return Attrs().Adjoint(x); |
395 | } |
396 | |
397 | Operation operation; |
398 | ::tensorflow::Output output; |
399 | }; |
400 | |
401 | /// Solves systems of linear equations. |
402 | /// |
/// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
/// form square matrices. `rhs` is a tensor of shape `[..., M, K]`. The `output` is
/// a tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix
406 | /// satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. |
407 | /// If `adjoint` is `True` then each output matrix satisfies |
408 | /// `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. |
409 | /// |
410 | /// Args: |
411 | /// * scope: A Scope object |
412 | /// * matrix: Shape is `[..., M, M]`. |
413 | /// * rhs: Shape is `[..., M, K]`. |
414 | /// |
415 | /// Optional attributes (see `Attrs`): |
416 | /// * adjoint: Boolean indicating whether to solve with `matrix` or its (block-wise) |
417 | /// adjoint. |
418 | /// |
419 | /// Returns: |
420 | /// * `Output`: Shape is `[..., M, K]`. |
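///
/// A minimal C++ sketch (assuming the same includes and session setup as the
/// `Cholesky` example above):
///
/// ```cpp
/// void MatrixSolveExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{3.f, 2.f}, {1.f, 4.f}});  // [M, M]
///   auto b = Const(root, {{7.f}, {9.f}});            // [M, K] with K = 1
///   auto x = MatrixSolve(root, a, b);                // solves a * x = b
///   // Solve with the (block-wise) adjoint instead: adjoint(a) * x = b.
///   auto x_adj = MatrixSolve(root, a, b, MatrixSolve::Adjoint(true));
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({x, x_adj}, &outputs));
/// }
/// ```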
421 | class MatrixSolve { |
422 | public: |
423 | /// Optional attribute setters for MatrixSolve |
424 | struct Attrs { |
425 | /// Boolean indicating whether to solve with `matrix` or its (block-wise) |
426 | /// adjoint. |
427 | /// |
428 | /// Defaults to false |
429 | TF_MUST_USE_RESULT Attrs Adjoint(bool x) { |
430 | Attrs ret = *this; |
431 | ret.adjoint_ = x; |
432 | return ret; |
433 | } |
434 | |
435 | bool adjoint_ = false; |
436 | }; |
437 | MatrixSolve(const ::tensorflow::Scope& scope, ::tensorflow::Input matrix, |
438 | ::tensorflow::Input rhs); |
439 | MatrixSolve(const ::tensorflow::Scope& scope, ::tensorflow::Input matrix, |
440 | ::tensorflow::Input rhs, const MatrixSolve::Attrs& attrs); |
441 | operator ::tensorflow::Output() const { return output; } |
442 | operator ::tensorflow::Input() const { return output; } |
443 | ::tensorflow::Node* node() const { return output.node(); } |
444 | |
445 | static Attrs Adjoint(bool x) { |
446 | return Attrs().Adjoint(x); |
447 | } |
448 | |
449 | Operation operation; |
450 | ::tensorflow::Output output; |
451 | }; |
452 | |
453 | /// Solves one or more linear least-squares problems. |
454 | /// |
455 | /// `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions |
/// form real or complex matrices of size `[M, N]`. `rhs` is a tensor of the same
/// type as `matrix` and shape `[..., M, K]`.
/// The output is a tensor of shape `[..., N, K]` where each output matrix solves
/// each of the equations
/// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`
461 | /// in the least squares sense. |
462 | /// |
463 | /// We use the following notation for (complex) matrix and right-hand sides |
464 | /// in the batch: |
465 | /// |
466 | /// `matrix`=\\(A \in \mathbb{C}^{m \times n}\\), |
467 | /// `rhs`=\\(B \in \mathbb{C}^{m \times k}\\), |
468 | /// `output`=\\(X \in \mathbb{C}^{n \times k}\\), |
469 | /// `l2_regularizer`=\\(\lambda \in \mathbb{R}\\). |
470 | /// |
471 | /// If `fast` is `True`, then the solution is computed by solving the normal |
472 | /// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then |
473 | /// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares |
/// problem \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\).
475 | /// If \\(m \lt n\\) then `output` is computed as |
476 | /// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the |
477 | /// minimum-norm solution to the under-determined linear system, i.e. |
478 | /// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\), |
479 | /// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable |
480 | /// when \\(A\\) is numerically full rank and has a condition number |
481 | /// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is |
482 | /// sufficiently large. |
483 | /// |
484 | /// If `fast` is `False` an algorithm based on the numerically robust complete |
485 | /// orthogonal decomposition is used. This computes the minimum-norm |
486 | /// least-squares solution, even when \\(A\\) is rank deficient. This path is |
487 | /// typically 6-7 times slower than the fast path. If `fast` is `False` then |
488 | /// `l2_regularizer` is ignored. |
489 | /// |
490 | /// Args: |
491 | /// * scope: A Scope object |
492 | /// * matrix: Shape is `[..., M, N]`. |
493 | /// * rhs: Shape is `[..., M, K]`. |
494 | /// * l2_regularizer: Scalar tensor. |
495 | /// |
496 | /// @compatibility(numpy) |
497 | /// Equivalent to np.linalg.lstsq |
498 | /// @end_compatibility |
499 | /// |
500 | /// Returns: |
501 | /// * `Output`: Shape is `[..., N, K]`. |
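///
/// A minimal C++ sketch of an over-determined least-squares solve (assuming the
/// same includes and session setup as the `Cholesky` example above):
///
/// ```cpp
/// void MatrixSolveLsExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{1.f, 0.f}, {0.f, 1.f}, {1.f, 1.f}});  // [3, 2]
///   auto b = Const(root, {{1.f}, {2.f}, {4.f}});                 // [3, 1]
///   auto reg = Const(root, 0.1);  // double literal -> scalar float64 regularizer
///   auto x = MatrixSolveLs(root, a, b, reg, MatrixSolveLs::Fast(true));
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({x}, &outputs));  // least-squares solution, shape [2, 1]
/// }
/// ```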
502 | class MatrixSolveLs { |
503 | public: |
504 | /// Optional attribute setters for MatrixSolveLs |
505 | struct Attrs { |
506 | /// Defaults to true |
507 | TF_MUST_USE_RESULT Attrs Fast(bool x) { |
508 | Attrs ret = *this; |
509 | ret.fast_ = x; |
510 | return ret; |
511 | } |
512 | |
513 | bool fast_ = true; |
514 | }; |
515 | MatrixSolveLs(const ::tensorflow::Scope& scope, ::tensorflow::Input matrix, |
516 | ::tensorflow::Input rhs, ::tensorflow::Input l2_regularizer); |
517 | MatrixSolveLs(const ::tensorflow::Scope& scope, ::tensorflow::Input matrix, |
518 | ::tensorflow::Input rhs, ::tensorflow::Input l2_regularizer, |
519 | const MatrixSolveLs::Attrs& attrs); |
520 | operator ::tensorflow::Output() const { return output; } |
521 | operator ::tensorflow::Input() const { return output; } |
522 | ::tensorflow::Node* node() const { return output.node(); } |
523 | |
524 | static Attrs Fast(bool x) { |
525 | return Attrs().Fast(x); |
526 | } |
527 | |
528 | Operation operation; |
529 | ::tensorflow::Output output; |
530 | }; |
531 | |
532 | /// Computes the matrix square root of one or more square matrices: |
533 | /// |
534 | /// matmul(sqrtm(A), sqrtm(A)) = A |
535 | /// |
536 | /// The input matrix should be invertible. If the input matrix is real, it should |
537 | /// have no eigenvalues which are real and negative (pairs of complex conjugate |
538 | /// eigenvalues are allowed). |
539 | /// |
540 | /// The matrix square root is computed by first reducing the matrix to |
541 | /// quasi-triangular form with the real Schur decomposition. The square root |
542 | /// of the quasi-triangular matrix is then computed directly. Details of |
543 | /// the algorithm can be found in: Nicholas J. Higham, "Computing real |
544 | /// square roots of a real matrix", Linear Algebra Appl., 1987. |
545 | /// |
546 | /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions |
547 | /// form square matrices. The output is a tensor of the same shape as the input |
548 | /// containing the matrix square root for all input submatrices `[..., :, :]`. |
549 | /// |
550 | /// Args: |
551 | /// * scope: A Scope object |
552 | /// * input: Shape is `[..., M, M]`. |
553 | /// |
554 | /// Returns: |
555 | /// * `Output`: Shape is `[..., M, M]`. |
556 | /// |
557 | /// @compatibility(scipy) |
558 | /// Equivalent to scipy.linalg.sqrtm |
559 | /// @end_compatibility |
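///
/// A minimal C++ sketch (assuming the same includes and session setup as the
/// `Cholesky` example above):
///
/// ```cpp
/// void MatrixSquareRootExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{4.f, 0.f}, {0.f, 9.f}});
///   auto sqrtm = MatrixSquareRoot(root, a);
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({sqrtm, &outputs}[0] == sqrtm ? std::vector<Output>{sqrtm} : std::vector<Output>{sqrtm}, &outputs));
/// }
/// ```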
560 | class MatrixSquareRoot { |
561 | public: |
562 | MatrixSquareRoot(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
563 | operator ::tensorflow::Output() const { return output; } |
564 | operator ::tensorflow::Input() const { return output; } |
565 | ::tensorflow::Node* node() const { return output.node(); } |
566 | |
567 | Operation operation; |
568 | ::tensorflow::Output output; |
569 | }; |
570 | |
571 | /// Solves systems of linear equations with upper or lower triangular matrices by backsubstitution. |
572 | /// |
573 | /// |
574 | /// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form |
575 | /// square matrices. If `lower` is `True` then the strictly upper triangular part |
576 | /// of each inner-most matrix is assumed to be zero and not accessed. |
/// If `lower` is `False` then the strictly lower triangular part of each inner-most
578 | /// matrix is assumed to be zero and not accessed. |
579 | /// `rhs` is a tensor of shape `[..., M, N]`. |
580 | /// |
/// The output is a tensor of shape `[..., M, N]`. If `adjoint` is
/// `False` then the innermost matrices in `output` satisfy matrix equations
/// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
/// If `adjoint` is `True` then the innermost matrices in
/// `output` satisfy matrix equations
/// `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
587 | /// |
/// Note that the batch shapes for the inputs only need to broadcast.
589 | /// |
590 | /// Example: |
591 | /// ```python |
592 | /// |
593 | /// a = tf.constant([[3, 0, 0, 0], |
594 | /// [2, 1, 0, 0], |
595 | /// [1, 0, 1, 0], |
596 | /// [1, 1, 1, 1]], dtype=tf.float32) |
597 | /// |
598 | /// b = tf.constant([[4], |
599 | /// [2], |
600 | /// [4], |
601 | /// [2]], dtype=tf.float32) |
602 | /// |
603 | /// x = tf.linalg.triangular_solve(a, b, lower=True) |
604 | /// x |
605 | /// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy= |
606 | /// # array([[ 1.3333334 ], |
607 | /// # [-0.66666675], |
608 | /// # [ 2.6666665 ], |
609 | /// # [-1.3333331 ]], dtype=float32)> |
610 | /// |
611 | /// # in python3 one can use `a@x` |
612 | /// tf.matmul(a, x) |
613 | /// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy= |
614 | /// # array([[4. ], |
615 | /// # [2. ], |
616 | /// # [4. ], |
617 | /// # [1.9999999]], dtype=float32)> |
618 | /// ``` |
619 | /// |
620 | /// Args: |
621 | /// * scope: A Scope object |
622 | /// * matrix: Shape is `[..., M, M]`. |
623 | /// * rhs: Shape is `[..., M, K]`. |
624 | /// |
625 | /// Optional attributes (see `Attrs`): |
626 | /// * lower: Boolean indicating whether the innermost matrices in `matrix` are |
627 | /// lower or upper triangular. |
628 | /// * adjoint: Boolean indicating whether to solve with `matrix` or its (block-wise) |
629 | /// adjoint. |
630 | /// |
/// @compatibility(scipy)
632 | /// Equivalent to scipy.linalg.solve_triangular |
633 | /// @end_compatibility |
634 | /// |
635 | /// Returns: |
636 | /// * `Output`: Shape is `[..., M, K]`. |
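///
/// A minimal C++ sketch mirroring the Python example above (assuming the same
/// includes and session setup as the `Cholesky` example):
///
/// ```cpp
/// void MatrixTriangularSolveExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{3.f, 0.f, 0.f, 0.f},
///                         {2.f, 1.f, 0.f, 0.f},
///                         {1.f, 0.f, 1.f, 0.f},
///                         {1.f, 1.f, 1.f, 1.f}});
///   auto b = Const(root, {{4.f}, {2.f}, {4.f}, {2.f}});
///   auto x = MatrixTriangularSolve(root, a, b,
///                                  MatrixTriangularSolve::Lower(true).Adjoint(false));
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({x}, &outputs));  // same solution as the Python example
/// }
/// ```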
637 | class MatrixTriangularSolve { |
638 | public: |
639 | /// Optional attribute setters for MatrixTriangularSolve |
640 | struct Attrs { |
641 | /// Boolean indicating whether the innermost matrices in `matrix` are |
642 | /// lower or upper triangular. |
643 | /// |
644 | /// Defaults to true |
645 | TF_MUST_USE_RESULT Attrs Lower(bool x) { |
646 | Attrs ret = *this; |
647 | ret.lower_ = x; |
648 | return ret; |
649 | } |
650 | |
651 | /// Boolean indicating whether to solve with `matrix` or its (block-wise) |
652 | /// adjoint. |
653 | /// |
/// @compatibility(scipy)
655 | /// Equivalent to scipy.linalg.solve_triangular |
656 | /// @end_compatibility |
657 | /// |
658 | /// Defaults to false |
659 | TF_MUST_USE_RESULT Attrs Adjoint(bool x) { |
660 | Attrs ret = *this; |
661 | ret.adjoint_ = x; |
662 | return ret; |
663 | } |
664 | |
665 | bool lower_ = true; |
666 | bool adjoint_ = false; |
667 | }; |
668 | MatrixTriangularSolve(const ::tensorflow::Scope& scope, ::tensorflow::Input |
669 | matrix, ::tensorflow::Input rhs); |
670 | MatrixTriangularSolve(const ::tensorflow::Scope& scope, ::tensorflow::Input |
671 | matrix, ::tensorflow::Input rhs, const |
672 | MatrixTriangularSolve::Attrs& attrs); |
673 | operator ::tensorflow::Output() const { return output; } |
674 | operator ::tensorflow::Input() const { return output; } |
675 | ::tensorflow::Node* node() const { return output.node(); } |
676 | |
677 | static Attrs Lower(bool x) { |
678 | return Attrs().Lower(x); |
679 | } |
680 | static Attrs Adjoint(bool x) { |
681 | return Attrs().Adjoint(x); |
682 | } |
683 | |
684 | Operation operation; |
685 | ::tensorflow::Output output; |
686 | }; |
687 | |
688 | /// Computes the QR decompositions of one or more matrices. |
689 | /// |
690 | /// Computes the QR decomposition of each inner matrix in `tensor` such that |
/// `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
692 | /// |
693 | /// Currently, the gradient for the QR decomposition is well-defined only when |
694 | /// the first `P` columns of the inner matrix are linearly independent, where |
/// `P` is the minimum of `M` and `N`, the 2 inner-most dimensions of `tensor`.
696 | /// |
697 | /// ```python |
698 | /// # a is a tensor. |
699 | /// # q is a tensor of orthonormal matrices. |
700 | /// # r is a tensor of upper triangular matrices. |
701 | /// q, r = qr(a) |
702 | /// q_full, r_full = qr(a, full_matrices=True) |
703 | /// ``` |
704 | /// |
705 | /// Args: |
706 | /// * scope: A Scope object |
707 | /// * input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions |
708 | /// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. |
709 | /// |
710 | /// Optional attributes (see `Attrs`): |
711 | /// * full_matrices: If true, compute full-sized `q` and `r`. If false |
712 | /// (the default), compute only the leading `P` columns of `q`. |
713 | /// |
714 | /// Returns: |
715 | /// * `Output` q: Orthonormal basis for range of `a`. If `full_matrices` is `False` then |
716 | /// shape is `[..., M, P]`; if `full_matrices` is `True` then shape is |
717 | /// `[..., M, M]`. |
718 | /// * `Output` r: Triangular factor. If `full_matrices` is `False` then shape is |
719 | /// `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`. |
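///
/// A minimal C++ sketch showing the `full_matrices` attribute (assuming the same
/// includes and session setup as the `Cholesky` example above):
///
/// ```cpp
/// void QrExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{1.f, 2.f}, {3.f, 4.f}, {5.f, 6.f}});  // [M, N] = [3, 2]
///   auto qr = Qr(root, a);                               // reduced: q is [3, 2]
///   auto qr_full = Qr(root, a, Qr::FullMatrices(true));  // full: q is [3, 3]
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({qr.q, qr.r, qr_full.q, qr_full.r}, &outputs));
/// }
/// ```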
720 | class Qr { |
721 | public: |
722 | /// Optional attribute setters for Qr |
723 | struct Attrs { |
724 | /// If true, compute full-sized `q` and `r`. If false |
725 | /// (the default), compute only the leading `P` columns of `q`. |
726 | /// |
727 | /// Defaults to false |
728 | TF_MUST_USE_RESULT Attrs FullMatrices(bool x) { |
729 | Attrs ret = *this; |
730 | ret.full_matrices_ = x; |
731 | return ret; |
732 | } |
733 | |
734 | bool full_matrices_ = false; |
735 | }; |
736 | Qr(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
737 | Qr(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const |
738 | Qr::Attrs& attrs); |
739 | |
740 | static Attrs FullMatrices(bool x) { |
741 | return Attrs().FullMatrices(x); |
742 | } |
743 | |
744 | Operation operation; |
745 | ::tensorflow::Output q; |
746 | ::tensorflow::Output r; |
747 | }; |
748 | |
749 | /// Computes the eigen decomposition of one or more square self-adjoint matrices. |
750 | /// |
751 | /// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in |
752 | /// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues |
753 | /// are sorted in non-decreasing order. |
754 | /// |
755 | /// ```python |
756 | /// # a is a tensor. |
757 | /// # e is a tensor of eigenvalues. |
758 | /// # v is a tensor of eigenvectors. |
759 | /// e, v = self_adjoint_eig(a) |
760 | /// e = self_adjoint_eig(a, compute_v=False) |
761 | /// ``` |
762 | /// |
763 | /// Args: |
764 | /// * scope: A Scope object |
765 | /// * input: `Tensor` input of shape `[N, N]`. |
766 | /// |
767 | /// Optional attributes (see `Attrs`): |
768 | /// * compute_v: If `True` then eigenvectors will be computed and returned in `v`. |
769 | /// Otherwise, only the eigenvalues will be computed. |
770 | /// |
771 | /// Returns: |
772 | /// * `Output` e: Eigenvalues. Shape is `[N]`. |
773 | /// * `Output` v: Eigenvectors. Shape is `[N, N]`. |
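///
/// A minimal C++ sketch (assuming the same includes and session setup as the
/// `Cholesky` example above):
///
/// ```cpp
/// void SelfAdjointEigExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{2.f, 1.f}, {1.f, 2.f}});  // symmetric matrix
///   auto eig = SelfAdjointEig(root, a);
///   // Eigenvalues only, skipping the eigenvector computation:
///   auto evals = SelfAdjointEig(root, a, SelfAdjointEig::ComputeV(false));
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({eig.e, eig.v, evals.e}, &outputs));
/// }
/// ```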
774 | class SelfAdjointEig { |
775 | public: |
776 | /// Optional attribute setters for SelfAdjointEig |
777 | struct Attrs { |
778 | /// If `True` then eigenvectors will be computed and returned in `v`. |
779 | /// Otherwise, only the eigenvalues will be computed. |
780 | /// |
781 | /// Defaults to true |
782 | TF_MUST_USE_RESULT Attrs ComputeV(bool x) { |
783 | Attrs ret = *this; |
784 | ret.compute_v_ = x; |
785 | return ret; |
786 | } |
787 | |
788 | bool compute_v_ = true; |
789 | }; |
790 | SelfAdjointEig(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
791 | SelfAdjointEig(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
792 | const SelfAdjointEig::Attrs& attrs); |
793 | |
794 | static Attrs ComputeV(bool x) { |
795 | return Attrs().ComputeV(x); |
796 | } |
797 | |
798 | Operation operation; |
799 | ::tensorflow::Output e; |
800 | ::tensorflow::Output v; |
801 | }; |
802 | |
803 | /// Computes the singular value decompositions of one or more matrices. |
804 | /// |
805 | /// Computes the SVD of each inner matrix in `input` such that |
/// `input[..., :, :] = u[..., :, :] * diag(s[..., :]) * transpose(v[..., :, :])`
807 | /// |
808 | /// ```python |
809 | /// # a is a tensor containing a batch of matrices. |
810 | /// # s is a tensor of singular values for each matrix. |
811 | /// # u is the tensor containing the left singular vectors for each matrix. |
812 | /// # v is the tensor containing the right singular vectors for each matrix. |
813 | /// s, u, v = svd(a) |
814 | /// s, _, _ = svd(a, compute_uv=False) |
815 | /// ``` |
816 | /// |
817 | /// Args: |
818 | /// * scope: A Scope object |
819 | /// * input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions |
820 | /// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. |
821 | /// |
822 | /// Optional attributes (see `Attrs`): |
823 | /// * compute_uv: If true, left and right singular vectors will be |
824 | /// computed and returned in `u` and `v`, respectively. |
/// If false, `u` and `v` are not set and should never be referenced.
826 | /// * full_matrices: If true, compute full-sized `u` and `v`. If false |
827 | /// (the default), compute only the leading `P` singular vectors. |
828 | /// Ignored if `compute_uv` is `False`. |
829 | /// |
830 | /// Returns: |
831 | /// * `Output` s: Singular values. Shape is `[..., P]`. |
832 | /// * `Output` u: Left singular vectors. If `full_matrices` is `False` then shape is |
833 | /// `[..., M, P]`; if `full_matrices` is `True` then shape is |
834 | /// `[..., M, M]`. Undefined if `compute_uv` is `False`. |
/// * `Output` v: Right singular vectors. If `full_matrices` is `False` then shape is
836 | /// `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`. |
837 | /// Undefined if `compute_uv` is false. |
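///
/// A minimal C++ sketch showing the `compute_uv` and `full_matrices` attributes
/// (assuming the same includes and session setup as the `Cholesky` example above):
///
/// ```cpp
/// void SvdExample() {
///   using namespace tensorflow;
///   using namespace tensorflow::ops;
///   Scope root = Scope::NewRootScope();
///   auto a = Const(root, {{1.f, 2.f}, {3.f, 4.f}, {5.f, 6.f}});  // [M, N] = [3, 2]
///   auto svd = Svd(root, a, Svd::ComputeUv(true).FullMatrices(false));
///   ClientSession session(root);
///   std::vector<Tensor> outputs;
///   TF_CHECK_OK(session.Run({svd.s, svd.u, svd.v}, &outputs));
///   // outputs: singular values [2], left vectors [3, 2], right vectors [2, 2].
/// }
/// ```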
838 | class Svd { |
839 | public: |
840 | /// Optional attribute setters for Svd |
841 | struct Attrs { |
842 | /// If true, left and right singular vectors will be |
843 | /// computed and returned in `u` and `v`, respectively. |
/// If false, `u` and `v` are not set and should never be referenced.
845 | /// |
846 | /// Defaults to true |
847 | TF_MUST_USE_RESULT Attrs ComputeUv(bool x) { |
848 | Attrs ret = *this; |
849 | ret.compute_uv_ = x; |
850 | return ret; |
851 | } |
852 | |
853 | /// If true, compute full-sized `u` and `v`. If false |
854 | /// (the default), compute only the leading `P` singular vectors. |
855 | /// Ignored if `compute_uv` is `False`. |
856 | /// |
857 | /// Defaults to false |
858 | TF_MUST_USE_RESULT Attrs FullMatrices(bool x) { |
859 | Attrs ret = *this; |
860 | ret.full_matrices_ = x; |
861 | return ret; |
862 | } |
863 | |
864 | bool compute_uv_ = true; |
865 | bool full_matrices_ = false; |
866 | }; |
867 | Svd(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
868 | Svd(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const |
869 | Svd::Attrs& attrs); |
870 | |
871 | static Attrs ComputeUv(bool x) { |
872 | return Attrs().ComputeUv(x); |
873 | } |
874 | static Attrs FullMatrices(bool x) { |
875 | return Attrs().FullMatrices(x); |
876 | } |
877 | |
878 | Operation operation; |
879 | ::tensorflow::Output s; |
880 | ::tensorflow::Output u; |
881 | ::tensorflow::Output v; |
882 | }; |
883 | |
884 | /// @} |
885 | |
886 | } // namespace ops |
887 | } // namespace tensorflow |
888 | |
889 | #endif // TENSORFLOW_CC_OPS_LINALG_OPS_H_ |
890 | |