1 | // This file is MACHINE GENERATED! Do not edit. |
2 | |
3 | #ifndef TENSORFLOW_CC_OPS_MATH_OPS_H_ |
4 | #define TENSORFLOW_CC_OPS_MATH_OPS_H_ |
5 | |
6 | // This file is MACHINE GENERATED! Do not edit. |
7 | |
8 | #include "tensorflow/cc/framework/ops.h" |
9 | #include "tensorflow/cc/framework/scope.h" |
10 | #include "tensorflow/core/framework/tensor.h" |
11 | #include "tensorflow/core/framework/tensor_shape.h" |
12 | #include "tensorflow/core/framework/types.h" |
13 | #include "tensorflow/core/lib/gtl/array_slice.h" |
14 | |
15 | namespace tensorflow { |
16 | namespace ops { |
17 | |
18 | /// @defgroup math_ops Math Ops |
19 | /// @{ |
20 | |
/// Computes the absolute value of a tensor.
///
/// Given a tensor `x`, this operation returns a tensor containing the absolute
/// value of each element in `x`. For example, if x is an input element and y is
/// an output element, this operation computes \\(y = |x|\\).
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Abs {
 public:
  Abs(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  // Implicit conversions so this op can be passed directly as an input to
  // other ops built in the same graph.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output y;  // Element-wise absolute value of `x`.
};
42 | |
/// Returns the element-wise sum of a list of tensors.
///
/// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
/// wait for all of its inputs to be ready before beginning to sum. This can
/// save memory if inputs are ready at different times, since minimum temporary
/// storage is proportional to the output size rather than the inputs size.
///
/// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
///
/// Returns a `Tensor` of same shape and type as the elements of `inputs`.
///
/// Args:
/// * scope: A Scope object
/// * inputs: A list of `Tensor` objects, each with same shape and type.
/// * shape: Shape of elements of `inputs`.
///
/// Returns:
/// * `Output`: The sum tensor.
class AccumulateNV2 {
 public:
  AccumulateNV2(const ::tensorflow::Scope& scope, ::tensorflow::InputList inputs,
              PartialTensorShape shape);
  // Implicit conversions so the result can feed other ops directly.
  operator ::tensorflow::Output() const { return sum; }
  operator ::tensorflow::Input() const { return sum; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return sum.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output sum;  // Element-wise sum over `inputs`.
};
72 | |
/// Computes acos of x element-wise.
///
///
/// Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.
///
/// Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
///
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Acos {
 public:
  Acos(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output y;  // Element-wise arccosine of `x`.
};
96 | |
/// Computes inverse hyperbolic cosine of x element-wise.
///
/// Given an input tensor, the function computes inverse hyperbolic cosine of every element.
/// Input range is `[1, inf]`. It returns `nan` if the input lies outside the range.
///
/// ```python
/// x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")])
/// tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]
/// ```
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Acosh {
 public:
  Acosh(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output y;  // Element-wise acosh of `x`.
};
122 | |
/// Returns x + y element-wise.
///
/// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
///
/// Given two input tensors, the `tf.add` operation computes the sum for every element in the tensor.
///
/// Both input and output have a range `(-inf, inf)`.
///
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The z tensor.
class Add {
 public:
  Add(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
      ::tensorflow::Input y);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output z;  // Broadcasted element-wise sum x + y.
};
149 | |
/// Add all input tensors element wise.
///
/// Inputs must be of same size and shape.
///
/// ```python
/// x = [9, 7, 10]
/// tf.math.add_n(x) ==> 26
/// ```
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The sum tensor.
class AddN {
 public:
  AddN(const ::tensorflow::Scope& scope, ::tensorflow::InputList inputs);
  // Implicit conversions so the result can feed other ops directly.
  operator ::tensorflow::Output() const { return sum; }
  operator ::tensorflow::Input() const { return sum; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return sum.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output sum;  // Element-wise sum over `inputs` (no broadcasting).
};
174 | |
/// Returns x + y element-wise.
///
/// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The z tensor.
class AddV2 {
 public:
  AddV2(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
        ::tensorflow::Input y);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output z;  // Broadcasted element-wise sum x + y.
};
196 | |
/// Computes the "logical and" of elements across dimensions of a tensor.
///
/// Reduces `input` along the dimensions given in `axis`. Unless
/// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
/// `axis`. If `keep_dims` is true, the reduced dimensions are
/// retained with length 1.
///
/// Args:
/// * scope: A Scope object
/// * input: The tensor to reduce.
/// * axis: The dimensions to reduce. Must be in the range
/// `[-rank(input), rank(input))`.
///
/// Optional attributes (see `Attrs`):
/// * keep_dims: If true, retain reduced dimensions with length 1.
///
/// Returns:
/// * `Output`: The reduced tensor.
///
/// Aliases:
/// * ReduceAll
class All {
 public:
  /// Optional attribute setters for All
  struct Attrs {
    /// If true, retain reduced dimensions with length 1.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs KeepDims(bool x) {
      // Setters return a modified copy so calls can be chained fluently.
      Attrs ret = *this;
      ret.keep_dims_ = x;
      return ret;
    }

    bool keep_dims_ = false;
  };
  All(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
      ::tensorflow::Input axis);
  // Overload taking optional attributes (see `Attrs`).
  All(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
      ::tensorflow::Input axis, const All::Attrs& attrs);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return output.node(); }

  // Convenience: All::KeepDims(true) builds an Attrs with keep_dims set.
  static Attrs KeepDims(bool x) {
    return Attrs().KeepDims(x);
  }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output output;  // The reduced tensor.
};
// Alias matching the Python-facing name `reduce_all`.
typedef All ReduceAll;
249 | |
/// Returns the argument of a complex number.
///
/// Given a tensor `input` of complex numbers, this operation returns a tensor of
/// type `float` that is the argument of each element in `input`. All elements in
/// `input` must be complex numbers of the form \\(a + bj\\), where *a*
/// is the real part and *b* is the imaginary part.
///
/// The argument returned by this operation is of the form \\(atan2(b, a)\\).
///
/// For example:
///
/// ```
/// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
/// tf.angle(input) ==> [2.0132, 1.056]
/// ```
///
/// @compatibility(numpy)
/// Equivalent to np.angle.
/// @end_compatibility
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The output tensor.
class Angle {
 public:
  /// Optional attribute setters for Angle
  struct Attrs {
    /// Defaults to DT_FLOAT
    TF_MUST_USE_RESULT Attrs Tout(DataType x) {
      // Setters return a modified copy so calls can be chained fluently.
      Attrs ret = *this;
      ret.Tout_ = x;
      return ret;
    }

    DataType Tout_ = DT_FLOAT;
  };
  Angle(const ::tensorflow::Scope& scope, ::tensorflow::Input input);
  // Overload taking optional attributes (see `Attrs`).
  Angle(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
      Angle::Attrs& attrs);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return output.node(); }

  // Convenience: Angle::Tout(dt) builds an Attrs with the output dtype set.
  static Attrs Tout(DataType x) {
    return Attrs().Tout(x);
  }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output output;  // Element-wise argument (phase) of `input`.
};
302 | |
/// Computes the "logical or" of elements across dimensions of a tensor.
///
/// Reduces `input` along the dimensions given in `axis`. Unless
/// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
/// `axis`. If `keep_dims` is true, the reduced dimensions are
/// retained with length 1.
///
/// Args:
/// * scope: A Scope object
/// * input: The tensor to reduce.
/// * axis: The dimensions to reduce. Must be in the range
/// `[-rank(input), rank(input))`.
///
/// Optional attributes (see `Attrs`):
/// * keep_dims: If true, retain reduced dimensions with length 1.
///
/// Returns:
/// * `Output`: The reduced tensor.
///
/// Aliases:
/// * ReduceAny
class Any {
 public:
  /// Optional attribute setters for Any
  struct Attrs {
    /// If true, retain reduced dimensions with length 1.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs KeepDims(bool x) {
      // Setters return a modified copy so calls can be chained fluently.
      Attrs ret = *this;
      ret.keep_dims_ = x;
      return ret;
    }

    bool keep_dims_ = false;
  };
  Any(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
      ::tensorflow::Input axis);
  // Overload taking optional attributes (see `Attrs`).
  Any(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
      ::tensorflow::Input axis, const Any::Attrs& attrs);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return output.node(); }

  // Convenience: Any::KeepDims(true) builds an Attrs with keep_dims set.
  static Attrs KeepDims(bool x) {
    return Attrs().KeepDims(x);
  }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output output;  // The reduced tensor.
};
// Alias matching the Python-facing name `reduce_any`.
typedef Any ReduceAny;
355 | |
/// Returns the truth value of abs(x-y) < tolerance element-wise.
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The z tensor.
class ApproximateEqual {
 public:
  /// Optional attribute setters for ApproximateEqual
  struct Attrs {
    /// Defaults to 1e-05
    TF_MUST_USE_RESULT Attrs Tolerance(float x) {
      // Setters return a modified copy so calls can be chained fluently.
      Attrs ret = *this;
      ret.tolerance_ = x;
      return ret;
    }

    float tolerance_ = 1e-05f;
  };
  ApproximateEqual(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
                 ::tensorflow::Input y);
  // Overload taking optional attributes (see `Attrs`).
  ApproximateEqual(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
                 ::tensorflow::Input y, const ApproximateEqual::Attrs& attrs);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return z.node(); }

  // Convenience: ApproximateEqual::Tolerance(t) builds an Attrs with it set.
  static Attrs Tolerance(float x) {
    return Attrs().Tolerance(x);
  }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output z;  // Boolean tensor: abs(x - y) < tolerance.
};
391 | |
/// Returns the index with the largest value across dimensions of a tensor.
///
/// Note that in case of ties the identity of the return value is not guaranteed.
///
/// Usage:
/// ```python
/// import tensorflow as tf
/// a = [1, 10, 26.9, 2.8, 166.32, 62.3]
/// b = tf.math.argmax(input = a)
/// c = tf.keras.backend.eval(b)
/// # c = 4
/// # here a[4] = 166.32 which is the largest element of a across axis 0
/// ```
///
/// Args:
/// * scope: A Scope object
/// * dimension: int16, int32 or int64, must be in the range `[-rank(input), rank(input))`.
/// Describes which dimension of the input Tensor to reduce across. For vectors,
/// use dimension = 0.
///
/// Returns:
/// * `Output`: The output tensor.
class ArgMax {
 public:
  /// Optional attribute setters for ArgMax
  struct Attrs {
    /// Defaults to DT_INT64
    TF_MUST_USE_RESULT Attrs OutputType(DataType x) {
      // Setters return a modified copy so calls can be chained fluently.
      Attrs ret = *this;
      ret.output_type_ = x;
      return ret;
    }

    DataType output_type_ = DT_INT64;
  };
  ArgMax(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
       ::tensorflow::Input dimension);
  // Overload taking optional attributes (see `Attrs`).
  ArgMax(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
       ::tensorflow::Input dimension, const ArgMax::Attrs& attrs);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return output.node(); }

  // Convenience: ArgMax::OutputType(dt) builds an Attrs with the index dtype set.
  static Attrs OutputType(DataType x) {
    return Attrs().OutputType(x);
  }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output output;  // Indices of the max along `dimension`.
};
442 | |
/// Returns the index with the smallest value across dimensions of a tensor.
///
/// Note that in case of ties the identity of the return value is not guaranteed.
///
/// Usage:
/// ```python
/// import tensorflow as tf
/// a = [1, 10, 26.9, 2.8, 166.32, 62.3]
/// b = tf.math.argmin(input = a)
/// c = tf.keras.backend.eval(b)
/// # c = 0
/// # here a[0] = 1 which is the smallest element of a across axis 0
/// ```
///
/// Args:
/// * scope: A Scope object
/// * dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
/// Describes which dimension of the input Tensor to reduce across. For vectors,
/// use dimension = 0.
///
/// Returns:
/// * `Output`: The output tensor.
class ArgMin {
 public:
  /// Optional attribute setters for ArgMin
  struct Attrs {
    /// Defaults to DT_INT64
    TF_MUST_USE_RESULT Attrs OutputType(DataType x) {
      // Setters return a modified copy so calls can be chained fluently.
      Attrs ret = *this;
      ret.output_type_ = x;
      return ret;
    }

    DataType output_type_ = DT_INT64;
  };
  ArgMin(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
       ::tensorflow::Input dimension);
  // Overload taking optional attributes (see `Attrs`).
  ArgMin(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
       ::tensorflow::Input dimension, const ArgMin::Attrs& attrs);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return output.node(); }

  // Convenience: ArgMin::OutputType(dt) builds an Attrs with the index dtype set.
  static Attrs OutputType(DataType x) {
    return Attrs().OutputType(x);
  }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output output;  // Indices of the min along `dimension`.
};
493 | |
/// Computes the trigonometric inverse sine of x element-wise.
///
/// The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that
/// if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`.
///
/// **Note**: The output of `tf.math.asin` will lie within the invertible range
/// of sine, i.e [-pi/2, pi/2].
///
/// For example:
///
/// ```python
/// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
/// x = tf.constant([1.047, 0.785])
/// y = tf.math.sin(x) # [0.8659266, 0.7068252]
///
/// tf.math.asin(y) # [1.047, 0.785] = x
/// ```
///
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Asin {
 public:
  Asin(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output y;  // Element-wise arcsine of `x`.
};
528 | |
/// Computes inverse hyperbolic sine of x element-wise.
///
/// Given an input tensor, this function computes inverse hyperbolic sine
/// for every element in the tensor. Both input and output has a range of
/// `[-inf, inf]`.
///
/// ```python
/// x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")])
/// tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]
/// ```
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Asinh {
 public:
  Asinh(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output y;  // Element-wise asinh of `x`.
};
555 | |
/// Computes the trigonometric inverse tangent of x element-wise.
///
/// The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
/// if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`.
///
/// **Note**: The output of `tf.math.atan` will lie within the invertible range
/// of tan, i.e (-pi/2, pi/2).
///
/// For example:
///
/// ```python
/// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
/// x = tf.constant([1.047, 0.785])
/// y = tf.math.tan(x) # [1.731261, 0.99920404]
///
/// tf.math.atan(y) # [1.047, 0.785] = x
/// ```
///
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Atan {
 public:
  Atan(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output y;  // Element-wise arctangent of `x`.
};
590 | |
/// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
///
/// This is the angle \\( \theta \in [-\pi, \pi] \\) such that
/// \\[ x = r \cos(\theta) \\]
/// and
/// \\[ y = r \sin(\theta) \\]
/// where \\(r = \sqrt{x^2 + y^2} \\).
///
/// For example:
///
/// >>> x = [1., 1.]
/// >>> y = [1., -1.]
/// >>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy())
/// [ 45. -45.]
///
///
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The z tensor.
class Atan2 {
 public:
  // Note the argument order: y first, then x, mirroring atan2(y, x).
  Atan2(const ::tensorflow::Scope& scope, ::tensorflow::Input y,
        ::tensorflow::Input x);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output z;  // Element-wise atan2(y, x).
};
624 | |
/// Computes inverse hyperbolic tangent of x element-wise.
///
/// Given an input tensor, this function computes inverse hyperbolic tangent
/// for every element in the tensor. Input range is `[-1,1]` and output range is
/// `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the
/// input is `1`, output will be `inf`. Values outside the range will have
/// `nan` as output.
///
/// ```python
/// x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")])
/// tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan]
/// ```
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Atanh {
 public:
  Atanh(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output y;  // Element-wise atanh of `x`.
};
653 | |
/// Multiplies slices of two tensors in batches.
///
/// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
/// viewed as an element of a batch), and arranges the individual results
/// in a single output tensor of the same batch size. Each of the
/// individual slices can optionally be adjointed (to adjoint a matrix
/// means to transpose and conjugate it) before multiplication by setting
/// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
///
/// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
/// and `[..., r_y, c_y]`.
///
/// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
///
///     r_o = c_x if adj_x else r_x
///     c_o = r_y if adj_y else c_y
///
/// It is computed as:
///
///     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
///
/// Args:
/// * scope: A Scope object
/// * x: 2-D or higher with shape `[..., r_x, c_x]`.
/// * y: 2-D or higher with shape `[..., r_y, c_y]`.
///
/// Optional attributes (see `Attrs`):
/// * adj_x: If `True`, adjoint the slices of `x`. Defaults to `False`.
/// * adj_y: If `True`, adjoint the slices of `y`. Defaults to `False`.
///
/// Returns:
/// * `Output`: 3-D or higher with shape `[..., r_o, c_o]`
class BatchMatMul {
 public:
  /// Optional attribute setters for BatchMatMul
  struct Attrs {
    /// If `True`, adjoint the slices of `x`. Defaults to `False`.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AdjX(bool x) {
      // Setters return a modified copy so calls can be chained fluently.
      Attrs ret = *this;
      ret.adj_x_ = x;
      return ret;
    }

    /// If `True`, adjoint the slices of `y`. Defaults to `False`.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AdjY(bool x) {
      Attrs ret = *this;
      ret.adj_y_ = x;
      return ret;
    }

    bool adj_x_ = false;
    bool adj_y_ = false;
  };
  BatchMatMul(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
            ::tensorflow::Input y);
  // Overload taking optional attributes (see `Attrs`).
  BatchMatMul(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
            ::tensorflow::Input y, const BatchMatMul::Attrs& attrs);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return output.node(); }

  // Convenience builders for single-attribute Attrs; chainable via the
  // member setters, e.g. BatchMatMul::AdjX(true).AdjY(true).
  static Attrs AdjX(bool x) {
    return Attrs().AdjX(x);
  }
  static Attrs AdjY(bool x) {
    return Attrs().AdjY(x);
  }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output output;  // Batched matrix product of `x` and `y`.
};
729 | |
/// Multiplies slices of two tensors in batches.
///
/// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
/// viewed as an element of a batch), and arranges the individual results
/// in a single output tensor of the same batch size. Each of the
/// individual slices can optionally be adjointed (to adjoint a matrix
/// means to transpose and conjugate it) before multiplication by setting
/// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
///
/// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
/// and `[..., r_y, c_y]`.
///
/// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
///
///     r_o = c_x if adj_x else r_x
///     c_o = r_y if adj_y else c_y
///
/// It is computed as:
///
///     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
///
/// *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More
/// about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
///
///
/// Args:
/// * scope: A Scope object
/// * x: 2-D or higher with shape `[..., r_x, c_x]`.
/// * y: 2-D or higher with shape `[..., r_y, c_y]`.
///
/// Optional attributes (see `Attrs`):
/// * adj_x: If `True`, adjoint the slices of `x`. Defaults to `False`.
/// * adj_y: If `True`, adjoint the slices of `y`. Defaults to `False`.
///
/// Returns:
/// * `Output`: 3-D or higher with shape `[..., r_o, c_o]`
class BatchMatMulV2 {
 public:
  /// Optional attribute setters for BatchMatMulV2
  struct Attrs {
    /// If `True`, adjoint the slices of `x`. Defaults to `False`.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AdjX(bool x) {
      // Setters return a modified copy so calls can be chained fluently.
      Attrs ret = *this;
      ret.adj_x_ = x;
      return ret;
    }

    /// If `True`, adjoint the slices of `y`. Defaults to `False`.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AdjY(bool x) {
      Attrs ret = *this;
      ret.adj_y_ = x;
      return ret;
    }

    bool adj_x_ = false;
    bool adj_y_ = false;
  };
  BatchMatMulV2(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
              ::tensorflow::Input y);
  // Overload taking optional attributes (see `Attrs`).
  BatchMatMulV2(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
              ::tensorflow::Input y, const BatchMatMulV2::Attrs& attrs);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return output.node(); }

  // Convenience builders for single-attribute Attrs; chainable via the
  // member setters, e.g. BatchMatMulV2::AdjX(true).AdjY(true).
  static Attrs AdjX(bool x) {
    return Attrs().AdjX(x);
  }
  static Attrs AdjY(bool x) {
    return Attrs().AdjY(x);
  }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output output;  // Batched (broadcasting) matrix product.
};
810 | |
/// Multiplies slices of two tensors in batches.
///
/// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
/// viewed as an element of a batch), and arranges the individual results
/// in a single output tensor of the same batch size. Each of the
/// individual slices can optionally be adjointed (to adjoint a matrix
/// means to transpose and conjugate it) before multiplication by setting
/// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
///
/// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
/// and `[..., r_y, c_y]`.
///
/// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
///
///     r_o = c_x if adj_x else r_x
///     c_o = r_y if adj_y else c_y
///
/// It is computed as:
///
///     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
///
/// *NOTE*: `BatchMatMulV3` supports broadcasting in the batch dimensions. More
/// about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
///
///
/// Args:
/// * scope: A Scope object
/// * x: 2-D or higher with shape `[..., r_x, c_x]`.
/// * y: 2-D or higher with shape `[..., r_y, c_y]`.
/// * Tout: If not specified, Tout is the same as the input type.
///
/// Optional attributes (see `Attrs`):
/// * adj_x: If `True`, adjoint the slices of `x`. Defaults to `False`.
/// * adj_y: If `True`, adjoint the slices of `y`. Defaults to `False`.
///
/// Returns:
/// * `Output`: 3-D or higher with shape `[..., r_o, c_o]`
class BatchMatMulV3 {
 public:
  /// Optional attribute setters for BatchMatMulV3
  struct Attrs {
    /// If `True`, adjoint the slices of `x`. Defaults to `False`.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AdjX(bool x) {
      // Setters return a modified copy so calls can be chained fluently.
      Attrs ret = *this;
      ret.adj_x_ = x;
      return ret;
    }

    /// If `True`, adjoint the slices of `y`. Defaults to `False`.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AdjY(bool x) {
      Attrs ret = *this;
      ret.adj_y_ = x;
      return ret;
    }

    bool adj_x_ = false;
    bool adj_y_ = false;
  };
  // Unlike V2, V3 takes an explicit output DataType (`Tout`).
  BatchMatMulV3(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
              ::tensorflow::Input y, DataType Tout);
  // Overload taking optional attributes (see `Attrs`).
  BatchMatMulV3(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
              ::tensorflow::Input y, DataType Tout, const BatchMatMulV3::Attrs&
              attrs);
  // Implicit conversions so this op can be used directly as an op input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  // The underlying graph node for this op.
  ::tensorflow::Node* node() const { return output.node(); }

  // Convenience builders for single-attribute Attrs; chainable via the
  // member setters, e.g. BatchMatMulV3::AdjX(true).AdjY(true).
  static Attrs AdjX(bool x) {
    return Attrs().AdjX(x);
  }
  static Attrs AdjY(bool x) {
    return Attrs().AdjY(x);
  }

  Operation operation;  // The Operation added to the Scope's graph.
  ::tensorflow::Output output;  // Batched matrix product, cast to `Tout`.
};
893 | |
894 | /// Compute the regularized incomplete beta integral \\(I_x(a, b)\\). |
895 | /// |
896 | /// The regularized incomplete beta integral is defined as: |
897 | /// |
898 | /// |
899 | /// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\) |
900 | /// |
901 | /// where |
902 | /// |
903 | /// |
904 | /// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\) |
905 | /// |
906 | /// |
907 | /// is the incomplete beta function and \\(B(a, b)\\) is the *complete* |
908 | /// beta function. |
909 | /// |
910 | /// Args: |
911 | /// * scope: A Scope object |
912 | /// |
913 | /// Returns: |
914 | /// * `Output`: The z tensor. |
class Betainc {
 public:
  /// `a`, `b` are the beta-function parameters; `x` is the upper limit of
  /// integration (see the op documentation above).
  Betainc(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
        ::tensorflow::Input b, ::tensorflow::Input x);
  /// Implicit conversions so the op object can be used directly as `z`.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output z;
};
926 | |
927 | /// Counts the number of occurrences of each value in an integer array. |
928 | /// |
929 | /// Outputs a vector with length `size` and the same dtype as `weights`. If |
930 | /// `weights` are empty, then index `i` stores the number of times the value `i` is |
931 | /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of |
932 | /// the value in `weights` at each index where the corresponding value in `arr` is |
933 | /// `i`. |
934 | /// |
935 | /// Values in `arr` outside of the range [0, size) are ignored. |
936 | /// |
937 | /// Args: |
938 | /// * scope: A Scope object |
939 | /// * arr: int32 `Tensor`. |
940 | /// * size: non-negative int32 scalar `Tensor`. |
941 | /// * weights: is an int32, int64, float32, or float64 `Tensor` with the same |
942 | /// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights |
943 | /// equal to 1. |
944 | /// |
945 | /// Returns: |
946 | /// * `Output`: 1D `Tensor` with length equal to `size`. The counts or summed weights for |
947 | /// each value in the range [0, size). |
class Bincount {
 public:
  /// `arr`: values to count; `size`: output length; `weights`: per-element
  /// weights, or a length-0 tensor to count occurrences (see doc above).
  Bincount(const ::tensorflow::Scope& scope, ::tensorflow::Input arr,
         ::tensorflow::Input size, ::tensorflow::Input weights);
  /// Implicit conversions so the op object can be used directly as `bins`.
  operator ::tensorflow::Output() const { return bins; }
  operator ::tensorflow::Input() const { return bins; }
  /// The graph node that produces `bins`.
  ::tensorflow::Node* node() const { return bins.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The counts (or summed weights) tensor.
  ::tensorflow::Output bins;
};
959 | |
960 | /// Bucketizes 'input' based on 'boundaries'. |
961 | /// |
962 | /// For example, if the inputs are |
963 | /// boundaries = [0, 10, 100] |
964 | /// input = [[-5, 10000] |
965 | /// [150, 10] |
966 | /// [5, 100]] |
967 | /// |
968 | /// then the output will be |
969 | /// output = [[0, 3] |
970 | /// [3, 2] |
971 | /// [1, 3]] |
972 | /// |
973 | /// Args: |
974 | /// * scope: A Scope object |
975 | /// * input: Any shape of Tensor contains with int or float type. |
976 | /// * boundaries: A sorted list of floats gives the boundary of the buckets. |
977 | /// |
978 | /// Returns: |
979 | /// * `Output`: Same shape with 'input', each value of input replaced with bucket index. |
980 | /// |
981 | /// @compatibility(numpy) |
982 | /// Equivalent to np.digitize. |
983 | /// @end_compatibility |
class Bucketize {
 public:
  /// Note: `boundaries` is a compile-time attribute (a sorted list of floats),
  /// not a graph input.
  Bucketize(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
          gtl::ArraySlice<float>& boundaries);
  /// Implicit conversions so the op object can be used directly as `output`.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// Bucket indices, same shape as `input`.
  ::tensorflow::Output output;
};
995 | |
996 | /// Cast x of type SrcT to y of DstT. |
997 | /// |
998 | /// Args: |
999 | /// * scope: A Scope object |
1000 | /// |
1001 | /// Returns: |
1002 | /// * `Output`: The y tensor. |
class Cast {
 public:
  /// Optional attribute setters for Cast
  struct Attrs {
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs Truncate(bool x) {
      Attrs ret = *this;
      ret.Truncate_ = x;
      return ret;
    }

    // Attribute storage; default mirrors the op registration.
    bool Truncate_ = false;
  };
  /// Casts `x` to `DstT` with default attributes.
  Cast(const ::tensorflow::Scope& scope, ::tensorflow::Input x, DataType DstT);
  /// Casts `x` to `DstT` with explicitly supplied optional attributes.
  Cast(const ::tensorflow::Scope& scope, ::tensorflow::Input x, DataType DstT,
     const Cast::Attrs& attrs);
  /// Implicit conversions so the op object can be used directly as `y`.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// Shorthand for `Attrs().Truncate(x)`.
  static Attrs Truncate(bool x) {
    return Attrs().Truncate(x);
  }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output y;
};
1030 | |
1031 | /// Returns element-wise smallest integer not less than x. |
1032 | /// |
1033 | /// Args: |
1034 | /// * scope: A Scope object |
1035 | /// |
1036 | /// Returns: |
1037 | /// * `Output`: The y tensor. |
class Ceil {
 public:
  Ceil(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op object can be used directly as `y`.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output y;
};
1048 | |
1049 | /// Clips tensor values to a specified min and max. |
1050 | /// |
1051 | /// Given a tensor `t`, this operation returns a tensor of the same type and |
1052 | /// shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. |
1053 | /// Any values less than `clip_value_min` are set to `clip_value_min`. Any values |
1054 | /// greater than `clip_value_max` are set to `clip_value_max`. |
1055 | /// |
1056 | /// Args: |
1057 | /// * scope: A Scope object |
1058 | /// * t: A `Tensor`. |
1059 | /// * clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape |
1060 | /// as `t`. The minimum value to clip by. |
1061 | /// * clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape |
1062 | /// as `t`. The maximum value to clip by. |
1063 | /// |
1064 | /// Returns: |
1065 | /// * `Output`: A clipped `Tensor` with the same shape as input 't'. |
class ClipByValue {
 public:
  /// `clip_value_min`/`clip_value_max` are scalars or tensors with the same
  /// shape as `t` (see doc above).
  ClipByValue(const ::tensorflow::Scope& scope, ::tensorflow::Input t,
            ::tensorflow::Input clip_value_min, ::tensorflow::Input
            clip_value_max);
  /// Implicit conversions so the op object can be used directly as `output`.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The clipped tensor, same shape as `t`.
  ::tensorflow::Output output;
};
1078 | |
1079 | /// Converts two real numbers to a complex number. |
1080 | /// |
1081 | /// Given a tensor `real` representing the real part of a complex number, and a |
1082 | /// tensor `imag` representing the imaginary part of a complex number, this |
1083 | /// operation returns complex numbers elementwise of the form \\(a + bj\\), where |
1084 | /// *a* represents the `real` part and *b* represents the `imag` part. |
1085 | /// |
1086 | /// The input tensors `real` and `imag` must have the same shape. |
1087 | /// |
1088 | /// For example: |
1089 | /// |
1090 | /// ``` |
1091 | /// # tensor 'real' is [2.25, 3.25] |
1092 | /// # tensor `imag` is [4.75, 5.75] |
1093 | /// tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] |
1094 | /// ``` |
1095 | /// |
1096 | /// Args: |
1097 | /// * scope: A Scope object |
1098 | /// |
1099 | /// Returns: |
1100 | /// * `Output`: The out tensor. |
class Complex {
 public:
  /// Optional attribute setters for Complex
  struct Attrs {
    /// Defaults to DT_COMPLEX64
    TF_MUST_USE_RESULT Attrs Tout(DataType x) {
      Attrs ret = *this;
      ret.Tout_ = x;
      return ret;
    }

    // Attribute storage; default mirrors the op registration.
    DataType Tout_ = DT_COMPLEX64;
  };
  /// Builds the op with default attributes (complex64 output).
  Complex(const ::tensorflow::Scope& scope, ::tensorflow::Input real,
        ::tensorflow::Input imag);
  /// Builds the op with explicitly supplied optional attributes.
  Complex(const ::tensorflow::Scope& scope, ::tensorflow::Input real,
        ::tensorflow::Input imag, const Complex::Attrs& attrs);
  /// Implicit conversions so the op object can be used directly as `out`.
  operator ::tensorflow::Output() const { return out; }
  operator ::tensorflow::Input() const { return out; }
  /// The graph node that produces `out`.
  ::tensorflow::Node* node() const { return out.node(); }

  /// Shorthand for `Attrs().Tout(x)`.
  static Attrs Tout(DataType x) {
    return Attrs().Tout(x);
  }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output out;
};
1129 | |
1130 | /// Computes the complex absolute value of a tensor. |
1131 | /// |
1132 | /// Given a tensor `x` of complex numbers, this operation returns a tensor of type |
1133 | /// `float` or `double` that is the absolute value of each element in `x`. All |
1134 | /// elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute |
1135 | /// value is computed as \\( \sqrt{a^2 + b^2}\\). |
1136 | /// |
1137 | /// For example: |
1138 | /// |
1139 | /// >>> x = tf.complex(3.0, 4.0) |
1140 | /// >>> print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) |
1141 | /// 5.0 |
1142 | /// |
1143 | /// |
1144 | /// Args: |
1145 | /// * scope: A Scope object |
1146 | /// |
1147 | /// Returns: |
1148 | /// * `Output`: The y tensor. |
class ComplexAbs {
 public:
  /// Optional attribute setters for ComplexAbs
  struct Attrs {
    /// Defaults to DT_FLOAT
    TF_MUST_USE_RESULT Attrs Tout(DataType x) {
      Attrs ret = *this;
      ret.Tout_ = x;
      return ret;
    }

    // Attribute storage; default mirrors the op registration.
    DataType Tout_ = DT_FLOAT;
  };
  /// Builds the op with default attributes (float output).
  ComplexAbs(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Builds the op with explicitly supplied optional attributes.
  ComplexAbs(const ::tensorflow::Scope& scope, ::tensorflow::Input x, const
           ComplexAbs::Attrs& attrs);
  /// Implicit conversions so the op object can be used directly as `y`.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// Shorthand for `Attrs().Tout(x)`.
  static Attrs Tout(DataType x) {
    return Attrs().Tout(x);
  }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output y;
};
1176 | |
1177 | /// Returns the complex conjugate of a complex number. |
1178 | /// |
1179 | /// Given a tensor `input` of complex numbers, this operation returns a tensor of |
1180 | /// complex numbers that are the complex conjugate of each element in `input`. The |
1181 | /// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the |
1182 | /// real part and *b* is the imaginary part. |
1183 | /// |
1184 | /// The complex conjugate returned by this operation is of the form \\(a - bj\\). |
1185 | /// |
1186 | /// For example: |
1187 | /// |
1188 | /// ``` |
1189 | /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] |
1190 | /// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] |
1191 | /// ``` |
1192 | /// |
1193 | /// Args: |
1194 | /// * scope: A Scope object |
1195 | /// |
1196 | /// Returns: |
1197 | /// * `Output`: The output tensor. |
class Conj {
 public:
  Conj(const ::tensorflow::Scope& scope, ::tensorflow::Input input);
  /// Implicit conversions so the op object can be used directly as `output`.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output output;
};
1208 | |
1209 | /// Computes cos of x element-wise. |
1210 | /// |
1211 | /// Given an input tensor, this function computes cosine of every |
1212 | /// element in the tensor. Input range is `(-inf, inf)` and |
1213 | /// output range is `[-1,1]`. If input lies outside the boundary, `nan` |
1214 | /// is returned. |
1215 | /// |
1216 | /// ```python |
1217 | /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) |
1218 | /// tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan] |
1219 | /// ``` |
1220 | /// |
1221 | /// Args: |
1222 | /// * scope: A Scope object |
1223 | /// |
1224 | /// Returns: |
1225 | /// * `Output`: The y tensor. |
class Cos {
 public:
  Cos(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op object can be used directly as `y`.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output y;
};
1236 | |
1237 | /// Computes hyperbolic cosine of x element-wise. |
1238 | /// |
1239 | /// Given an input tensor, this function computes hyperbolic cosine of every |
1240 | /// element in the tensor. Input range is `[-inf, inf]` and output range |
1241 | /// is `[1, inf]`. |
1242 | /// |
1243 | /// ```python |
1244 | /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) |
1245 | /// tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf] |
1246 | /// ``` |
1247 | /// |
1248 | /// Args: |
1249 | /// * scope: A Scope object |
1250 | /// |
1251 | /// Returns: |
1252 | /// * `Output`: The y tensor. |
class Cosh {
 public:
  Cosh(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op object can be used directly as `y`.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output y;
};
1263 | |
1264 | /// Compute the pairwise cross product. |
1265 | /// |
1266 | /// `a` and `b` must be the same shape; they can either be simple 3-element vectors, |
1267 | /// or any shape where the innermost dimension is 3. In the latter case, each pair |
1268 | /// of corresponding 3-element vectors is cross-multiplied independently. |
1269 | /// |
1270 | /// Args: |
1271 | /// * scope: A Scope object |
1272 | /// * a: A tensor containing 3-element vectors. |
1273 | /// * b: Another tensor, of same type and shape as `a`. |
1274 | /// |
1275 | /// Returns: |
1276 | /// * `Output`: Pairwise cross product of the vectors in `a` and `b`. |
class Cross {
 public:
  /// `a` and `b` must have the same shape, with innermost dimension 3
  /// (see doc above).
  Cross(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
      ::tensorflow::Input b);
  /// Implicit conversions so the op object can be used directly as `product`.
  operator ::tensorflow::Output() const { return product; }
  operator ::tensorflow::Input() const { return product; }
  /// The graph node that produces `product`.
  ::tensorflow::Node* node() const { return product.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The pairwise cross products.
  ::tensorflow::Output product;
};
1288 | |
1289 | /// Compute the cumulative product of the tensor `x` along `axis`. |
1290 | /// |
1291 | /// By default, this op performs an inclusive cumprod, which means that the first |
1292 | /// element of the input is identical to the first element of the output: |
1293 | /// |
1294 | /// ```python |
1295 | /// tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] |
1296 | /// ``` |
1297 | /// |
1298 | /// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is |
1299 | /// performed instead: |
1300 | /// |
1301 | /// ```python |
1302 | /// tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] |
1303 | /// ``` |
1304 | /// |
1305 | /// By setting the `reverse` kwarg to `True`, the cumprod is performed in the |
1306 | /// opposite direction: |
1307 | /// |
1308 | /// ```python |
1309 | /// tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] |
1310 | /// ``` |
1311 | /// |
1312 | /// This is more efficient than using separate `tf.reverse` ops. |
1313 | /// |
1314 | /// The `reverse` and `exclusive` kwargs can also be combined: |
1315 | /// |
1316 | /// ```python |
1317 | /// tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] |
1318 | /// ``` |
1319 | /// |
1320 | /// Args: |
1321 | /// * scope: A Scope object |
1322 | /// * x: A `Tensor`. Must be one of the following types: `float32`, `float64`, |
1323 | /// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, |
1324 | /// `complex128`, `qint8`, `quint8`, `qint32`, `half`. |
1325 | /// * axis: A `Tensor` of type `int32` (default: 0). Must be in the range |
1326 | /// `[-rank(x), rank(x))`. |
1327 | /// |
1328 | /// Optional attributes (see `Attrs`): |
1329 | /// * exclusive: If `True`, perform exclusive cumprod. |
1330 | /// * reverse: A `bool` (default: False). |
1331 | /// |
1332 | /// Returns: |
1333 | /// * `Output`: The out tensor. |
class Cumprod {
 public:
  /// Optional attribute setters for Cumprod
  struct Attrs {
    /// If `True`, perform exclusive cumprod.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs Exclusive(bool x) {
      Attrs ret = *this;
      ret.exclusive_ = x;
      return ret;
    }

    /// A `bool` (default: False).
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs Reverse(bool x) {
      Attrs ret = *this;
      ret.reverse_ = x;
      return ret;
    }

    // Attribute storage; defaults mirror the op registration.
    bool exclusive_ = false;
    bool reverse_ = false;
  };
  /// Builds an inclusive, forward cumprod (default attributes).
  Cumprod(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
        ::tensorflow::Input axis);
  /// Builds the op with explicitly supplied optional attributes.
  Cumprod(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
        ::tensorflow::Input axis, const Cumprod::Attrs& attrs);
  /// Implicit conversions so the op object can be used directly as `out`.
  operator ::tensorflow::Output() const { return out; }
  operator ::tensorflow::Input() const { return out; }
  /// The graph node that produces `out`.
  ::tensorflow::Node* node() const { return out.node(); }

  /// Shorthand for `Attrs().Exclusive(x)`.
  static Attrs Exclusive(bool x) {
    return Attrs().Exclusive(x);
  }
  /// Shorthand for `Attrs().Reverse(x)`.
  static Attrs Reverse(bool x) {
    return Attrs().Reverse(x);
  }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output out;
};
1377 | |
1378 | /// Compute the cumulative sum of the tensor `x` along `axis`. |
1379 | /// |
1380 | /// By default, this op performs an inclusive cumsum, which means that the first |
1381 | /// element of the input is identical to the first element of the output: |
1382 | /// |
1383 | /// ```python |
1384 | /// tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] |
1385 | /// ``` |
1386 | /// |
1387 | /// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is |
1388 | /// performed instead: |
1389 | /// |
1390 | /// ```python |
1391 | /// tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] |
1392 | /// ``` |
1393 | /// |
1394 | /// By setting the `reverse` kwarg to `True`, the cumsum is performed in the |
1395 | /// opposite direction: |
1396 | /// |
1397 | /// ```python |
1398 | /// tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] |
1399 | /// ``` |
1400 | /// |
1401 | /// This is more efficient than using separate `tf.reverse` ops. |
1402 | /// |
1403 | /// The `reverse` and `exclusive` kwargs can also be combined: |
1404 | /// |
1405 | /// ```python |
1406 | /// tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] |
1407 | /// ``` |
1408 | /// |
1409 | /// Args: |
1410 | /// * scope: A Scope object |
1411 | /// * x: A `Tensor`. Must be one of the following types: `float32`, `float64`, |
1412 | /// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, |
1413 | /// `complex128`, `qint8`, `quint8`, `qint32`, `half`. |
1414 | /// * axis: A `Tensor` of type `int32` (default: 0). Must be in the range |
1415 | /// `[-rank(x), rank(x))`. |
1416 | /// |
1417 | /// Optional attributes (see `Attrs`): |
1418 | /// * exclusive: If `True`, perform exclusive cumsum. |
1419 | /// * reverse: A `bool` (default: False). |
1420 | /// |
1421 | /// Returns: |
1422 | /// * `Output`: The out tensor. |
class Cumsum {
 public:
  /// Optional attribute setters for Cumsum
  struct Attrs {
    /// If `True`, perform exclusive cumsum.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs Exclusive(bool x) {
      Attrs ret = *this;
      ret.exclusive_ = x;
      return ret;
    }

    /// A `bool` (default: False).
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs Reverse(bool x) {
      Attrs ret = *this;
      ret.reverse_ = x;
      return ret;
    }

    // Attribute storage; defaults mirror the op registration.
    bool exclusive_ = false;
    bool reverse_ = false;
  };
  /// Builds an inclusive, forward cumsum (default attributes).
  Cumsum(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
       ::tensorflow::Input axis);
  /// Builds the op with explicitly supplied optional attributes.
  Cumsum(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
       ::tensorflow::Input axis, const Cumsum::Attrs& attrs);
  /// Implicit conversions so the op object can be used directly as `out`.
  operator ::tensorflow::Output() const { return out; }
  operator ::tensorflow::Input() const { return out; }
  /// The graph node that produces `out`.
  ::tensorflow::Node* node() const { return out.node(); }

  /// Shorthand for `Attrs().Exclusive(x)`.
  static Attrs Exclusive(bool x) {
    return Attrs().Exclusive(x);
  }
  /// Shorthand for `Attrs().Reverse(x)`.
  static Attrs Reverse(bool x) {
    return Attrs().Reverse(x);
  }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output out;
};
1466 | |
1467 | /// Counts the number of occurrences of each value in an integer array. |
1468 | /// |
1469 | /// Outputs a vector with length `size` and the same dtype as `weights`. If |
1470 | /// `weights` are empty, then index `i` stores the number of times the value `i` is |
1471 | /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of |
1472 | /// the value in `weights` at each index where the corresponding value in `arr` is |
1473 | /// `i`. |
1474 | /// |
1475 | /// Values in `arr` outside of the range [0, size) are ignored. |
1476 | /// |
1477 | /// Args: |
1478 | /// * scope: A Scope object |
1479 | /// * input: 1D or 2D int `Tensor`. |
1480 | /// * size: non-negative int scalar `Tensor`. |
1481 | /// * weights: is an int32, int64, float32, or float64 `Tensor` with the same |
1482 | /// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights |
1483 | /// equal to 1. |
1484 | /// |
1485 | /// Optional attributes (see `Attrs`): |
1486 | /// * binary_output: bool; Whether the kernel should count the appearance or number of occurrences. |
1487 | /// |
1488 | /// Returns: |
1489 | /// * `Output`: 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`]. |
1490 | /// The counts or summed weights for each value in the range [0, size). |
class DenseBincount {
 public:
  /// Optional attribute setters for DenseBincount
  struct Attrs {
    /// bool; Whether the kernel should count the appearance or number of occurrences.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs BinaryOutput(bool x) {
      Attrs ret = *this;
      ret.binary_output_ = x;
      return ret;
    }

    // Attribute storage; default mirrors the op registration.
    bool binary_output_ = false;
  };
  /// Builds the op with default attributes (counts occurrences).
  DenseBincount(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
              ::tensorflow::Input size, ::tensorflow::Input weights);
  /// Builds the op with explicitly supplied optional attributes.
  DenseBincount(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
              ::tensorflow::Input size, ::tensorflow::Input weights, const
              DenseBincount::Attrs& attrs);
  /// Implicit conversions so the op object can be used directly as `output`.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Shorthand for `Attrs().BinaryOutput(x)`.
  static Attrs BinaryOutput(bool x) {
    return Attrs().BinaryOutput(x);
  }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The counts (or summed weights) tensor.
  ::tensorflow::Output output;
};
1522 | |
/// Computes Psi, the derivative of Lgamma, element-wise.
///
/// Lgamma is the log of the absolute value of `Gamma(x)`.
1526 | /// |
1527 | /// Args: |
1528 | /// * scope: A Scope object |
1529 | /// |
1530 | /// Returns: |
1531 | /// * `Output`: The y tensor. |
class Digamma {
 public:
  Digamma(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op object can be used directly as `y`.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output y;
};
1542 | |
1543 | /// Returns x / y element-wise. |
1544 | /// |
1545 | /// *NOTE*: `Div` supports broadcasting. More about broadcasting |
1546 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
1547 | /// |
1548 | /// Args: |
1549 | /// * scope: A Scope object |
1550 | /// |
1551 | /// Returns: |
1552 | /// * `Output`: The z tensor. |
class Div {
 public:
  Div(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
    ::tensorflow::Input y);
  /// Implicit conversions so the op object can be used directly as `z`.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output z;
};
1564 | |
1565 | /// Returns 0 if the denominator is zero. |
1566 | /// |
1567 | /// |
1568 | /// *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting |
1569 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
1570 | /// |
1571 | /// Args: |
1572 | /// * scope: A Scope object |
1573 | /// |
1574 | /// Returns: |
1575 | /// * `Output`: The z tensor. |
class DivNoNan {
 public:
  DivNoNan(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
         ::tensorflow::Input y);
  /// Implicit conversions so the op object can be used directly as `z`.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output z;
};
1587 | |
1588 | /// Returns the truth value of (x == y) element-wise. |
1589 | /// |
1590 | /// *NOTE*: `Equal` supports broadcasting. More about broadcasting |
1591 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
1592 | /// |
1593 | /// ```python |
1594 | /// x = tf.constant([2, 4]) |
1595 | /// y = tf.constant(2) |
1596 | /// tf.math.equal(x, y) ==> array([True, False]) |
1597 | /// |
1598 | /// x = tf.constant([2, 4]) |
1599 | /// y = tf.constant([2, 4]) |
1600 | /// tf.math.equal(x, y) ==> array([True, True]) |
1601 | /// ``` |
1602 | /// |
1603 | /// Args: |
1604 | /// * scope: A Scope object |
1605 | /// |
1606 | /// Returns: |
1607 | /// * `Output`: The z tensor. |
class Equal {
 public:
  /// Optional attribute setters for Equal
  struct Attrs {
    /// Defaults to true
    TF_MUST_USE_RESULT Attrs IncompatibleShapeError(bool x) {
      Attrs ret = *this;
      ret.incompatible_shape_error_ = x;
      return ret;
    }

    // Attribute storage; default mirrors the op registration.
    bool incompatible_shape_error_ = true;
  };
  /// Builds the op with default attributes.
  Equal(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
      ::tensorflow::Input y);
  /// Builds the op with explicitly supplied optional attributes.
  Equal(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
      ::tensorflow::Input y, const Equal::Attrs& attrs);
  /// Implicit conversions so the op object can be used directly as `z`.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// Shorthand for `Attrs().IncompatibleShapeError(x)`.
  static Attrs IncompatibleShapeError(bool x) {
    return Attrs().IncompatibleShapeError(x);
  }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The boolean result tensor.
  ::tensorflow::Output z;
};
1636 | |
1637 | /// Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, x]$. |
1638 | /// |
1639 | /// Args: |
1640 | /// * scope: A Scope object |
1641 | /// |
1642 | /// Returns: |
1643 | /// * `Output`: The y tensor. |
class Erf {
 public:
  Erf(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op object can be used directly as `y`.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output y;
};
1654 | |
1655 | /// Computes the complementary error function of `x` element-wise. |
1656 | /// |
1657 | /// Args: |
1658 | /// * scope: A Scope object |
1659 | /// |
1660 | /// Returns: |
1661 | /// * `Output`: The y tensor. |
class Erfc {
 public:
  Erfc(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op object can be used directly as `y`.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output y;
};
1672 | |
1673 | /// TODO: add doc. |
1674 | /// |
1675 | /// Args: |
1676 | /// * scope: A Scope object |
1677 | /// |
1678 | /// Returns: |
1679 | /// * `Output`: The y tensor. |
class Erfinv {
 public:
  // Presumably computes the inverse error function element-wise (the
  // generated doc above is a TODO) — confirm against the op registration.
  Erfinv(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op object can be used directly as `y`.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The result tensor.
  ::tensorflow::Output y;
};
1690 | |
1691 | /// Computes the euclidean norm of elements across dimensions of a tensor. |
1692 | /// |
1693 | /// Reduces `input` along the dimensions given in `axis`. Unless |
1694 | /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in |
1695 | /// `axis`. If `keep_dims` is true, the reduced dimensions are |
1696 | /// retained with length 1. |
1697 | /// |
1698 | /// Args: |
1699 | /// * scope: A Scope object |
1700 | /// * input: The tensor to reduce. |
1701 | /// * axis: The dimensions to reduce. Must be in the range |
1702 | /// `[-rank(input), rank(input))`. |
1703 | /// |
1704 | /// Optional attributes (see `Attrs`): |
1705 | /// * keep_dims: If true, retain reduced dimensions with length 1. |
1706 | /// |
1707 | /// Returns: |
1708 | /// * `Output`: The reduced tensor. |
class EuclideanNorm {
 public:
  /// Optional attribute setters for EuclideanNorm
  struct Attrs {
    /// If true, retain reduced dimensions with length 1.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs KeepDims(bool x) {
      Attrs ret = *this;
      ret.keep_dims_ = x;
      return ret;
    }

    // Attribute storage; default mirrors the op registration.
    bool keep_dims_ = false;
  };
  /// Builds the op with default attributes (reduced dims dropped).
  EuclideanNorm(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
              ::tensorflow::Input axis);
  /// Builds the op with explicitly supplied optional attributes.
  EuclideanNorm(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
              ::tensorflow::Input axis, const EuclideanNorm::Attrs& attrs);
  /// Implicit conversions so the op object can be used directly as `output`.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Shorthand for `Attrs().KeepDims(x)`.
  static Attrs KeepDims(bool x) {
    return Attrs().KeepDims(x);
  }

  /// The Operation added to the graph for this op.
  Operation operation;
  /// The reduced tensor.
  ::tensorflow::Output output;
};
1739 | |
1740 | /// Computes exponential of x element-wise. \\(y = e^x\\). |
1741 | /// |
1742 | /// This function computes the exponential of every element in the input tensor. |
1743 | /// i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. |
1744 | /// `e` denotes Euler's number and is approximately equal to 2.718281. |
1745 | /// Output is positive for any real input. |
1746 | /// |
1747 | /// ```python |
1748 | /// x = tf.constant(2.0) |
1749 | /// tf.math.exp(x) ==> 7.389056 |
1750 | /// |
1751 | /// x = tf.constant([2.0, 8.0]) |
1752 | /// tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) |
1753 | /// ``` |
1754 | /// |
1755 | /// For complex numbers, the exponential value is calculated as follows: |
1756 | /// |
1757 | /// ``` |
1758 | /// e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) |
1759 | /// ``` |
1760 | /// |
1761 | /// Let's consider complex number 1+1j as an example. |
1762 | /// e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) |
1763 | /// |
1764 | /// ```python |
1765 | /// x = tf.constant(1 + 1j) |
1766 | /// tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j |
1767 | /// ``` |
1768 | /// |
1769 | /// Args: |
1770 | /// * scope: A Scope object |
1771 | /// |
1772 | /// Returns: |
1773 | /// * `Output`: The y tensor. |
class Exp {
 public:
  Exp(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor.
};
1784 | |
1785 | /// Computes `exp(x) - 1` element-wise. |
1786 | /// |
1787 | /// i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. |
1788 | /// `e` denotes Euler's number and is approximately equal to 2.718281. |
1789 | /// |
1790 | /// ```python |
1791 | /// x = tf.constant(2.0) |
1792 | /// tf.math.expm1(x) ==> 6.389056 |
1793 | /// |
1794 | /// x = tf.constant([2.0, 8.0]) |
1795 | /// tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) |
1796 | /// |
1797 | /// x = tf.constant(1 + 1j) |
1798 | /// tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) |
1799 | /// ``` |
1800 | /// |
1801 | /// Args: |
1802 | /// * scope: A Scope object |
1803 | /// |
1804 | /// Returns: |
1805 | /// * `Output`: The y tensor. |
class Expm1 {
 public:
  Expm1(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor.
};
1816 | |
1817 | /// Returns element-wise largest integer not greater than x. |
1818 | /// |
1819 | /// Args: |
1820 | /// * scope: A Scope object |
1821 | /// |
1822 | /// Returns: |
1823 | /// * `Output`: The y tensor. |
class Floor {
 public:
  Floor(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor.
};
1834 | |
1835 | /// Returns x // y element-wise. |
1836 | /// |
1837 | /// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting |
1838 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
1839 | /// |
1840 | /// Args: |
1841 | /// * scope: A Scope object |
1842 | /// |
1843 | /// Returns: |
1844 | /// * `Output`: The z tensor. |
class FloorDiv {
 public:
  FloorDiv(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
         ::tensorflow::Input y);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor.
};
1856 | |
1857 | /// Returns element-wise remainder of division. |
1858 | /// |
1859 | /// This follows Python semantics in that the |
1860 | /// result here is consistent with a flooring divide. E.g. |
1861 | /// `floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y. |
1862 | /// |
1863 | /// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting |
1864 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
1865 | /// |
1866 | /// Args: |
1867 | /// * scope: A Scope object |
1868 | /// |
1869 | /// Returns: |
1870 | /// * `Output`: The z tensor. |
class FloorMod {
 public:
  FloorMod(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
         ::tensorflow::Input y);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor.
};
1882 | |
1883 | /// Returns the truth value of (x > y) element-wise. |
1884 | /// |
1885 | /// *NOTE*: `Greater` supports broadcasting. More about broadcasting |
1886 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
1887 | /// |
1888 | /// Example: |
1889 | /// |
1890 | /// ```python |
1891 | /// x = tf.constant([5, 4, 6]) |
1892 | /// y = tf.constant([5, 2, 5]) |
1893 | /// tf.math.greater(x, y) ==> [False, True, True] |
1894 | /// |
1895 | /// x = tf.constant([5, 4, 6]) |
1896 | /// y = tf.constant([5]) |
1897 | /// tf.math.greater(x, y) ==> [False, False, True] |
1898 | /// ``` |
1899 | /// |
1900 | /// Args: |
1901 | /// * scope: A Scope object |
1902 | /// |
1903 | /// Returns: |
1904 | /// * `Output`: The z tensor. |
class Greater {
 public:
  Greater(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
        ::tensorflow::Input y);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor (boolean truth values).
};
1916 | |
1917 | /// Returns the truth value of (x >= y) element-wise. |
1918 | /// |
1919 | /// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting |
1920 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
1921 | /// |
1922 | /// Example: |
1923 | /// |
1924 | /// ```python |
1925 | /// x = tf.constant([5, 4, 6, 7]) |
1926 | /// y = tf.constant([5, 2, 5, 10]) |
1927 | /// tf.math.greater_equal(x, y) ==> [True, True, True, False] |
1928 | /// |
1929 | /// x = tf.constant([5, 4, 6, 7]) |
1930 | /// y = tf.constant([5]) |
1931 | /// tf.math.greater_equal(x, y) ==> [True, False, True, True] |
1932 | /// ``` |
1933 | /// |
1934 | /// Args: |
1935 | /// * scope: A Scope object |
1936 | /// |
1937 | /// Returns: |
1938 | /// * `Output`: The z tensor. |
class GreaterEqual {
 public:
  GreaterEqual(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
             ::tensorflow::Input y);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor (boolean truth values).
};
1950 | |
1951 | /// Return histogram of values. |
1952 | /// |
1953 | /// Given the tensor `values`, this operation returns a rank 1 histogram counting |
1954 | /// the number of entries in `values` that fall into every bin. The bins are |
1955 | /// equal width and determined by the arguments `value_range` and `nbins`. |
1956 | /// |
1957 | /// ```python |
1958 | /// # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) |
1959 | /// nbins = 5 |
1960 | /// value_range = [0.0, 5.0] |
1961 | /// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] |
1962 | /// |
1963 | /// with tf.get_default_session() as sess: |
1964 | /// hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) |
1965 | /// variables.global_variables_initializer().run() |
1966 | /// sess.run(hist) => [2, 1, 1, 0, 2] |
1967 | /// ``` |
1968 | /// |
1969 | /// Args: |
1970 | /// * scope: A Scope object |
1971 | /// * values: Numeric `Tensor`. |
1972 | /// * value_range: Shape [2] `Tensor` of same `dtype` as `values`. |
1973 | /// values <= value_range[0] will be mapped to hist[0], |
1974 | /// values >= value_range[1] will be mapped to hist[-1]. |
1975 | /// * nbins: Scalar `int32 Tensor`. Number of histogram bins. |
1976 | /// |
1977 | /// Returns: |
1978 | /// * `Output`: A 1-D `Tensor` holding histogram of values. |
class HistogramFixedWidth {
 public:
  /// Optional attribute setters for HistogramFixedWidth
  struct Attrs {
    /// Output dtype of the histogram counts.
    ///
    /// Defaults to DT_INT32
    // Setters copy the Attrs value so calls can be chained fluently.
    TF_MUST_USE_RESULT Attrs Dtype(DataType x) {
      Attrs ret = *this;
      ret.dtype_ = x;
      return ret;
    }

    DataType dtype_ = DT_INT32;
  };
  HistogramFixedWidth(const ::tensorflow::Scope& scope, ::tensorflow::Input
                    values, ::tensorflow::Input value_range,
                    ::tensorflow::Input nbins);
  HistogramFixedWidth(const ::tensorflow::Scope& scope, ::tensorflow::Input
                    values, ::tensorflow::Input value_range,
                    ::tensorflow::Input nbins, const
                    HistogramFixedWidth::Attrs& attrs);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return out; }
  operator ::tensorflow::Input() const { return out; }
  /// Returns the graph node that produces `out`.
  ::tensorflow::Node* node() const { return out.node(); }

  /// Convenience shorthand for Attrs().Dtype(x).
  static Attrs Dtype(DataType x) {
    return Attrs().Dtype(x);
  }

  Operation operation;       // Underlying graph operation.
  ::tensorflow::Output out;  // 1-D histogram of counts.
};
2010 | |
2011 | /// Compute the lower regularized incomplete Gamma function `P(a, x)`. |
2012 | /// |
2013 | /// The lower regularized incomplete Gamma function is defined as: |
2014 | /// |
2015 | /// |
2016 | /// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\) |
2017 | /// |
2018 | /// where |
2019 | /// |
2020 | /// \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\) |
2021 | /// |
2022 | /// is the lower incomplete Gamma function. |
2023 | /// |
/// Note, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete
/// Gamma function.
2026 | /// |
2027 | /// Args: |
2028 | /// * scope: A Scope object |
2029 | /// |
2030 | /// Returns: |
2031 | /// * `Output`: The z tensor. |
class Igamma {
 public:
  Igamma(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
       ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor.
};
2043 | |
2044 | /// Compute the upper regularized incomplete Gamma function `Q(a, x)`. |
2045 | /// |
2046 | /// The upper regularized incomplete Gamma function is defined as: |
2047 | /// |
2048 | /// \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\) |
2049 | /// |
2050 | /// where |
2051 | /// |
/// \\(Gamma(a, x) = \\int_{x}^{\\infty} t^{a-1} exp(-t) dt\\)
2053 | /// |
2054 | /// is the upper incomplete Gamma function. |
2055 | /// |
/// Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete
/// Gamma function.
2058 | /// |
2059 | /// Args: |
2060 | /// * scope: A Scope object |
2061 | /// |
2062 | /// Returns: |
2063 | /// * `Output`: The z tensor. |
class Igammac {
 public:
  Igammac(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
        ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor.
};
2075 | |
2076 | /// Returns the imaginary part of a complex number. |
2077 | /// |
2078 | /// Given a tensor `input` of complex numbers, this operation returns a tensor of |
2079 | /// type `float` that is the imaginary part of each element in `input`. All |
2080 | /// elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* |
2081 | /// is the real part and *b* is the imaginary part returned by this operation. |
2082 | /// |
2083 | /// For example: |
2084 | /// |
2085 | /// ``` |
2086 | /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] |
2087 | /// tf.imag(input) ==> [4.75, 5.75] |
2088 | /// ``` |
2089 | /// |
2090 | /// Args: |
2091 | /// * scope: A Scope object |
2092 | /// |
2093 | /// Returns: |
2094 | /// * `Output`: The output tensor. |
class Imag {
 public:
  /// Optional attribute setters for Imag
  struct Attrs {
    /// Output dtype of the (real-valued) imaginary part.
    ///
    /// Defaults to DT_FLOAT
    // Setters copy the Attrs value so calls can be chained fluently.
    TF_MUST_USE_RESULT Attrs Tout(DataType x) {
      Attrs ret = *this;
      ret.Tout_ = x;
      return ret;
    }

    DataType Tout_ = DT_FLOAT;
  };
  Imag(const ::tensorflow::Scope& scope, ::tensorflow::Input input);
  Imag(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
       Imag::Attrs& attrs);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  /// Returns the graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Convenience shorthand for Attrs().Tout(x).
  static Attrs Tout(DataType x) {
    return Attrs().Tout(x);
  }

  Operation operation;          // Underlying graph operation.
  ::tensorflow::Output output;  // Result tensor.
};
2122 | |
2123 | /// Computes the reciprocal of x element-wise. |
2124 | /// |
2125 | /// I.e., \\(y = 1 / x\\). |
2126 | /// |
2127 | /// Args: |
2128 | /// * scope: A Scope object |
2129 | /// |
2130 | /// Returns: |
2131 | /// * `Output`: The y tensor. |
class Inv {
 public:
  Inv(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor.
};
2142 | |
2143 | /// Returns which elements of x are finite. |
2144 | /// |
2145 | /// @compatibility(numpy) |
2146 | /// Equivalent to np.isfinite |
2147 | /// @end_compatibility |
2148 | /// |
2149 | /// Example: |
2150 | /// |
2151 | /// ```python |
2152 | /// x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) |
2153 | /// tf.math.is_finite(x) ==> [True, True, True, False, False] |
2154 | /// ``` |
2155 | /// |
2156 | /// Args: |
2157 | /// * scope: A Scope object |
2158 | /// |
2159 | /// Returns: |
2160 | /// * `Output`: The y tensor. |
class IsFinite {
 public:
  IsFinite(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor (boolean truth values).
};
2171 | |
2172 | /// Returns which elements of x are Inf. |
2173 | /// |
2174 | /// @compatibility(numpy) |
2175 | /// Equivalent to np.isinf |
2176 | /// @end_compatibility |
2177 | /// |
2178 | /// Example: |
2179 | /// |
2180 | /// ```python |
2181 | /// x = tf.constant([5.0, np.inf, 6.8, np.inf]) |
2182 | /// tf.math.is_inf(x) ==> [False, True, False, True] |
2183 | /// ``` |
2184 | /// |
2185 | /// Args: |
2186 | /// * scope: A Scope object |
2187 | /// |
2188 | /// Returns: |
2189 | /// * `Output`: The y tensor. |
class IsInf {
 public:
  IsInf(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor (boolean truth values).
};
2200 | |
2201 | /// Returns which elements of x are NaN. |
2202 | /// |
2203 | /// @compatibility(numpy) |
2204 | /// Equivalent to np.isnan |
2205 | /// @end_compatibility |
2206 | /// |
2207 | /// Example: |
2208 | /// |
2209 | /// ```python |
2210 | /// x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) |
2211 | /// tf.math.is_nan(x) ==> [False, True, False, True, False] |
2212 | /// ``` |
2213 | /// |
2214 | /// Args: |
2215 | /// * scope: A Scope object |
2216 | /// |
2217 | /// Returns: |
2218 | /// * `Output`: The y tensor. |
class IsNan {
 public:
  IsNan(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor (boolean truth values).
};
2229 | |
2230 | /// Returns the truth value of (x < y) element-wise. |
2231 | /// |
2232 | /// *NOTE*: `Less` supports broadcasting. More about broadcasting |
2233 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2234 | /// |
2235 | /// Example: |
2236 | /// |
2237 | /// ```python |
2238 | /// x = tf.constant([5, 4, 6]) |
2239 | /// y = tf.constant([5]) |
2240 | /// tf.math.less(x, y) ==> [False, True, False] |
2241 | /// |
2242 | /// x = tf.constant([5, 4, 6]) |
2243 | /// y = tf.constant([5, 6, 7]) |
2244 | /// tf.math.less(x, y) ==> [False, True, True] |
2245 | /// ``` |
2246 | /// |
2247 | /// Args: |
2248 | /// * scope: A Scope object |
2249 | /// |
2250 | /// Returns: |
2251 | /// * `Output`: The z tensor. |
class Less {
 public:
  Less(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
     ::tensorflow::Input y);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor (boolean truth values).
};
2263 | |
2264 | /// Returns the truth value of (x <= y) element-wise. |
2265 | /// |
2266 | /// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting |
2267 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2268 | /// |
2269 | /// Example: |
2270 | /// |
2271 | /// ```python |
2272 | /// x = tf.constant([5, 4, 6]) |
2273 | /// y = tf.constant([5]) |
2274 | /// tf.math.less_equal(x, y) ==> [True, True, False] |
2275 | /// |
2276 | /// x = tf.constant([5, 4, 6]) |
2277 | /// y = tf.constant([5, 6, 6]) |
2278 | /// tf.math.less_equal(x, y) ==> [True, True, True] |
2279 | /// ``` |
2280 | /// |
2281 | /// Args: |
2282 | /// * scope: A Scope object |
2283 | /// |
2284 | /// Returns: |
2285 | /// * `Output`: The z tensor. |
class LessEqual {
 public:
  LessEqual(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
          ::tensorflow::Input y);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor (boolean truth values).
};
2297 | |
2298 | /// Computes the log of the absolute value of `Gamma(x)` element-wise. |
2299 | /// |
2300 | /// For positive numbers, this function computes log((input - 1)!) for every element in the tensor. |
2301 | /// `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539` |
2302 | /// |
2303 | /// Example: |
2304 | /// |
2305 | /// ```python |
2306 | /// x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) |
2307 | /// tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] |
2308 | /// ``` |
2309 | /// |
2310 | /// Args: |
2311 | /// * scope: A Scope object |
2312 | /// |
2313 | /// Returns: |
2314 | /// * `Output`: The y tensor. |
class Lgamma {
 public:
  Lgamma(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor.
};
2325 | |
2326 | /// Computes natural logarithm of x element-wise. |
2327 | /// |
2328 | /// I.e., \\(y = \log_e x\\). |
2329 | /// |
2330 | /// Example: |
2331 | /// |
2332 | /// ```python |
2333 | /// x = tf.constant([0, 0.5, 1, 5]) |
2334 | /// tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] |
2335 | /// ``` |
2336 | /// |
2337 | /// Args: |
2338 | /// * scope: A Scope object |
2339 | /// |
2340 | /// Returns: |
2341 | /// * `Output`: The y tensor. |
class Log {
 public:
  Log(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor.
};
2352 | |
2353 | /// Computes natural logarithm of (1 + x) element-wise. |
2354 | /// |
2355 | /// I.e., \\(y = \log_e (1 + x)\\). |
2356 | /// |
2357 | /// Example: |
2358 | /// |
2359 | /// ```python |
2360 | /// x = tf.constant([0, 0.5, 1, 5]) |
2361 | /// tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] |
2362 | /// ``` |
2363 | /// |
2364 | /// Args: |
2365 | /// * scope: A Scope object |
2366 | /// |
2367 | /// Returns: |
2368 | /// * `Output`: The y tensor. |
class Log1p {
 public:
  Log1p(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor.
};
2379 | |
2380 | /// Returns the truth value of x AND y element-wise. |
2381 | /// |
2382 | /// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting |
2383 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2384 | /// |
2385 | /// Args: |
2386 | /// * scope: A Scope object |
2387 | /// |
2388 | /// Returns: |
2389 | /// * `Output`: The z tensor. |
class LogicalAnd {
 public:
  LogicalAnd(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
           ::tensorflow::Input y);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor (boolean truth values).
};
2401 | |
2402 | /// Returns the truth value of `NOT x` element-wise. |
2403 | /// |
2404 | /// Args: |
2405 | /// * scope: A Scope object |
2406 | /// * x: A `Tensor` of type `bool`. |
2407 | /// |
2408 | /// Returns: |
2409 | /// * `Output`: A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`. |
class LogicalNot {
 public:
  LogicalNot(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  /// Returns the graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output y;  // Result tensor (boolean truth values).
};
2420 | |
2421 | /// Returns the truth value of x OR y element-wise. |
2422 | /// |
2423 | /// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting |
2424 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2425 | /// |
2426 | /// Args: |
2427 | /// * scope: A Scope object |
2428 | /// |
2429 | /// Returns: |
2430 | /// * `Output`: The z tensor. |
class LogicalOr {
 public:
  LogicalOr(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
          ::tensorflow::Input y);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor (boolean truth values).
};
2442 | |
2443 | /// Multiply the matrix "a" by the matrix "b". |
2444 | /// |
/// The inputs must be two-dimensional matrices and the inner dimension of
/// "a" (after being transposed if transpose_a is true) must match the
/// outer dimension of "b" (after being transposed if transpose_b is
/// true).
2449 | /// |
2450 | /// *Note*: The default kernel implementation for MatMul on GPUs uses |
2451 | /// cublas. |
2452 | /// |
2453 | /// Args: |
2454 | /// * scope: A Scope object |
2455 | /// |
2456 | /// Optional attributes (see `Attrs`): |
2457 | /// * transpose_a: If true, "a" is transposed before multiplication. |
2458 | /// * transpose_b: If true, "b" is transposed before multiplication. |
2459 | /// |
2460 | /// Returns: |
2461 | /// * `Output`: The product tensor. |
class MatMul {
 public:
  /// Optional attribute setters for MatMul
  struct Attrs {
    /// If true, "a" is transposed before multiplication.
    ///
    /// Defaults to false
    // Setters copy the Attrs value so calls can be chained fluently.
    TF_MUST_USE_RESULT Attrs TransposeA(bool x) {
      Attrs ret = *this;
      ret.transpose_a_ = x;
      return ret;
    }

    /// If true, "b" is transposed before multiplication.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs TransposeB(bool x) {
      Attrs ret = *this;
      ret.transpose_b_ = x;
      return ret;
    }

    bool transpose_a_ = false;
    bool transpose_b_ = false;
  };
  MatMul(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
       ::tensorflow::Input b);
  MatMul(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
       ::tensorflow::Input b, const MatMul::Attrs& attrs);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return product; }
  operator ::tensorflow::Input() const { return product; }
  /// Returns the graph node that produces `product`.
  ::tensorflow::Node* node() const { return product.node(); }

  /// Convenience shorthand for Attrs().TransposeA(x).
  static Attrs TransposeA(bool x) {
    return Attrs().TransposeA(x);
  }
  /// Convenience shorthand for Attrs().TransposeB(x).
  static Attrs TransposeB(bool x) {
    return Attrs().TransposeB(x);
  }

  Operation operation;           // Underlying graph operation.
  ::tensorflow::Output product;  // The matrix product tensor.
};
2505 | |
2506 | /// Computes the maximum of elements across dimensions of a tensor. |
2507 | /// |
2508 | /// Reduces `input` along the dimensions given in `axis`. Unless |
2509 | /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in |
2510 | /// `axis`. If `keep_dims` is true, the reduced dimensions are |
2511 | /// retained with length 1. |
2512 | /// |
2513 | /// Args: |
2514 | /// * scope: A Scope object |
2515 | /// * input: The tensor to reduce. |
2516 | /// * axis: The dimensions to reduce. Must be in the range |
2517 | /// `[-rank(input), rank(input))`. |
2518 | /// |
2519 | /// Optional attributes (see `Attrs`): |
2520 | /// * keep_dims: If true, retain reduced dimensions with length 1. |
2521 | /// |
2522 | /// Returns: |
2523 | /// * `Output`: The reduced tensor. |
2524 | /// |
2525 | /// Aliases: |
2526 | /// * ReduceMax |
class Max {
 public:
  /// Optional attribute setters for Max
  struct Attrs {
    /// If true, retain reduced dimensions with length 1.
    ///
    /// Defaults to false
    // Setters copy the Attrs value so calls can be chained fluently.
    TF_MUST_USE_RESULT Attrs KeepDims(bool x) {
      Attrs ret = *this;
      ret.keep_dims_ = x;
      return ret;
    }

    bool keep_dims_ = false;
  };
  Max(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
    ::tensorflow::Input axis);
  Max(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
    ::tensorflow::Input axis, const Max::Attrs& attrs);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  /// Returns the graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Convenience shorthand for Attrs().KeepDims(x).
  static Attrs KeepDims(bool x) {
    return Attrs().KeepDims(x);
  }

  Operation operation;          // Underlying graph operation.
  ::tensorflow::Output output;  // The reduced tensor.
};
// Alias matching the Python-side name for this reduction op.
typedef Max ReduceMax;
2558 | |
2559 | /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. |
2560 | /// |
2561 | /// *NOTE*: `Maximum` supports broadcasting. More about broadcasting |
2562 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2563 | /// |
2564 | /// Args: |
2565 | /// * scope: A Scope object |
2566 | /// |
2567 | /// Returns: |
2568 | /// * `Output`: The z tensor. |
class Maximum {
 public:
  Maximum(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
        ::tensorflow::Input y);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  /// Returns the graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;     // Underlying graph operation.
  ::tensorflow::Output z;  // Result tensor.
};
2580 | |
2581 | /// Computes the mean of elements across dimensions of a tensor. |
2582 | /// |
2583 | /// Reduces `input` along the dimensions given in `axis`. Unless |
2584 | /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in |
2585 | /// `axis`. If `keep_dims` is true, the reduced dimensions are |
2586 | /// retained with length 1. |
2587 | /// |
2588 | /// Args: |
2589 | /// * scope: A Scope object |
2590 | /// * input: The tensor to reduce. |
2591 | /// * axis: The dimensions to reduce. Must be in the range |
2592 | /// `[-rank(input), rank(input))`. |
2593 | /// |
2594 | /// Optional attributes (see `Attrs`): |
2595 | /// * keep_dims: If true, retain reduced dimensions with length 1. |
2596 | /// |
2597 | /// Returns: |
2598 | /// * `Output`: The reduced tensor. |
2599 | /// |
2600 | /// Aliases: |
2601 | /// * ReduceMean |
class Mean {
 public:
  /// Optional attribute setters for Mean
  struct Attrs {
    /// If true, retain reduced dimensions with length 1.
    ///
    /// Defaults to false
    // Setters copy the Attrs value so calls can be chained fluently.
    TF_MUST_USE_RESULT Attrs KeepDims(bool x) {
      Attrs ret = *this;
      ret.keep_dims_ = x;
      return ret;
    }

    bool keep_dims_ = false;
  };
  Mean(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
     ::tensorflow::Input axis);
  Mean(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
     ::tensorflow::Input axis, const Mean::Attrs& attrs);
  /// Implicit conversions so the op's result can be fed directly to other ops.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  /// Returns the graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Convenience shorthand for Attrs().KeepDims(x).
  static Attrs KeepDims(bool x) {
    return Attrs().KeepDims(x);
  }

  Operation operation;          // Underlying graph operation.
  ::tensorflow::Output output;  // The reduced tensor.
};
// Alias matching the Python-side name for this reduction op.
typedef Mean ReduceMean;
2633 | |
2634 | /// Computes the minimum of elements across dimensions of a tensor. |
2635 | /// |
2636 | /// Reduces `input` along the dimensions given in `axis`. Unless |
2637 | /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in |
2638 | /// `axis`. If `keep_dims` is true, the reduced dimensions are |
2639 | /// retained with length 1. |
2640 | /// |
2641 | /// Args: |
2642 | /// * scope: A Scope object |
2643 | /// * input: The tensor to reduce. |
2644 | /// * axis: The dimensions to reduce. Must be in the range |
2645 | /// `[-rank(input), rank(input))`. |
2646 | /// |
2647 | /// Optional attributes (see `Attrs`): |
2648 | /// * keep_dims: If true, retain reduced dimensions with length 1. |
2649 | /// |
2650 | /// Returns: |
2651 | /// * `Output`: The reduced tensor. |
2652 | /// |
2653 | /// Aliases: |
2654 | /// * ReduceMin |
class Min {
 public:
  /// Optional attribute setters for Min
  struct Attrs {
    /// If true, retain reduced dimensions with length 1.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs KeepDims(bool x) {
      Attrs ret = *this;
      ret.keep_dims_ = x;
      return ret;
    }

    bool keep_dims_ = false;
  };
  /// Adds the Min op to the graph held by `scope`, with default attributes.
  Min(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
      ::tensorflow::Input axis);
  /// Adds the Min op with explicitly supplied attributes.
  Min(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
      ::tensorflow::Input axis, const Min::Attrs& attrs);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return output; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Static shorthand for Attrs().KeepDims(x).
  static Attrs KeepDims(bool x) {
    return Attrs().KeepDims(x);
  }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The reduced tensor.
  ::tensorflow::Output output;
};
/// Alias for Min.
typedef Min ReduceMin;
2686 | |
2687 | /// Returns the min of x and y (i.e. x < y ? x : y) element-wise. |
2688 | /// |
2689 | /// *NOTE*: `Minimum` supports broadcasting. More about broadcasting |
2690 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2691 | /// |
2692 | /// Args: |
2693 | /// * scope: A Scope object |
2694 | /// |
2695 | /// Returns: |
2696 | /// * `Output`: The z tensor. |
class Minimum {
 public:
  /// Adds the Minimum op to the graph held by `scope`.
  Minimum(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
          ::tensorflow::Input y);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return z; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output z;
};
2708 | |
2709 | /// Returns element-wise remainder of division. This emulates C semantics in that |
2710 | /// |
2711 | /// the result here is consistent with a truncating divide. E.g. |
2712 | /// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. |
2713 | /// |
2714 | /// *NOTE*: `Mod` supports broadcasting. More about broadcasting |
2715 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2716 | /// |
2717 | /// Args: |
2718 | /// * scope: A Scope object |
2719 | /// |
2720 | /// Returns: |
2721 | /// * `Output`: The z tensor. |
class Mod {
 public:
  /// Adds the Mod op to the graph held by `scope`.
  Mod(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
      ::tensorflow::Input y);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return z; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output z;
};
2733 | |
2734 | /// Returns x * y element-wise. |
2735 | /// |
2736 | /// *NOTE*: `Multiply` supports broadcasting. More about broadcasting |
2737 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2738 | /// |
2739 | /// Args: |
2740 | /// * scope: A Scope object |
2741 | /// |
2742 | /// Returns: |
2743 | /// * `Output`: The z tensor. |
2744 | /// |
2745 | /// Aliases: |
2746 | /// * Mul |
class Multiply {
 public:
  /// Adds the Multiply op to the graph held by `scope`.
  Multiply(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
           ::tensorflow::Input y);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return z; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output z;
};
/// Alias for Multiply.
typedef Multiply Mul;
2759 | |
/// Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
2761 | /// |
2762 | /// *NOTE*: `MulNoNan` supports broadcasting. More about broadcasting |
2763 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2764 | /// |
2765 | /// Args: |
2766 | /// * scope: A Scope object |
2767 | /// |
2768 | /// Returns: |
2769 | /// * `Output`: The z tensor. |
class MulNoNan {
 public:
  /// Adds the MulNoNan op to the graph held by `scope`.
  MulNoNan(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
           ::tensorflow::Input y);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return z; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output z;
};
2781 | |
2782 | /// TODO: add doc. |
2783 | /// |
2784 | /// Args: |
2785 | /// * scope: A Scope object |
2786 | /// |
2787 | /// Returns: |
2788 | /// * `Output`: The y tensor. |
class Ndtri {
 public:
  /// Adds the Ndtri op to the graph held by `scope`.
  Ndtri(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return y; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output y;
};
2799 | |
2800 | /// Computes numerical negative value element-wise. |
2801 | /// |
2802 | /// I.e., \\(y = -x\\). |
2803 | /// |
2804 | /// Args: |
2805 | /// * scope: A Scope object |
2806 | /// |
2807 | /// Returns: |
2808 | /// * `Output`: The y tensor. |
2809 | /// |
2810 | /// Aliases: |
2811 | /// * Neg |
class Negate {
 public:
  /// Adds the Negate op to the graph held by `scope`.
  Negate(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return y; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output y;
};
/// Alias for Negate.
typedef Negate Neg;
2823 | |
2824 | /// Returns the next representable value of `x1` in the direction of `x2`, element-wise. |
2825 | /// |
2826 | /// This operation returns the same result as the C++ std::nextafter function. |
2827 | /// |
2828 | /// It can also return a subnormal number. |
2829 | /// |
2830 | /// @compatibility(cpp) |
2831 | /// Equivalent to C++ std::nextafter function. |
2832 | /// @end_compatibility |
2833 | /// |
2834 | /// Args: |
2835 | /// * scope: A Scope object |
2836 | /// |
2837 | /// Returns: |
2838 | /// * `Output`: The output tensor. |
class NextAfter {
 public:
  /// Adds the NextAfter op to the graph held by `scope`.
  NextAfter(const ::tensorflow::Scope& scope, ::tensorflow::Input x1,
            ::tensorflow::Input x2);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return output; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output output;
};
2850 | |
2851 | /// Returns the truth value of (x != y) element-wise. |
2852 | /// |
2853 | /// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting |
2854 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
2855 | /// |
2856 | /// Args: |
2857 | /// * scope: A Scope object |
2858 | /// |
2859 | /// Returns: |
2860 | /// * `Output`: The z tensor. |
class NotEqual {
 public:
  /// Optional attribute setters for NotEqual
  struct Attrs {
    /// Defaults to true
    TF_MUST_USE_RESULT Attrs IncompatibleShapeError(bool x) {
      Attrs ret = *this;
      ret.incompatible_shape_error_ = x;
      return ret;
    }

    bool incompatible_shape_error_ = true;
  };
  /// Adds the NotEqual op to the graph held by `scope`, with default attributes.
  NotEqual(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
           ::tensorflow::Input y);
  /// Adds the NotEqual op with explicitly supplied attributes.
  NotEqual(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
           ::tensorflow::Input y, const NotEqual::Attrs& attrs);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return z; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// Static shorthand for Attrs().IncompatibleShapeError(x).
  static Attrs IncompatibleShapeError(bool x) {
    return Attrs().IncompatibleShapeError(x);
  }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output z;
};
2889 | |
2890 | /// Compute the polygamma function \\(\psi^{(n)}(x)\\). |
2891 | /// |
2892 | /// The polygamma function is defined as: |
2893 | /// |
2894 | /// |
2895 | /// \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\) |
2896 | /// |
2897 | /// where \\(\psi(x)\\) is the digamma function. |
/// The polygamma function is defined only for non-negative integer orders \\(a\\).
2899 | /// |
2900 | /// Args: |
2901 | /// * scope: A Scope object |
2902 | /// |
2903 | /// Returns: |
2904 | /// * `Output`: The z tensor. |
class Polygamma {
 public:
  /// Adds the Polygamma op to the graph held by `scope`.
  Polygamma(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
            ::tensorflow::Input x);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return z; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output z;
};
2916 | |
2917 | /// Computes the power of one value to another. |
2918 | /// |
2919 | /// Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for |
2920 | /// corresponding elements in `x` and `y`. For example: |
2921 | /// |
2922 | /// ``` |
/// # tensor 'x' is [[2, 2], [3, 3]]
2924 | /// # tensor 'y' is [[8, 16], [2, 3]] |
2925 | /// tf.pow(x, y) ==> [[256, 65536], [9, 27]] |
2926 | /// ``` |
2927 | /// |
2928 | /// Args: |
2929 | /// * scope: A Scope object |
2930 | /// |
2931 | /// Returns: |
2932 | /// * `Output`: The z tensor. |
class Pow {
 public:
  /// Adds the Pow op to the graph held by `scope`.
  Pow(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
      ::tensorflow::Input y);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return z; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output z;
};
2944 | |
2945 | /// Computes the product of elements across dimensions of a tensor. |
2946 | /// |
2947 | /// Reduces `input` along the dimensions given in `axis`. Unless |
2948 | /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in |
2949 | /// `axis`. If `keep_dims` is true, the reduced dimensions are |
2950 | /// retained with length 1. |
2951 | /// |
2952 | /// Args: |
2953 | /// * scope: A Scope object |
2954 | /// * input: The tensor to reduce. |
2955 | /// * axis: The dimensions to reduce. Must be in the range |
2956 | /// `[-rank(input), rank(input))`. |
2957 | /// |
2958 | /// Optional attributes (see `Attrs`): |
2959 | /// * keep_dims: If true, retain reduced dimensions with length 1. |
2960 | /// |
2961 | /// Returns: |
2962 | /// * `Output`: The reduced tensor. |
2963 | /// |
2964 | /// Aliases: |
2965 | /// * ReduceProd |
class Prod {
 public:
  /// Optional attribute setters for Prod
  struct Attrs {
    /// If true, retain reduced dimensions with length 1.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs KeepDims(bool x) {
      Attrs ret = *this;
      ret.keep_dims_ = x;
      return ret;
    }

    bool keep_dims_ = false;
  };
  /// Adds the Prod op to the graph held by `scope`, with default attributes.
  Prod(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
       ::tensorflow::Input axis);
  /// Adds the Prod op with explicitly supplied attributes.
  Prod(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
       ::tensorflow::Input axis, const Prod::Attrs& attrs);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return output; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Static shorthand for Attrs().KeepDims(x).
  static Attrs KeepDims(bool x) {
    return Attrs().KeepDims(x);
  }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The reduced tensor.
  ::tensorflow::Output output;
};
/// Alias for Prod.
typedef Prod ReduceProd;
2997 | |
2998 | /// Convert the quantized 'input' tensor into a lower-precision 'output', using the |
2999 | /// |
3000 | /// actual distribution of the values to maximize the usage of the lower bit depth |
3001 | /// and adjusting the output min and max ranges accordingly. |
3002 | /// |
3003 | /// [input_min, input_max] are scalar floats that specify the range for the float |
3004 | /// interpretation of the 'input' data. For example, if input_min is -1.0f and |
3005 | /// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 |
3006 | /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. |
3007 | /// |
3008 | /// This operator tries to squeeze as much precision as possible into an output with |
3009 | /// a lower bit depth by calculating the actual min and max values found in the |
3010 | /// data. For example, maybe that quint16 input has no values lower than 16,384 and |
3011 | /// none higher than 49,152. That means only half the range is actually needed, all |
3012 | /// the float interpretations are between -0.5f and 0.5f, so if we want to compress |
3013 | /// the data into a quint8 output, we can use that range rather than the theoretical |
3014 | /// -1.0f to 1.0f that is suggested by the input min and max. |
3015 | /// |
3016 | /// In practice, this is most useful for taking output from operations like |
3017 | /// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and |
3018 | /// may have large potential output ranges, but in practice have a distribution of |
3019 | /// input values that only uses a small fraction of the possible range. By feeding |
3020 | /// that output into this operator, we can reduce it from 32 bits down to 8 with |
3021 | /// minimal loss of accuracy. |
3022 | /// |
3023 | /// Args: |
3024 | /// * scope: A Scope object |
3025 | /// * input_min: The float value that the minimum quantized input value represents. |
3026 | /// * input_max: The float value that the maximum quantized input value represents. |
3027 | /// * out_type: The type of the output. Should be a lower bit depth than Tinput. |
3028 | /// |
3029 | /// Returns: |
3030 | /// * `Output` output |
3031 | /// * `Output` output_min: The float value that the minimum quantized output value represents. |
3032 | /// * `Output` output_max: The float value that the maximum quantized output value represents. |
class QuantizeDownAndShrinkRange {
 public:
  /// Adds the QuantizeDownAndShrinkRange op to the graph held by `scope`.
  /// Multi-output op: no implicit Output/Input conversions are provided, so
  /// callers must name the output member they want.
  QuantizeDownAndShrinkRange(const ::tensorflow::Scope& scope,
                             ::tensorflow::Input input, ::tensorflow::Input
                             input_min, ::tensorflow::Input input_max, DataType
                             out_type);

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The requantized tensor, with element type `out_type`.
  ::tensorflow::Output output;
  /// The float value that the minimum quantized output value represents.
  ::tensorflow::Output output_min;
  /// The float value that the maximum quantized output value represents.
  ::tensorflow::Output output_max;
};
3045 | |
3046 | /// Returns x + y element-wise, working on quantized buffers. |
3047 | /// |
3048 | /// Args: |
3049 | /// * scope: A Scope object |
3050 | /// * min_x: The float value that the lowest quantized `x` value represents. |
3051 | /// * max_x: The float value that the highest quantized `x` value represents. |
3052 | /// * min_y: The float value that the lowest quantized `y` value represents. |
3053 | /// * max_y: The float value that the highest quantized `y` value represents. |
3054 | /// |
3055 | /// Returns: |
3056 | /// * `Output` z |
3057 | /// * `Output` min_z: The float value that the lowest quantized output value represents. |
3058 | /// * `Output` max_z: The float value that the highest quantized output value represents. |
3059 | /// |
3060 | /// *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about |
3061 | /// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
class QuantizedAdd {
 public:
  /// Optional attribute setters for QuantizedAdd
  struct Attrs {
    /// Defaults to DT_QINT32
    TF_MUST_USE_RESULT Attrs Toutput(DataType x) {
      Attrs ret = *this;
      ret.Toutput_ = x;
      return ret;
    }

    DataType Toutput_ = DT_QINT32;
  };
  /// Adds the QuantizedAdd op to the graph held by `scope`, with default
  /// attributes. Multi-output op: callers must name the output member they want.
  QuantizedAdd(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
               ::tensorflow::Input y, ::tensorflow::Input min_x,
               ::tensorflow::Input max_x, ::tensorflow::Input min_y,
               ::tensorflow::Input max_y);
  /// Adds the QuantizedAdd op with explicitly supplied attributes.
  QuantizedAdd(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
               ::tensorflow::Input y, ::tensorflow::Input min_x,
               ::tensorflow::Input max_x, ::tensorflow::Input min_y,
               ::tensorflow::Input max_y, const QuantizedAdd::Attrs& attrs);

  /// Static shorthand for Attrs().Toutput(x).
  static Attrs Toutput(DataType x) {
    return Attrs().Toutput(x);
  }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The quantized sum.
  ::tensorflow::Output z;
  /// The float value that the lowest quantized output value represents.
  ::tensorflow::Output min_z;
  /// The float value that the highest quantized output value represents.
  ::tensorflow::Output max_z;
};
3093 | |
3094 | /// Perform a quantized matrix multiplication of `a` by the matrix `b`. |
3095 | /// |
3096 | /// The inputs must be two-dimensional matrices and the inner dimension of |
3097 | /// `a` (after being transposed if `transpose_a` is non-zero) must match the |
/// outer dimension of `b` (after being transposed if `transpose_b` is
3099 | /// non-zero). |
3100 | /// |
3101 | /// Args: |
3102 | /// * scope: A Scope object |
3103 | /// * a: Must be a two-dimensional tensor. |
3104 | /// * b: Must be a two-dimensional tensor. |
3105 | /// * min_a: The float value that the lowest quantized `a` value represents. |
3106 | /// * max_a: The float value that the highest quantized `a` value represents. |
3107 | /// * min_b: The float value that the lowest quantized `b` value represents. |
3108 | /// * max_b: The float value that the highest quantized `b` value represents. |
3109 | /// |
3110 | /// Optional attributes (see `Attrs`): |
3111 | /// * transpose_a: If true, `a` is transposed before multiplication. |
3112 | /// * transpose_b: If true, `b` is transposed before multiplication. |
3113 | /// * Tactivation: The type of output produced by activation function |
3114 | /// following this operation. |
3115 | /// |
3116 | /// Returns: |
3117 | /// * `Output` out |
3118 | /// * `Output` min_out: The float value that the lowest quantized output value represents. |
3119 | /// * `Output` max_out: The float value that the highest quantized output value represents. |
class QuantizedMatMul {
 public:
  /// Optional attribute setters for QuantizedMatMul
  struct Attrs {
    /// Defaults to DT_QINT32
    TF_MUST_USE_RESULT Attrs Toutput(DataType x) {
      Attrs ret = *this;
      ret.Toutput_ = x;
      return ret;
    }

    /// If true, `a` is transposed before multiplication.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs TransposeA(bool x) {
      Attrs ret = *this;
      ret.transpose_a_ = x;
      return ret;
    }

    /// If true, `b` is transposed before multiplication.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs TransposeB(bool x) {
      Attrs ret = *this;
      ret.transpose_b_ = x;
      return ret;
    }

    /// The type of output produced by activation function
    /// following this operation.
    ///
    /// Defaults to DT_QUINT8
    TF_MUST_USE_RESULT Attrs Tactivation(DataType x) {
      Attrs ret = *this;
      ret.Tactivation_ = x;
      return ret;
    }

    DataType Toutput_ = DT_QINT32;
    bool transpose_a_ = false;
    bool transpose_b_ = false;
    DataType Tactivation_ = DT_QUINT8;
  };
  /// Adds the QuantizedMatMul op to the graph held by `scope`, with default
  /// attributes. Multi-output op: callers must name the output member they want.
  QuantizedMatMul(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
                  ::tensorflow::Input b, ::tensorflow::Input min_a,
                  ::tensorflow::Input max_a, ::tensorflow::Input min_b,
                  ::tensorflow::Input max_b);
  /// Adds the QuantizedMatMul op with explicitly supplied attributes.
  QuantizedMatMul(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
                  ::tensorflow::Input b, ::tensorflow::Input min_a,
                  ::tensorflow::Input max_a, ::tensorflow::Input min_b,
                  ::tensorflow::Input max_b, const QuantizedMatMul::Attrs& attrs);

  /// Static shorthand for Attrs().Toutput(x).
  static Attrs Toutput(DataType x) {
    return Attrs().Toutput(x);
  }
  /// Static shorthand for Attrs().TransposeA(x).
  static Attrs TransposeA(bool x) {
    return Attrs().TransposeA(x);
  }
  /// Static shorthand for Attrs().TransposeB(x).
  static Attrs TransposeB(bool x) {
    return Attrs().TransposeB(x);
  }
  /// Static shorthand for Attrs().Tactivation(x).
  static Attrs Tactivation(DataType x) {
    return Attrs().Tactivation(x);
  }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The quantized product.
  ::tensorflow::Output out;
  /// The float value that the lowest quantized output value represents.
  ::tensorflow::Output min_out;
  /// The float value that the highest quantized output value represents.
  ::tensorflow::Output max_out;
};
3191 | |
3192 | /// Returns x * y element-wise, working on quantized buffers. |
3193 | /// |
3194 | /// Args: |
3195 | /// * scope: A Scope object |
3196 | /// * min_x: The float value that the lowest quantized `x` value represents. |
3197 | /// * max_x: The float value that the highest quantized `x` value represents. |
3198 | /// * min_y: The float value that the lowest quantized `y` value represents. |
3199 | /// * max_y: The float value that the highest quantized `y` value represents. |
3200 | /// |
3201 | /// Returns: |
3202 | /// * `Output` z |
3203 | /// * `Output` min_z: The float value that the lowest quantized output value represents. |
3204 | /// * `Output` max_z: The float value that the highest quantized output value represents. |
3205 | /// |
3206 | /// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about |
3207 | /// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
class QuantizedMul {
 public:
  /// Optional attribute setters for QuantizedMul
  struct Attrs {
    /// Defaults to DT_QINT32
    TF_MUST_USE_RESULT Attrs Toutput(DataType x) {
      Attrs ret = *this;
      ret.Toutput_ = x;
      return ret;
    }

    DataType Toutput_ = DT_QINT32;
  };
  /// Adds the QuantizedMul op to the graph held by `scope`, with default
  /// attributes. Multi-output op: callers must name the output member they want.
  QuantizedMul(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
               ::tensorflow::Input y, ::tensorflow::Input min_x,
               ::tensorflow::Input max_x, ::tensorflow::Input min_y,
               ::tensorflow::Input max_y);
  /// Adds the QuantizedMul op with explicitly supplied attributes.
  QuantizedMul(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
               ::tensorflow::Input y, ::tensorflow::Input min_x,
               ::tensorflow::Input max_x, ::tensorflow::Input min_y,
               ::tensorflow::Input max_y, const QuantizedMul::Attrs& attrs);

  /// Static shorthand for Attrs().Toutput(x).
  static Attrs Toutput(DataType x) {
    return Attrs().Toutput(x);
  }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The quantized product.
  ::tensorflow::Output z;
  /// The float value that the lowest quantized output value represents.
  ::tensorflow::Output min_z;
  /// The float value that the highest quantized output value represents.
  ::tensorflow::Output max_z;
};
3239 | |
3240 | /// Counts the number of occurrences of each value in an integer array. |
3241 | /// |
/// Outputs a vector with length `size` and the same dtype as `weights`. If
/// `weights` are empty, then index `i` stores the number of times the value `i` is
/// counted in `values`. If `weights` are non-empty, then index `i` stores the sum
/// of the value in `weights` at each index where the corresponding value in
/// `values` is `i`.
///
/// Values in `values` outside of the range [0, size) are ignored.
3249 | /// |
3250 | /// Args: |
3251 | /// * scope: A Scope object |
3252 | /// * splits: 1D int64 `Tensor`. |
3253 | /// * values: 2D int `Tensor`. |
3254 | /// * size: non-negative int scalar `Tensor`. |
3255 | /// * weights: is an int32, int64, float32, or float64 `Tensor` with the same |
3256 | /// shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights |
3257 | /// equal to 1. |
3258 | /// |
3259 | /// Optional attributes (see `Attrs`): |
3260 | /// * binary_output: bool; Whether the kernel should count the appearance or number of occurrences. |
3261 | /// |
3262 | /// Returns: |
3263 | /// * `Output`: 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`]. |
3264 | /// The counts or summed weights for each value in the range [0, size). |
class RaggedBincount {
 public:
  /// Optional attribute setters for RaggedBincount
  struct Attrs {
    /// bool; Whether the kernel should count the appearance or number of occurrences.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs BinaryOutput(bool x) {
      Attrs ret = *this;
      ret.binary_output_ = x;
      return ret;
    }

    bool binary_output_ = false;
  };
  /// Adds the RaggedBincount op to the graph held by `scope`, with default
  /// attributes.
  RaggedBincount(const ::tensorflow::Scope& scope, ::tensorflow::Input splits,
                 ::tensorflow::Input values, ::tensorflow::Input size,
                 ::tensorflow::Input weights);
  /// Adds the RaggedBincount op with explicitly supplied attributes.
  RaggedBincount(const ::tensorflow::Scope& scope, ::tensorflow::Input splits,
                 ::tensorflow::Input values, ::tensorflow::Input size,
                 ::tensorflow::Input weights, const RaggedBincount::Attrs& attrs);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return output; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Static shorthand for Attrs().BinaryOutput(x).
  static Attrs BinaryOutput(bool x) {
    return Attrs().BinaryOutput(x);
  }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The counts or summed weights for each value in the range [0, size).
  ::tensorflow::Output output;
};
3297 | |
3298 | /// Creates a sequence of numbers. |
3299 | /// |
3300 | /// This operation creates a sequence of numbers that begins at `start` and |
3301 | /// extends by increments of `delta` up to but not including `limit`. |
3302 | /// |
3303 | /// For example: |
3304 | /// |
3305 | /// ``` |
3306 | /// # 'start' is 3 |
3307 | /// # 'limit' is 18 |
3308 | /// # 'delta' is 3 |
3309 | /// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] |
3310 | /// ``` |
3311 | /// |
3312 | /// Args: |
3313 | /// * scope: A Scope object |
3314 | /// * start: 0-D (scalar). First entry in the sequence. |
3315 | /// * limit: 0-D (scalar). Upper limit of sequence, exclusive. |
3316 | /// * delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`. |
3317 | /// |
3318 | /// Returns: |
3319 | /// * `Output`: 1-D. |
class Range {
 public:
  /// Adds the Range op to the graph held by `scope`.
  Range(const ::tensorflow::Scope& scope, ::tensorflow::Input start,
        ::tensorflow::Input limit, ::tensorflow::Input delta);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return output; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The generated 1-D sequence.
  ::tensorflow::Output output;
};
3331 | |
3332 | /// Returns the real part of a complex number. |
3333 | /// |
3334 | /// Given a tensor `input` of complex numbers, this operation returns a tensor of |
3335 | /// type `float` that is the real part of each element in `input`. All elements in |
3336 | /// `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real |
3337 | /// part returned by this operation and *b* is the imaginary part. |
3338 | /// |
3339 | /// For example: |
3340 | /// |
3341 | /// ``` |
3342 | /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] |
3343 | /// tf.real(input) ==> [-2.25, 3.25] |
3344 | /// ``` |
3345 | /// |
3346 | /// Args: |
3347 | /// * scope: A Scope object |
3348 | /// |
3349 | /// Returns: |
3350 | /// * `Output`: The output tensor. |
class Real {
 public:
  /// Optional attribute setters for Real
  struct Attrs {
    /// Defaults to DT_FLOAT
    TF_MUST_USE_RESULT Attrs Tout(DataType x) {
      Attrs ret = *this;
      ret.Tout_ = x;
      return ret;
    }

    DataType Tout_ = DT_FLOAT;
  };
  /// Adds the Real op to the graph held by `scope`, with default attributes.
  Real(const ::tensorflow::Scope& scope, ::tensorflow::Input input);
  /// Adds the Real op with explicitly supplied attributes.
  Real(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
     Real::Attrs& attrs);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return output; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return output; }
  /// The graph node that produces `output`.
  ::tensorflow::Node* node() const { return output.node(); }

  /// Static shorthand for Attrs().Tout(x).
  static Attrs Tout(DataType x) {
    return Attrs().Tout(x);
  }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output output;
};
3378 | |
3379 | /// Returns x / y element-wise for real types. |
3380 | /// |
3381 | /// If `x` and `y` are reals, this will return the floating-point division. |
3382 | /// |
3383 | /// *NOTE*: `Div` supports broadcasting. More about broadcasting |
3384 | /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) |
3385 | /// |
3386 | /// Args: |
3387 | /// * scope: A Scope object |
3388 | /// |
3389 | /// Returns: |
3390 | /// * `Output`: The z tensor. |
class RealDiv {
 public:
  /// Adds the RealDiv op to the graph held by `scope`.
  RealDiv(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
          ::tensorflow::Input y);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return z; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return z; }
  /// The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output z;
};
3402 | |
3403 | /// Computes the reciprocal of x element-wise. |
3404 | /// |
3405 | /// I.e., \\(y = 1 / x\\). |
3406 | /// |
3407 | /// Args: |
3408 | /// * scope: A Scope object |
3409 | /// |
3410 | /// Returns: |
3411 | /// * `Output`: The y tensor. |
class Reciprocal {
 public:
  /// Adds the Reciprocal op to the graph held by `scope`.
  Reciprocal(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  /// Implicit conversion so the op can be used where an Output is expected.
  operator ::tensorflow::Output() const { return y; }
  /// Implicit conversion so the op can feed another op as an Input.
  operator ::tensorflow::Input() const { return y; }
  /// The graph node that produces `y`.
  ::tensorflow::Node* node() const { return y.node(); }

  /// Handle to the operation added to the graph.
  Operation operation;
  /// The op's result tensor.
  ::tensorflow::Output y;
};
3422 | |
/// Computes a range that covers the actual values present in a quantized tensor.
///
/// Given a quantized tensor described by `(input, input_min, input_max)`, outputs a
/// range that covers the actual values present in that tensor. This op is typically
/// used to produce the `requested_output_min` and `requested_output_max` for
/// `Requantize`.
///
/// Args:
/// * scope: A Scope object
/// * input_min: The float value that the minimum quantized input value represents.
/// * input_max: The float value that the maximum quantized input value represents.
///
/// Returns:
/// * `Output` output_min: The computed min output.
/// * `Output` output_max: The computed max output.
class RequantizationRange {
 public:
  RequantizationRange(const ::tensorflow::Scope& scope, ::tensorflow::Input
                    input, ::tensorflow::Input input_min, ::tensorflow::Input
                    input_max);

  // The underlying graph operation and its two scalar range outputs.
  Operation operation;
  ::tensorflow::Output output_min;
  ::tensorflow::Output output_max;
};
3448 | |
/// Converts the quantized `input` tensor into a lower-precision `output`.
///
/// Converts the quantized `input` tensor into a lower-precision `output`, using the
/// output range specified with `requested_output_min` and `requested_output_max`.
///
/// `[input_min, input_max]` are scalar floats that specify the range for the float
/// interpretation of the `input` data. For example, if `input_min` is -1.0f and
/// `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0
/// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
///
/// Args:
/// * scope: A Scope object
/// * input_min: The float value that the minimum quantized input value represents.
/// * input_max: The float value that the maximum quantized input value represents.
/// * requested_output_min: The float value that the minimum quantized output value represents.
/// * requested_output_max: The float value that the maximum quantized output value represents.
/// * out_type: The type of the output. Should be a lower bit depth than Tinput.
///
/// Returns:
/// * `Output` output
/// * `Output` output_min: The requested_output_min value is copied into this output.
/// * `Output` output_max: The requested_output_max value is copied into this output.
class Requantize {
 public:
  Requantize(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
           ::tensorflow::Input input_min, ::tensorflow::Input input_max,
           ::tensorflow::Input requested_output_min, ::tensorflow::Input
           requested_output_max, DataType out_type);

  // The underlying graph operation, the requantized tensor, and the
  // (copied-through) output range.
  Operation operation;
  ::tensorflow::Output output;
  ::tensorflow::Output output_min;
  ::tensorflow::Output output_max;
};
3483 | |
/// Returns element-wise integer closest to x.
///
/// If the result is midway between two representable values,
/// the even representable is chosen.
/// For example:
///
/// ```
/// rint(-1.5) ==> -2.0
/// rint(0.5000001) ==> 1.0
/// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
/// ```
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Rint {
 public:
  Rint(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  // The underlying graph operation and its element-wise result.
  Operation operation;
  ::tensorflow::Output y;
};
3511 | |
/// Rounds the values of a tensor to the nearest integer, element-wise.
///
/// Rounds half to even. Also known as banker's rounding. If you want to round
/// according to the current system rounding mode use std::rint.
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Round {
 public:
  Round(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  // The underlying graph operation and its element-wise result.
  Operation operation;
  ::tensorflow::Output y;
};
3532 | |
/// Computes reciprocal of square root of x element-wise.
///
/// I.e., \\(y = 1 / \sqrt{x}\\).
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Rsqrt {
 public:
  Rsqrt(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  // The underlying graph operation and its element-wise result.
  Operation operation;
  ::tensorflow::Output y;
};
3552 | |
/// Computes the maximum along segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
/// that `segment_ids[j] == i`.
///
/// If the max is empty for a given segment ID `i`, `output[i] = 0`.
///
/// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
/// and an error is thrown for indices that are not increasing. On GPU, this
/// does not throw an error for unsorted indices. On GPU, out-of-order indices
/// result in safe but unspecified behavior, which may include treating
/// out-of-order indices as the same as a smaller following index.
///
/// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
/// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
/// </div>
///
/// For example:
///
/// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
/// >>> tf.math.segment_max(c, tf.constant([0, 0, 1])).numpy()
/// array([[4, 3, 3, 4],
///        [5, 6, 7, 8]], dtype=int32)
///
///
/// Args:
/// * scope: A Scope object
/// * data: The values over which the per-segment maximum is computed.
/// * segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
/// first dimension.  Values should be sorted and can be repeated.
///
/// Caution: The values are always validated to be sorted on CPU, never validated
/// on GPU.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `k`, the number of segments.
class SegmentMax {
 public:
  SegmentMax(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
           ::tensorflow::Input segment_ids);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // The underlying graph operation and the per-segment result.
  Operation operation;
  ::tensorflow::Output output;
};
3605 | |
/// Computes the mean along segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is
/// over `j` such that `segment_ids[j] == i` and `N` is the total number of
/// values summed.
///
/// If the mean is empty for a given segment ID `i`, `output[i] = 0`.
///
/// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
/// and an error is thrown for indices that are not increasing. On GPU, this
/// does not throw an error for unsorted indices. On GPU, out-of-order indices
/// result in safe but unspecified behavior, which may include treating
/// out-of-order indices as a smaller following index when computing the numerator
/// of the mean.
///
/// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
/// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
/// </div>
///
/// For example:
///
/// >>> c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
/// >>> tf.math.segment_mean(c, tf.constant([0, 0, 1])).numpy()
/// array([[2.5, 2.5, 2.5, 2.5],
///        [5., 6., 7., 8.]], dtype=float32)
///
///
/// Args:
/// * scope: A Scope object
/// * data: The values over which the per-segment mean is computed.
/// * segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
/// first dimension.  Values should be sorted and can be repeated.
///
/// Caution: The values are always validated to be sorted on CPU, never validated
/// on GPU.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `k`, the number of segments.
class SegmentMean {
 public:
  SegmentMean(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
            ::tensorflow::Input segment_ids);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // The underlying graph operation and the per-segment result.
  Operation operation;
  ::tensorflow::Output output;
};
3660 | |
/// Computes the minimum along segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
/// that `segment_ids[j] == i`.
///
/// If the min is empty for a given segment ID `i`, `output[i] = 0`.
///
/// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
/// and an error is thrown for indices that are not increasing. On GPU, this
/// does not throw an error for unsorted indices. On GPU, out-of-order indices
/// result in safe but unspecified behavior, which may include treating
/// out-of-order indices as the same as a smaller following index.
///
/// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
/// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
/// </div>
///
/// For example:
///
/// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
/// >>> tf.math.segment_min(c, tf.constant([0, 0, 1])).numpy()
/// array([[1, 2, 2, 1],
///        [5, 6, 7, 8]], dtype=int32)
///
///
/// Args:
/// * scope: A Scope object
/// * data: The values over which the per-segment minimum is computed.
/// * segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
/// first dimension.  Values should be sorted and can be repeated.
///
/// Caution: The values are always validated to be sorted on CPU, never validated
/// on GPU.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `k`, the number of segments.
class SegmentMin {
 public:
  SegmentMin(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
           ::tensorflow::Input segment_ids);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // The underlying graph operation and the per-segment result.
  Operation operation;
  ::tensorflow::Output output;
};
3713 | |
/// Computes the product along segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \prod_j data_j\\) where the product is over `j` such
/// that `segment_ids[j] == i`.
///
/// If the product is empty for a given segment ID `i`, `output[i] = 1`.
///
/// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
/// and an error is thrown for indices that are not increasing. On GPU, this
/// does not throw an error for unsorted indices. On GPU, out-of-order indices
/// result in safe but unspecified behavior, which may include treating
/// out-of-order indices as the same as a smaller following index.
///
/// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
/// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
/// </div>
///
/// For example:
///
/// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
/// >>> tf.math.segment_prod(c, tf.constant([0, 0, 1])).numpy()
/// array([[4, 6, 6, 4],
///        [5, 6, 7, 8]], dtype=int32)
///
///
/// Args:
/// * scope: A Scope object
/// * data: The values over which the per-segment product is computed.
/// * segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
/// first dimension.  Values should be sorted and can be repeated.
///
/// Caution: The values are always validated to be sorted on CPU, never validated
/// on GPU.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `k`, the number of segments.
class SegmentProd {
 public:
  SegmentProd(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
            ::tensorflow::Input segment_ids);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // The underlying graph operation and the per-segment result.
  Operation operation;
  ::tensorflow::Output output;
};
3766 | |
/// Computes the sum along segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \sum_j data_j\\) where sum is over `j` such
/// that `segment_ids[j] == i`.
///
/// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
///
/// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
/// and an error is thrown for indices that are not increasing. On GPU, this
/// does not throw an error for unsorted indices. On GPU, out-of-order indices
/// result in safe but unspecified behavior, which may include treating
/// out-of-order indices as the same as a smaller following index.
///
/// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
/// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
/// </div>
///
/// For example:
///
/// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
/// >>> tf.math.segment_sum(c, tf.constant([0, 0, 1])).numpy()
/// array([[5, 5, 5, 5],
///        [5, 6, 7, 8]], dtype=int32)
///
///
/// Args:
/// * scope: A Scope object
/// * data: The values over which the per-segment sum is computed.
/// * segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
/// first dimension.  Values should be sorted and can be repeated.
///
/// Caution: The values are always validated to be sorted on CPU, never validated
/// on GPU.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `k`, the number of segments.
class SegmentSum {
 public:
  SegmentSum(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
           ::tensorflow::Input segment_ids);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // The underlying graph operation and the per-segment result.
  Operation operation;
  ::tensorflow::Output output;
};
3819 | |
/// Selects elements from `x` or `y`, depending on `condition`.
///
/// The `x`, and `y` tensors must all have the same shape, and the
/// output will also have that shape.
///
/// The `condition` tensor must be a scalar if `x` and `y` are scalars.
/// If `x` and `y` are vectors or higher rank, then `condition` must be either a
/// scalar, a vector with size matching the first dimension of `x`, or must have
/// the same shape as `x`.
///
/// The `condition` tensor acts as a mask that chooses, based on the value at each
/// element, whether the corresponding element / row in the output should be
/// taken from `x` (if true) or `y` (if false).
///
/// If `condition` is a vector and `x` and `y` are higher rank matrices, then
/// it chooses which row (outer dimension) to copy from `x` and `y`.
/// If `condition` has the same shape as `x` and `y`, then it chooses which
/// element to copy from `x` and `y`.
///
/// For example:
///
/// ```python
/// # 'condition' tensor is [[True, False]
/// #                        [False, True]]
/// # 't' is [[1, 2],
/// #         [3, 4]]
/// # 'e' is [[5, 6],
/// #         [7, 8]]
/// select(condition, t, e)  # => [[1, 6], [7, 4]]
///
///
/// # 'condition' tensor is [True, False]
/// # 't' is [[1, 2],
/// #         [3, 4]]
/// # 'e' is [[5, 6],
/// #         [7, 8]]
/// select(condition, t, e) ==> [[1, 2],
///                              [7, 8]]
///
/// ```
///
/// Args:
/// * scope: A Scope object
/// * condition: A boolean tensor used as the selection mask (see above for the
/// allowed shapes relative to `x`).
/// * x: = A `Tensor` which may have the same shape as `condition`.
/// If `condition` is rank 1, `x` may have higher rank,
/// but its first dimension must match the size of `condition`.
/// * y: = A `Tensor` with the same type and shape as `x`.
///
/// Returns:
/// * `Output`: = A `Tensor` with the same type and shape as `x` and `y`.
class Where3 {
 public:
  Where3(const ::tensorflow::Scope& scope, ::tensorflow::Input condition,
       ::tensorflow::Input x, ::tensorflow::Input y);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // The underlying graph operation and the selected result.
  Operation operation;
  ::tensorflow::Output output;
};
3881 | |
/// Selects elements from `t` or `e`, depending on `condition`.
///
/// NOTE(review): the op registry provides no description for this op
/// ("TODO: add doc." upstream); summary below is inferred from the operand
/// names and the `Select` (`Where3`) op above — presumably this is the
/// broadcasting variant exposed as `tf.where` in Python. Verify against the
/// TensorFlow `SelectV2` raw-op documentation.
///
/// Args:
/// * scope: A Scope object
/// * condition: A boolean selector tensor.
/// * t: Values taken where `condition` is true.
/// * e: Values taken where `condition` is false.
///
/// Returns:
/// * `Output`: The output tensor.
class SelectV2 {
 public:
  SelectV2(const ::tensorflow::Scope& scope, ::tensorflow::Input condition,
         ::tensorflow::Input t, ::tensorflow::Input e);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // The underlying graph operation and the selected result.
  Operation operation;
  ::tensorflow::Output output;
};
3900 | |
/// Computes sigmoid of `x` element-wise.
///
/// Specifically, `y = 1 / (1 + exp(-x))`.
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Sigmoid {
 public:
  Sigmoid(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  // The underlying graph operation and its element-wise result.
  Operation operation;
  ::tensorflow::Output y;
};
3920 | |
/// Returns an element-wise indication of the sign of a number.
///
/// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
///
/// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
///
/// Example usage:
/// >>> tf.math.sign([0., 2., -3.])
/// <tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0.,  1., -1.], dtype=float32)>
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Sign {
 public:
  Sign(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  // The underlying graph operation and its element-wise result.
  Operation operation;
  ::tensorflow::Output y;
};
3946 | |
/// Computes sine of x element-wise.
///
/// Given an input tensor, this function computes sine of every
/// element in the tensor. Input range is `(-inf, inf)` and
/// output range is `[-1,1]`.
///
/// ```python
/// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")])
/// tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]
/// ```
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Sin {
 public:
  Sin(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  // The underlying graph operation and its element-wise result.
  Operation operation;
  ::tensorflow::Output y;
};
3973 | |
/// Computes hyperbolic sine of x element-wise.
///
/// Given an input tensor, this function computes hyperbolic sine of every
/// element in the tensor. Input range is `[-inf,inf]` and output range
/// is `[-inf,inf]`.
///
/// ```python
/// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
/// tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]
/// ```
///
/// Args:
/// * scope: A Scope object
///
/// Returns:
/// * `Output`: The y tensor.
class Sinh {
 public:
  Sinh(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  // The underlying graph operation and its element-wise result.
  Operation operation;
  ::tensorflow::Output y;
};
4000 | |
/// Counts the number of occurrences of each value in an integer array.
///
/// Outputs a vector with length `size` and the same dtype as `weights`. If
/// `weights` are empty, then index `i` stores the number of times the value `i` is
/// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
/// the value in `weights` at each index where the corresponding value in `arr` is
/// `i`.
///
/// Values in `arr` outside of the range [0, size) are ignored.
///
/// Args:
/// * scope: A Scope object
/// * indices: 2D int64 `Tensor`.
/// * values: 1D int `Tensor`.
/// * dense_shape: 1D int64 `Tensor`.
/// * size: non-negative int scalar `Tensor`.
/// * weights: is an int32, int64, float32, or float64 `Tensor` with the same
/// shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights
/// equal to 1.
///
/// Optional attributes (see `Attrs`):
/// * binary_output: bool; Whether the kernel should count the appearance or number of occurrences.
///
/// Returns:
/// * `Output`: 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].
/// The counts or summed weights for each value in the range [0, size).
class SparseBincount {
 public:
  /// Optional attribute setters for SparseBincount
  struct Attrs {
    /// bool; Whether the kernel should count the appearance or number of occurrences.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs BinaryOutput(bool x) {
      Attrs ret = *this;
      ret.binary_output_ = x;
      return ret;
    }

    bool binary_output_ = false;
  };
  SparseBincount(const ::tensorflow::Scope& scope, ::tensorflow::Input indices,
               ::tensorflow::Input values, ::tensorflow::Input dense_shape,
               ::tensorflow::Input size, ::tensorflow::Input weights);
  SparseBincount(const ::tensorflow::Scope& scope, ::tensorflow::Input indices,
               ::tensorflow::Input values, ::tensorflow::Input dense_shape,
               ::tensorflow::Input size, ::tensorflow::Input weights, const
               SparseBincount::Attrs& attrs);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // Convenience for Attrs-chaining without naming a temporary.
  static Attrs BinaryOutput(bool x) {
    return Attrs().BinaryOutput(x);
  }

  // The underlying graph operation and the bincount result.
  Operation operation;
  ::tensorflow::Output output;
};
4060 | |
/// Multiply matrix "a" by matrix "b".
///
/// The inputs must be two-dimensional matrices and the inner dimension of "a" must
/// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not
/// `SparseTensor`s. This op is optimized for the case where at least one of "a" or
/// "b" is sparse, in the sense that they have a large proportion of zero values.
/// The breakeven for using this versus a dense matrix multiply on one platform was
/// 30% zero values in the sparse matrix.
///
/// The gradient computation of this operation will only take advantage of sparsity
/// in the input gradient when that gradient comes from a Relu.
///
/// Args:
/// * scope: A Scope object
/// * a: The left-hand matrix operand.
/// * b: The right-hand matrix operand.
///
/// Optional attributes (see `Attrs`):
/// * transpose_a / transpose_b: Transpose the corresponding operand before
/// multiplying.
/// * a_is_sparse / b_is_sparse: Hint that the corresponding operand has a large
/// proportion of zero values.
///
/// Returns:
/// * `Output`: The product tensor.
class SparseMatMul {
 public:
  /// Optional attribute setters for SparseMatMul
  struct Attrs {
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs TransposeA(bool x) {
      Attrs ret = *this;
      ret.transpose_a_ = x;
      return ret;
    }

    /// Defaults to false
    TF_MUST_USE_RESULT Attrs TransposeB(bool x) {
      Attrs ret = *this;
      ret.transpose_b_ = x;
      return ret;
    }

    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AIsSparse(bool x) {
      Attrs ret = *this;
      ret.a_is_sparse_ = x;
      return ret;
    }

    /// Defaults to false
    TF_MUST_USE_RESULT Attrs BIsSparse(bool x) {
      Attrs ret = *this;
      ret.b_is_sparse_ = x;
      return ret;
    }

    bool transpose_a_ = false;
    bool transpose_b_ = false;
    bool a_is_sparse_ = false;
    bool b_is_sparse_ = false;
  };
  SparseMatMul(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
             ::tensorflow::Input b);
  SparseMatMul(const ::tensorflow::Scope& scope, ::tensorflow::Input a,
             ::tensorflow::Input b, const SparseMatMul::Attrs& attrs);
  operator ::tensorflow::Output() const { return product; }
  operator ::tensorflow::Input() const { return product; }
  ::tensorflow::Node* node() const { return product.node(); }

  // Convenience for Attrs-chaining without naming a temporary.
  static Attrs TransposeA(bool x) {
    return Attrs().TransposeA(x);
  }
  static Attrs TransposeB(bool x) {
    return Attrs().TransposeB(x);
  }
  static Attrs AIsSparse(bool x) {
    return Attrs().AIsSparse(x);
  }
  static Attrs BIsSparse(bool x) {
    return Attrs().BIsSparse(x);
  }

  // The underlying graph operation and the matrix product.
  Operation operation;
  ::tensorflow::Output product;
};
4139 | |
/// Computes the mean along sparse segments of a tensor.
///
/// See `tf.sparse.segment_sum` for usage examples.
///
/// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
/// dimension, selecting a subset of dimension 0, specified by `indices`.
///
/// Args:
/// * scope: A Scope object
/// * data: The values over which the per-segment mean is computed.
/// * indices: A 1-D tensor. Has same rank as `segment_ids`.
/// * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `k`, the number of segments.
class SparseSegmentMean {
 public:
  SparseSegmentMean(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
                  ::tensorflow::Input indices, ::tensorflow::Input segment_ids);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // The underlying graph operation and the per-segment result.
  Operation operation;
  ::tensorflow::Output output;
};
4166 | |
/// Computes gradients for SparseSegmentMean.
///
/// Returns tensor "output" with same shape as grad, except for dimension 0 whose
/// value is output_dim0.
///
/// Args:
/// * scope: A Scope object
/// * grad: gradient propagated to the SparseSegmentMean op.
/// * indices: indices passed to the corresponding SparseSegmentMean op.
/// * segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
/// * output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
///
/// Returns:
/// * `Output`: The output tensor.
class SparseSegmentMeanGrad {
 public:
  SparseSegmentMeanGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
                      grad, ::tensorflow::Input indices, ::tensorflow::Input
                      segment_ids, ::tensorflow::Input output_dim0);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  // The underlying graph operation and the propagated gradient.
  Operation operation;
  ::tensorflow::Output output;
};
4193 | |
/// Computes the mean along sparse segments of a tensor.
///
/// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
/// missing, the `output` tensor at that position will be zeroed.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// Args:
/// * scope: A Scope object
/// * data: The tensor whose dimension-0 slices (selected by `indices`) are
/// averaged per segment.
/// * indices: A 1-D tensor. Has same rank as `segment_ids`.
/// * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
/// * num_segments: Should equal the number of distinct segment IDs.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which has size
/// `num_segments`.
class SparseSegmentMeanWithNumSegments {
 public:
  SparseSegmentMeanWithNumSegments(const ::tensorflow::Scope& scope,
                                 ::tensorflow::Input data, ::tensorflow::Input
                                 indices, ::tensorflow::Input segment_ids,
                                 ::tensorflow::Input num_segments);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4225 | |
/// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
///
/// N is the size of the segment being reduced.
///
/// See `tf.sparse.segment_sum` for usage examples.
///
///
/// Args:
/// * scope: A Scope object
/// * data: The tensor whose dimension-0 slices (selected by `indices`) are
/// reduced per segment.
/// * indices: A 1-D tensor. Has same rank as `segment_ids`.
/// * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `k`, the number of segments.
class SparseSegmentSqrtN {
 public:
  SparseSegmentSqrtN(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
                   ::tensorflow::Input indices, ::tensorflow::Input
                   segment_ids);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4253 | |
/// Computes gradients for SparseSegmentSqrtN.
///
/// Returns tensor "output" with same shape as grad, except for dimension 0 whose
/// value is output_dim0.
///
/// NOTE: this op is normally emitted by the gradient implementation of
/// `SparseSegmentSqrtN`; it is rarely constructed directly.
///
/// Args:
/// * scope: A Scope object
/// * grad: gradient propagated to the SparseSegmentSqrtN op.
/// * indices: indices passed to the corresponding SparseSegmentSqrtN op.
/// * segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
/// * output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
///
/// Returns:
/// * `Output`: The output tensor.
class SparseSegmentSqrtNGrad {
 public:
  SparseSegmentSqrtNGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
                       grad, ::tensorflow::Input indices, ::tensorflow::Input
                       segment_ids, ::tensorflow::Input output_dim0);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4280 | |
/// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
///
/// N is the size of the segment being reduced.
///
/// Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
/// missing, the `output` tensor at that position will be zeroed.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// Args:
/// * scope: A Scope object
/// * indices: A 1-D tensor. Has same rank as `segment_ids`.
/// * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
/// * num_segments: Should equal the number of distinct segment IDs.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `num_segments` (missing ids yield zeroed rows, so dimension 0 is
/// `num_segments`, not the number of distinct ids present).
class SparseSegmentSqrtNWithNumSegments {
 public:
  SparseSegmentSqrtNWithNumSegments(const ::tensorflow::Scope& scope,
                                  ::tensorflow::Input data, ::tensorflow::Input
                                  indices, ::tensorflow::Input segment_ids,
                                  ::tensorflow::Input num_segments);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4314 | |
/// Computes the sum along sparse segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
/// dimension, selecting a subset of dimension 0, specified by `indices`.
///
/// For example:
///
/// ```python
/// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
///
/// # Select two rows, one segment.
/// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
/// # => [[0 0 0 0]]
///
/// # Select two rows, two segments.
/// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
/// # => [[ 1  2  3  4]
/// #     [-1 -2 -3 -4]]
///
/// # Select all rows, two segments.
/// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
/// # => [[0 0 0 0]
/// #     [5 6 7 8]]
///
/// # Which is equivalent to:
/// tf.segment_sum(c, tf.constant([0, 0, 1]))
/// ```
///
/// Args:
/// * scope: A Scope object
/// * data: The tensor whose dimension-0 slices (selected by `indices`) are
/// summed per segment.
/// * indices: A 1-D tensor. Has same rank as `segment_ids`.
/// * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `k`, the number of segments.
class SparseSegmentSum {
 public:
  SparseSegmentSum(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
                 ::tensorflow::Input indices, ::tensorflow::Input segment_ids);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4366 | |
/// Computes gradients for SparseSegmentSum.
///
/// Returns tensor "output" with same shape as grad, except for dimension 0 whose
/// value is output_dim0.
///
/// NOTE: this op is normally emitted by the gradient implementation of
/// `SparseSegmentSum`; it is rarely constructed directly.
///
/// Args:
/// * scope: A Scope object
/// * grad: gradient propagated to the SparseSegmentSum op.
/// * indices: indices passed to the corresponding SparseSegmentSum op.
/// * segment_ids: segment_ids passed to the corresponding SparseSegmentSum op.
/// * output_dim0: dimension 0 of "data" passed to SparseSegmentSum op.
///
/// Returns:
/// * `Output`: The output tensor.
class SparseSegmentSumGrad {
 public:
  SparseSegmentSumGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
                     grad, ::tensorflow::Input indices, ::tensorflow::Input
                     segment_ids, ::tensorflow::Input output_dim0);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4393 | |
/// Computes the sum along sparse segments of a tensor.
///
/// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
/// missing, the `output` tensor at that position will be zeroed.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation)
/// for an explanation of segments.
///
/// For example:
///
/// ```python
/// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
///
/// tf.sparse_segment_sum_with_num_segments(
///     c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
/// # => [[0 0 0 0]
/// #     [0 0 0 0]
/// #     [0 0 0 0]]
///
/// tf.sparse_segment_sum_with_num_segments(c,
///                                         tf.constant([0, 1]),
///                                         tf.constant([0, 2]),
///                                         num_segments=4)
/// # => [[ 1  2  3  4]
/// #     [ 0  0  0  0]
/// #     [-1 -2 -3 -4]
/// #     [ 0  0  0  0]]
/// ```
///
/// Args:
/// * scope: A Scope object
/// * indices: A 1-D tensor. Has same rank as `segment_ids`.
/// * segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
/// * num_segments: Should equal the number of distinct segment IDs.
///
/// Returns:
/// * `Output`: Has same shape as data, except for dimension 0 which
/// has size `num_segments`.
class SparseSegmentSumWithNumSegments {
 public:
  SparseSegmentSumWithNumSegments(const ::tensorflow::Scope& scope,
                                ::tensorflow::Input data, ::tensorflow::Input
                                indices, ::tensorflow::Input segment_ids,
                                ::tensorflow::Input num_segments);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4446 | |
/// Computes square root of x element-wise.
///
/// I.e., \\(y = \sqrt{x} = x^{1/2}\\).
///
/// Args:
/// * scope: A Scope object
/// * x: The input tensor.
///
/// Returns:
/// * `Output`: The y tensor.
class Sqrt {
 public:
  Sqrt(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;
  ::tensorflow::Output y;
};
4466 | |
/// Computes square of x element-wise.
///
/// I.e., \\(y = x * x = x^2\\).
///
/// Args:
/// * scope: A Scope object
/// * x: The input tensor.
///
/// Returns:
/// * `Output`: The y tensor.
class Square {
 public:
  Square(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;
  ::tensorflow::Output y;
};
4486 | |
/// Returns conj(x - y)(x - y) element-wise.
///
/// For real inputs the conjugate is a no-op, so this is simply \\((x - y)^2\\).
///
/// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
///
/// Args:
/// * scope: A Scope object
/// * x: The first operand.
/// * y: The second operand.
///
/// Returns:
/// * `Output`: The z tensor.
class SquaredDifference {
 public:
  SquaredDifference(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
                  ::tensorflow::Input y);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
4508 | |
/// Returns x - y element-wise.
///
/// *NOTE*: `Subtract` supports broadcasting. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
///
/// Args:
/// * scope: A Scope object
/// * x: The minuend tensor.
/// * y: The subtrahend tensor.
///
/// Returns:
/// * `Output`: The z tensor.
///
/// Aliases:
/// * Sub
class Subtract {
 public:
  Subtract(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
         ::tensorflow::Input y);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
typedef Subtract Sub;
4534 | |
/// Computes the sum of elements across dimensions of a tensor.
///
/// Reduces `input` along the dimensions given in `axis`. Unless
/// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
/// `axis`. If `keep_dims` is true, the reduced dimensions are
/// retained with length 1.
///
/// Args:
/// * scope: A Scope object
/// * input: The tensor to reduce.
/// * axis: The dimensions to reduce. Must be in the range
/// `[-rank(input), rank(input))`.
///
/// Optional attributes (see `Attrs`):
/// * keep_dims: If true, retain reduced dimensions with length 1.
///
/// Returns:
/// * `Output`: The reduced tensor.
///
/// Aliases:
/// * ReduceSum
class Sum {
 public:
  /// Optional attribute setters for Sum
  struct Attrs {
    /// If true, retain reduced dimensions with length 1.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs KeepDims(bool x) {
      Attrs ret = *this;
      ret.keep_dims_ = x;
      return ret;
    }

    bool keep_dims_ = false;
  };
  /// Builds the op with default attributes (keep_dims = false).
  Sum(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
    ::tensorflow::Input axis);
  /// Builds the op with explicit attributes, e.g. Sum::KeepDims(true).
  Sum(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
    ::tensorflow::Input axis, const Sum::Attrs& attrs);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  /// Convenience shorthand for Attrs().KeepDims(x).
  static Attrs KeepDims(bool x) {
    return Attrs().KeepDims(x);
  }

  Operation operation;
  ::tensorflow::Output output;
};
typedef Sum ReduceSum;
4587 | |
/// Computes tan of x element-wise.
///
/// Given an input tensor, this function computes tangent of every
/// element in the tensor. Input range is `(-inf, inf)` and
/// output range is `(-inf, inf)`. If the input lies outside that open range
/// (i.e. is `-inf` or `inf`), `nan` is returned, as shown below.
///
/// ```python
/// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
/// tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
/// ```
///
/// Args:
/// * scope: A Scope object
/// * x: The input tensor.
///
/// Returns:
/// * `Output`: The y tensor.
class Tan {
 public:
  Tan(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;
  ::tensorflow::Output y;
};
4615 | |
/// Computes hyperbolic tangent of `x` element-wise.
///
/// Given an input tensor, this function computes hyperbolic tangent of every
/// element in the tensor. Input range is `[-inf, inf]` and
/// output range is `[-1,1]`.
///
/// >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")])
/// >>> tf.math.tanh(x)
/// <tf.Tensor: shape=(8,), dtype=float32, numpy=
/// array([-1.0, -0.99990916, -0.46211717,  0.7615942 ,  0.8336547 ,
///         0.9640276 ,  0.9950547 ,  1.0], dtype=float32)>
///
///
/// Args:
/// * scope: A Scope object
/// * x: The input tensor.
///
/// Returns:
/// * `Output`: The y tensor.
class Tanh {
 public:
  Tanh(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
  operator ::tensorflow::Output() const { return y; }
  operator ::tensorflow::Input() const { return y; }
  ::tensorflow::Node* node() const { return y.node(); }

  Operation operation;
  ::tensorflow::Output y;
};
4644 | |
/// Returns x / y element-wise for integer types.
///
/// Truncation designates that negative numbers will round fractional quantities
/// toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different
/// than Python semantics. See `FloorDiv` for a division function that matches
/// Python Semantics.
///
/// *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
///
/// Args:
/// * scope: A Scope object
/// * x: The dividend tensor.
/// * y: The divisor tensor.
///
/// Returns:
/// * `Output`: The z tensor.
class TruncateDiv {
 public:
  TruncateDiv(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
            ::tensorflow::Input y);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
4671 | |
/// Returns element-wise remainder of division. This emulates C semantics in that
///
/// the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
/// y + truncate_mod(x, y) = x`.
///
/// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
///
/// Args:
/// * scope: A Scope object
/// * x: The dividend tensor.
/// * y: The divisor tensor.
///
/// Returns:
/// * `Output`: The z tensor.
class TruncateMod {
 public:
  TruncateMod(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
            ::tensorflow::Input y);
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  ::tensorflow::Node* node() const { return z.node(); }

  Operation operation;
  ::tensorflow::Output z;
};
4696 | |
/// Computes the maximum along segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// This operator is similar to `tf.math.unsorted_segment_sum`,
/// Instead of computing the sum over segments, it computes the maximum such that:
///
/// \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such
/// that `segment_ids[j...] == i`.
///
/// If the maximum is empty for a given segment ID `i`, it outputs the smallest
/// possible value for the specific numeric type,
/// `output[i] = numeric_limits<T>::lowest()`.
///
/// If the given segment ID `i` is negative, then the corresponding value is
/// dropped, and will not be included in the result.
///
/// Caution: On CPU, values in `segment_ids` are always validated to be less than
/// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
/// does not throw an error for out-of-bound indices. On GPU, out-of-bound indices
/// result in safe but unspecified behavior, which may include ignoring
/// out-of-bound indices or outputting a tensor with a 0 stored in the first
/// dimension of its shape if `num_segments` is 0.
///
/// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
/// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
/// </div>
///
/// For example:
///
/// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
/// >>> tf.math.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
/// array([[4, 3, 3, 4],
///        [5, 6, 7, 8]], dtype=int32)
///
///
/// Args:
/// * scope: A Scope object
/// * segment_ids: A tensor whose shape is a prefix of `data.shape`.
/// The values must be less than `num_segments`.
///
/// Caution: The values are always validated to be in range on CPU, never validated
/// on GPU.
///
/// Returns:
/// * `Output`: Has same shape as data, except for the first `segment_ids.rank`
/// dimensions, which are replaced with a single dimension which has size
/// `num_segments`.
class UnsortedSegmentMax {
 public:
  UnsortedSegmentMax(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
                   ::tensorflow::Input segment_ids, ::tensorflow::Input
                   num_segments);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4759 | |
/// Computes the minimum along segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// This operator is similar to `tf.math.unsorted_segment_sum`,
/// Instead of computing the sum over segments, it computes the minimum such that:
///
/// \\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such
/// that `segment_ids[j...] == i`.
///
/// If the minimum is empty for a given segment ID `i`, it outputs the largest
/// possible value for the specific numeric type,
/// `output[i] = numeric_limits<T>::max()`.
///
/// For example:
///
/// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
/// >>> tf.math.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
/// array([[1, 2, 2, 1],
///        [5, 6, 7, 8]], dtype=int32)
///
/// If the given segment ID `i` is negative, then the corresponding value is
/// dropped, and will not be included in the result.
///
/// Caution: On CPU, values in `segment_ids` are always validated to be less than
/// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
/// does not throw an error for out-of-bound indices. On GPU, out-of-bound indices
/// result in safe but unspecified behavior, which may include ignoring
/// out-of-bound indices or outputting a tensor with a 0 stored in the first
/// dimension of its shape if `num_segments` is 0.
///
/// Args:
/// * scope: A Scope object
/// * segment_ids: A tensor whose shape is a prefix of `data.shape`.
/// The values must be less than `num_segments`.
///
/// Caution: The values are always validated to be in range on CPU, never validated
/// on GPU.
///
/// Returns:
/// * `Output`: Has same shape as data, except for the first `segment_ids.rank`
/// dimensions, which are replaced with a single dimension which has size
/// `num_segments`.
class UnsortedSegmentMin {
 public:
  UnsortedSegmentMin(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
                   ::tensorflow::Input segment_ids, ::tensorflow::Input
                   num_segments);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4817 | |
/// Computes the product along segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// This operator is similar to `tf.math.unsorted_segment_sum`,
/// Instead of computing the sum over segments, it computes the product of all
/// entries belonging to a segment such that:
///
/// \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
/// `j...` such that `segment_ids[j...] == i`.
///
/// For example:
///
/// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
/// >>> tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
/// array([[4, 6, 6, 4],
///        [5, 6, 7, 8]], dtype=int32)
///
/// If there is no entry for a given segment ID `i`, it outputs 1.
///
/// If the given segment ID `i` is negative, then the corresponding value is
/// dropped, and will not be included in the result.
/// Caution: On CPU, values in `segment_ids` are always validated to be less than
/// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
/// does not throw an error for out-of-bound indices. On GPU, out-of-bound indices
/// result in safe but unspecified behavior, which may include ignoring
/// out-of-bound indices or outputting a tensor with a 0 stored in the first
/// dimension of its shape if `num_segments` is 0.
///
///
/// Args:
/// * scope: A Scope object
/// * segment_ids: A tensor whose shape is a prefix of `data.shape`.
/// The values must be less than `num_segments`.
///
/// Caution: The values are always validated to be in range on CPU, never validated
/// on GPU.
///
/// Returns:
/// * `Output`: Has same shape as data, except for the first `segment_ids.rank`
/// dimensions, which are replaced with a single dimension which has size
/// `num_segments`.
class UnsortedSegmentProd {
 public:
  UnsortedSegmentProd(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
                    ::tensorflow::Input segment_ids, ::tensorflow::Input
                    num_segments);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4874 | |
/// Computes the sum along segments of a tensor.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
/// that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`
/// need not be sorted and need not cover all values in the full
/// range of valid values.
///
/// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
/// If the given segment ID `i` is negative, the value is dropped and will not be
/// added to the sum of the segment.
///
/// `num_segments` should equal the number of distinct segment IDs.
///
/// Caution: On CPU, values in `segment_ids` are always validated to be less than
/// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
/// does not throw an error for out-of-bound indices. On GPU, out-of-bound indices
/// result in safe but unspecified behavior, which may include ignoring
/// out-of-bound indices or outputting a tensor with a 0 stored in the first
/// dimension of its shape if `num_segments` is 0.
///
/// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
/// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
/// </div>
///
/// >>> c = [[1,2,3,4], [5,6,7,8], [4,3,2,1]]
/// >>> tf.math.unsorted_segment_sum(c, [0, 1, 0], num_segments=2).numpy()
/// array([[5, 5, 5, 5],
///        [5, 6, 7, 8]], dtype=int32)
///
///
///
/// Args:
/// * scope: A Scope object
/// * segment_ids: A tensor whose shape is a prefix of `data.shape`.
/// The values must be less than `num_segments`.
///
/// Caution: The values are always validated to be in range on CPU, never validated
/// on GPU.
///
/// Returns:
/// * `Output`: Has same shape as data, except for the first `segment_ids.rank`
/// dimensions, which are replaced with a single dimension which has size
/// `num_segments`.
class UnsortedSegmentSum {
 public:
  UnsortedSegmentSum(const ::tensorflow::Scope& scope, ::tensorflow::Input data,
                   ::tensorflow::Input segment_ids, ::tensorflow::Input
                   num_segments);
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  Operation operation;
  ::tensorflow::Output output;
};
4935 | |
4936 | /// Returns 0 if x == 0, and x / y otherwise, elementwise. |
4937 | /// |
4938 | /// Args: |
4939 | /// * scope: A Scope object |
4940 | /// |
4941 | /// Returns: |
4942 | /// * `Output`: The z tensor. |
class Xdivy {
 public:
  /// Adds an Xdivy node to `scope` computing, elementwise,
  /// 0 if x == 0 and x / y otherwise.
  Xdivy(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
      ::tensorflow::Input y);
  // Implicit conversions let this op be used directly wherever an
  // Output or Input is expected.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  // The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  // Handle to the operation added to the graph.
  Operation operation;
  // The elementwise result tensor.
  ::tensorflow::Output z;
};
4954 | |
4955 | /// Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. |
4956 | /// |
4957 | /// Args: |
4958 | /// * scope: A Scope object |
4959 | /// |
4960 | /// Returns: |
4961 | /// * `Output`: The z tensor. |
class Xlog1py {
 public:
  /// Adds an Xlog1py node to `scope` computing, elementwise,
  /// 0 if x == 0 and x * log1p(y) otherwise.
  Xlog1py(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
        ::tensorflow::Input y);
  // Implicit conversions let this op be used directly wherever an
  // Output or Input is expected.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  // The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  // Handle to the operation added to the graph.
  Operation operation;
  // The elementwise result tensor.
  ::tensorflow::Output z;
};
4973 | |
4974 | /// Returns 0 if x == 0, and x * log(y) otherwise, elementwise. |
4975 | /// |
4976 | /// Args: |
4977 | /// * scope: A Scope object |
4978 | /// |
4979 | /// Returns: |
4980 | /// * `Output`: The z tensor. |
class Xlogy {
 public:
  /// Adds an Xlogy node to `scope` computing, elementwise,
  /// 0 if x == 0 and x * log(y) otherwise.
  Xlogy(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
      ::tensorflow::Input y);
  // Implicit conversions let this op be used directly wherever an
  // Output or Input is expected.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  // The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  // Handle to the operation added to the graph.
  Operation operation;
  // The elementwise result tensor.
  ::tensorflow::Output z;
};
4992 | |
4993 | /// Compute the Hurwitz zeta function \\(\zeta(x, q)\\). |
4994 | /// |
4995 | /// The Hurwitz zeta function is defined as: |
4996 | /// |
4997 | /// |
4998 | /// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\) |
4999 | /// |
5000 | /// Args: |
5001 | /// * scope: A Scope object |
5002 | /// |
5003 | /// Returns: |
5004 | /// * `Output`: The z tensor. |
class Zeta {
 public:
  /// Adds a Zeta node to `scope` computing the Hurwitz zeta function
  /// \\(\zeta(x, q)\\), elementwise.
  Zeta(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
     ::tensorflow::Input q);
  // Implicit conversions let this op be used directly wherever an
  // Output or Input is expected.
  operator ::tensorflow::Output() const { return z; }
  operator ::tensorflow::Input() const { return z; }
  // The graph node that produces `z`.
  ::tensorflow::Node* node() const { return z.node(); }

  // Handle to the operation added to the graph.
  Operation operation;
  // The elementwise result tensor.
  ::tensorflow::Output z;
};
5016 | |
5017 | /// @} |
5018 | |
5019 | } // namespace ops |
5020 | } // namespace tensorflow |
5021 | |
5022 | #endif // TENSORFLOW_CC_OPS_MATH_OPS_H_ |
5023 | |