1 | // This file is MACHINE GENERATED! Do not edit. |
2 | |
3 | #ifndef TENSORFLOW_CC_OPS_ARRAY_OPS_H_ |
4 | #define TENSORFLOW_CC_OPS_ARRAY_OPS_H_ |
5 | |
6 | // This file is MACHINE GENERATED! Do not edit. |
7 | |
8 | #include "tensorflow/cc/framework/ops.h" |
9 | #include "tensorflow/cc/framework/scope.h" |
10 | #include "tensorflow/core/framework/tensor.h" |
11 | #include "tensorflow/core/framework/tensor_shape.h" |
12 | #include "tensorflow/core/framework/types.h" |
13 | #include "tensorflow/core/lib/gtl/array_slice.h" |
14 | |
15 | namespace tensorflow { |
16 | namespace ops { |
17 | |
18 | /// @defgroup array_ops Array Ops |
19 | /// @{ |
20 | |
21 | /// BatchToSpace for 4-D tensors of type T. |
22 | /// |
23 | /// This is a legacy version of the more general BatchToSpaceND. |
24 | /// |
25 | /// Rearranges (permutes) data from batch into blocks of spatial data, followed by |
26 | /// cropping. This is the reverse transformation of SpaceToBatch. More specifically, |
27 | /// this op outputs a copy of the input tensor where values from the `batch` |
28 | /// dimension are moved in spatial blocks to the `height` and `width` dimensions, |
29 | /// followed by cropping along the `height` and `width` dimensions. |
30 | /// |
31 | /// Args: |
32 | /// * scope: A Scope object |
33 | /// * input: 4-D tensor with shape |
34 | /// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, |
35 | /// depth]`. Note that the batch size of the input tensor must be divisible by |
36 | /// `block_size * block_size`. |
37 | /// * crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies |
38 | /// how many elements to crop from the intermediate result across the spatial |
39 | /// dimensions as follows: |
40 | /// |
41 | /// crops = [[crop_top, crop_bottom], [crop_left, crop_right]] |
42 | /// |
43 | /// Returns: |
44 | /// * `Output`: 4-D with shape `[batch, height, width, depth]`, where: |
45 | /// |
46 | /// height = height_pad - crop_top - crop_bottom |
47 | /// width = width_pad - crop_left - crop_right |
48 | /// |
49 | /// The attr `block_size` must be greater than one. It indicates the block size. |
50 | /// |
51 | /// Some examples: |
52 | /// |
53 | /// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2: |
54 | /// |
55 | /// ``` |
56 | /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] |
57 | /// ``` |
58 | /// |
59 | /// The output tensor has shape `[1, 2, 2, 1]` and value: |
60 | /// |
61 | /// ``` |
62 | /// x = [[[[1], [2]], [[3], [4]]]] |
63 | /// ``` |
64 | /// |
65 | /// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2: |
66 | /// |
67 | /// ``` |
68 | /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] |
69 | /// ``` |
70 | /// |
71 | /// The output tensor has shape `[1, 2, 2, 3]` and value: |
72 | /// |
73 | /// ``` |
74 | /// x = [[[[1, 2, 3], [4, 5, 6]], |
75 | /// [[7, 8, 9], [10, 11, 12]]]] |
76 | /// ``` |
77 | /// |
78 | /// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2: |
79 | /// |
80 | /// ``` |
81 | /// x = [[[[1], [3]], [[9], [11]]], |
82 | /// [[[2], [4]], [[10], [12]]], |
83 | /// [[[5], [7]], [[13], [15]]], |
84 | /// [[[6], [8]], [[14], [16]]]] |
85 | /// ``` |
86 | /// |
87 | /// The output tensor has shape `[1, 4, 4, 1]` and value: |
88 | /// |
89 | /// ``` |
90 | /// x = [[[[1], [2], [3], [4]], |
91 | /// [[5], [6], [7], [8]], |
92 | /// [[9], [10], [11], [12]], |
93 | /// [[13], [14], [15], [16]]]] |
94 | /// ``` |
95 | /// |
96 | /// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2: |
97 | /// |
98 | /// ``` |
99 | /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], |
100 | /// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] |
101 | /// ``` |
102 | /// |
103 | /// The output tensor has shape `[2, 2, 4, 1]` and value: |
104 | /// |
105 | /// ``` |
/// x = [[[[1], [2], [3], [4]],
///       [[5], [6], [7], [8]]],
///      [[[9], [10], [11], [12]],
///       [[13], [14], [15], [16]]]]
110 | /// ``` |
111 | class BatchToSpace { |
112 | public: |
113 | BatchToSpace(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
114 | ::tensorflow::Input crops, int64 block_size); |
115 | operator ::tensorflow::Output() const { return output; } |
116 | operator ::tensorflow::Input() const { return output; } |
117 | ::tensorflow::Node* node() const { return output.node(); } |
118 | |
119 | Operation operation; |
120 | ::tensorflow::Output output; |
121 | }; |
122 | |
123 | /// BatchToSpace for N-D tensors of type T. |
124 | /// |
125 | /// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape |
126 | /// `block_shape + [batch]`, interleaves these blocks back into the grid defined by |
127 | /// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as |
128 | /// the input. The spatial dimensions of this intermediate result are then |
129 | /// optionally cropped according to `crops` to produce the output. This is the |
130 | /// reverse of SpaceToBatch. See below for a precise description. |
131 | /// |
132 | /// Args: |
133 | /// * scope: A Scope object |
134 | /// * input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, |
135 | /// where spatial_shape has M dimensions. |
136 | /// * block_shape: 1-D with shape `[M]`, all values must be >= 1. |
137 | /// * crops: 2-D with shape `[M, 2]`, all values must be >= 0. |
138 | /// `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input |
139 | /// dimension `i + 1`, which corresponds to spatial dimension `i`. It is |
140 | /// required that |
141 | /// `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. |
142 | /// |
143 | /// This operation is equivalent to the following steps: |
144 | /// |
145 | /// 1. Reshape `input` to `reshaped` of shape: |
146 | /// [block_shape[0], ..., block_shape[M-1], |
147 | /// batch / prod(block_shape), |
148 | /// input_shape[1], ..., input_shape[N-1]] |
149 | /// |
150 | /// 2. Permute dimensions of `reshaped` to produce `permuted` of shape |
151 | /// [batch / prod(block_shape), |
152 | /// |
153 | /// input_shape[1], block_shape[0], |
154 | /// ..., |
155 | /// input_shape[M], block_shape[M-1], |
156 | /// |
157 | /// input_shape[M+1], ..., input_shape[N-1]] |
158 | /// |
159 | /// 3. Reshape `permuted` to produce `reshaped_permuted` of shape |
160 | /// [batch / prod(block_shape), |
161 | /// |
162 | /// input_shape[1] * block_shape[0], |
163 | /// ..., |
164 | /// input_shape[M] * block_shape[M-1], |
165 | /// |
166 | /// input_shape[M+1], |
167 | /// ..., |
168 | /// input_shape[N-1]] |
169 | /// |
170 | /// 4. Crop the start and end of dimensions `[1, ..., M]` of |
171 | /// `reshaped_permuted` according to `crops` to produce the output of shape: |
172 | /// [batch / prod(block_shape), |
173 | /// |
174 | /// input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], |
175 | /// ..., |
176 | /// input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], |
177 | /// |
178 | /// input_shape[M+1], ..., input_shape[N-1]] |
179 | /// |
180 | /// Some examples: |
181 | /// |
182 | /// (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and |
183 | /// `crops = [[0, 0], [0, 0]]`: |
184 | /// |
185 | /// ``` |
186 | /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] |
187 | /// ``` |
188 | /// |
189 | /// The output tensor has shape `[1, 2, 2, 1]` and value: |
190 | /// |
191 | /// ``` |
192 | /// x = [[[[1], [2]], [[3], [4]]]] |
193 | /// ``` |
194 | /// |
195 | /// (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and |
196 | /// `crops = [[0, 0], [0, 0]]`: |
197 | /// |
198 | /// ``` |
199 | /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] |
200 | /// ``` |
201 | /// |
202 | /// The output tensor has shape `[1, 2, 2, 3]` and value: |
203 | /// |
204 | /// ``` |
205 | /// x = [[[[1, 2, 3], [4, 5, 6]], |
206 | /// [[7, 8, 9], [10, 11, 12]]]] |
207 | /// ``` |
208 | /// |
209 | /// (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and |
210 | /// `crops = [[0, 0], [0, 0]]`: |
211 | /// |
212 | /// ``` |
213 | /// x = [[[[1], [3]], [[9], [11]]], |
214 | /// [[[2], [4]], [[10], [12]]], |
215 | /// [[[5], [7]], [[13], [15]]], |
216 | /// [[[6], [8]], [[14], [16]]]] |
217 | /// ``` |
218 | /// |
219 | /// The output tensor has shape `[1, 4, 4, 1]` and value: |
220 | /// |
221 | /// ``` |
222 | /// x = [[[[1], [2], [3], [4]], |
223 | /// [[5], [6], [7], [8]], |
224 | /// [[9], [10], [11], [12]], |
225 | /// [[13], [14], [15], [16]]]] |
226 | /// ``` |
227 | /// |
228 | /// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and |
229 | /// `crops = [[0, 0], [2, 0]]`: |
230 | /// |
231 | /// ``` |
232 | /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], |
233 | /// [[[0], [2], [4]]], [[[0], [10], [12]]], |
234 | /// [[[0], [5], [7]]], [[[0], [13], [15]]], |
235 | /// [[[0], [6], [8]]], [[[0], [14], [16]]]] |
236 | /// ``` |
237 | /// |
238 | /// The output tensor has shape `[2, 2, 4, 1]` and value: |
239 | /// |
240 | /// ``` |
241 | /// x = [[[[1], [2], [3], [4]], |
242 | /// [[5], [6], [7], [8]]], |
243 | /// [[[9], [10], [11], [12]], |
244 | /// [[13], [14], [15], [16]]]] |
245 | /// ``` |
246 | /// |
247 | /// Returns: |
248 | /// * `Output`: The output tensor. |
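///
/// A minimal C++ usage sketch, mirroring example (1) above (assumes a valid
/// root scope):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto input = Const(root, {{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}});  // [4, 1, 1, 1]
/// auto block_shape = Const(root, {2, 2});
/// auto crops = Const(root, {{0, 0}, {0, 0}});
/// auto out = BatchToSpaceND(root, input, block_shape, crops);  // [1, 2, 2, 1]
/// ```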
249 | class BatchToSpaceND { |
250 | public: |
251 | BatchToSpaceND(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
252 | ::tensorflow::Input block_shape, ::tensorflow::Input crops); |
253 | operator ::tensorflow::Output() const { return output; } |
254 | operator ::tensorflow::Input() const { return output; } |
255 | ::tensorflow::Node* node() const { return output.node(); } |
256 | |
257 | Operation operation; |
258 | ::tensorflow::Output output; |
259 | }; |
260 | |
261 | /// Bitcasts a tensor from one type to another without copying data. |
262 | /// |
263 | /// Given a tensor `input`, this operation returns a tensor that has the same buffer |
264 | /// data as `input` with datatype `type`. |
265 | /// |
266 | /// If the input datatype `T` is larger than the output datatype `type` then the |
267 | /// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. |
268 | /// |
269 | /// If `T` is smaller than `type`, the operator requires that the rightmost |
270 | /// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from |
271 | /// [..., sizeof(`type`)/sizeof(`T`)] to [...]. |
272 | /// |
/// tf.bitcast() and tf.cast() work differently when a real dtype is cast to a complex
/// dtype (e.g. tf.complex64 or tf.complex128): tf.cast() sets the imaginary part to 0,
/// while tf.bitcast() raises an error, as Example 1 shows.
276 | /// For example, |
277 | /// |
278 | /// Example 1: |
279 | /// |
280 | /// >>> a = [1., 2., 3.] |
281 | /// >>> equality_bitcast = tf.bitcast(a, tf.complex128) |
282 | /// Traceback (most recent call last): |
283 | /// ... |
284 | /// InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] |
285 | /// >>> equality_cast = tf.cast(a, tf.complex128) |
286 | /// >>> print(equality_cast) |
287 | /// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) |
288 | /// |
289 | /// Example 2: |
290 | /// |
291 | /// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) |
292 | /// <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)> |
293 | /// |
294 | /// Example 3: |
295 | /// |
296 | /// >>> x = [1., 2., 3.] |
297 | /// >>> y = [0., 2., 3.] |
/// >>> equality = tf.equal(x, y)
/// >>> equality_cast = tf.cast(equality, tf.float32)
/// >>> equality_bitcast = tf.bitcast(equality_cast, tf.uint8)
301 | /// >>> print(equality) |
302 | /// tf.Tensor([False True True], shape=(3,), dtype=bool) |
303 | /// >>> print(equality_cast) |
304 | /// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) |
305 | /// >>> print(equality_bitcast) |
306 | /// tf.Tensor( |
307 | /// [[ 0 0 0 0] |
308 | /// [ 0 0 128 63] |
309 | /// [ 0 0 128 63]], shape=(3, 4), dtype=uint8) |
310 | /// |
311 | /// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different |
312 | /// endian orderings will give different results. |
313 | /// |
314 | /// Args: |
315 | /// * scope: A Scope object |
316 | /// |
317 | /// Returns: |
318 | /// * `Output`: The output tensor. |
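///
/// A minimal C++ usage sketch, mirroring Example 2 above (assumes a valid
/// root scope):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto x = Const(root, uint32{0xffffffff});
/// // Reinterprets the 4 bytes of the uint32 as a length-4 uint8 vector.
/// auto y = Bitcast(root, x, DT_UINT8);
/// ```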
319 | class Bitcast { |
320 | public: |
321 | Bitcast(const ::tensorflow::Scope& scope, ::tensorflow::Input input, DataType |
322 | type); |
323 | operator ::tensorflow::Output() const { return output; } |
324 | operator ::tensorflow::Input() const { return output; } |
325 | ::tensorflow::Node* node() const { return output.node(); } |
326 | |
327 | Operation operation; |
328 | ::tensorflow::Output output; |
329 | }; |
330 | |
331 | /// Return the shape of s0 op s1 with broadcast. |
332 | /// |
333 | /// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the |
334 | /// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. |
335 | /// |
336 | /// Args: |
337 | /// * scope: A Scope object |
338 | /// |
339 | /// Returns: |
340 | /// * `Output`: The r0 tensor. |
341 | class BroadcastDynamicShape { |
342 | public: |
343 | BroadcastDynamicShape(const ::tensorflow::Scope& scope, ::tensorflow::Input s0, |
344 | ::tensorflow::Input s1); |
345 | operator ::tensorflow::Output() const { return r0; } |
346 | operator ::tensorflow::Input() const { return r0; } |
347 | ::tensorflow::Node* node() const { return r0.node(); } |
348 | |
349 | Operation operation; |
350 | ::tensorflow::Output r0; |
351 | }; |
352 | |
353 | /// Broadcast an array for a compatible shape. |
354 | /// |
/// Broadcasting is the process of making arrays have compatible shapes
/// for arithmetic operations. Two shapes are compatible if, for each
/// dimension pair, they are either equal or one of them is one.
358 | /// |
359 | /// For example: |
360 | /// |
361 | /// >>> x = tf.constant([[1, 2, 3]]) # Shape (1, 3,) |
362 | /// >>> y = tf.broadcast_to(x, [2, 3]) |
363 | /// >>> print(y) |
364 | /// tf.Tensor( |
365 | /// [[1 2 3] |
366 | /// [1 2 3]], shape=(2, 3), dtype=int32) |
367 | /// |
/// In the above example, the input Tensor with shape `[1, 3]`
/// is broadcast to an output Tensor with shape `[2, 3]`.
370 | /// |
371 | /// When broadcasting, if a tensor has fewer axes than necessary its shape is |
372 | /// padded on the left with ones. So this gives the same result as the previous |
373 | /// example: |
374 | /// |
375 | /// >>> x = tf.constant([1, 2, 3]) # Shape (3,) |
376 | /// >>> y = tf.broadcast_to(x, [2, 3]) |
377 | /// |
378 | /// |
379 | /// When doing broadcasted operations such as multiplying a tensor |
380 | /// by a scalar, broadcasting (usually) confers some time or space |
381 | /// benefit, as the broadcasted tensor is never materialized. |
382 | /// |
383 | /// However, `broadcast_to` does not carry with it any such benefits. |
384 | /// The newly-created tensor takes the full memory of the broadcasted |
/// shape. (In a graph context, `broadcast_to` might be fused into a
/// subsequent operation and then optimized away, however.)
387 | /// |
388 | /// Args: |
389 | /// * scope: A Scope object |
390 | /// * input: A Tensor to broadcast. |
/// * shape: A 1-D `int` Tensor. The shape of the desired output.
392 | /// |
393 | /// Returns: |
394 | /// * `Output`: A Tensor. |
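///
/// A minimal C++ usage sketch, mirroring the Python example above (assumes a
/// valid root scope):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto x = Const(root, {{1, 2, 3}});       // shape [1, 3]
/// auto y = BroadcastTo(root, x, {2, 3});   // shape [2, 3]
/// ```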
395 | class BroadcastTo { |
396 | public: |
397 | BroadcastTo(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
398 | ::tensorflow::Input shape); |
399 | operator ::tensorflow::Output() const { return output; } |
400 | operator ::tensorflow::Input() const { return output; } |
401 | ::tensorflow::Node* node() const { return output.node(); } |
402 | |
403 | Operation operation; |
404 | ::tensorflow::Output output; |
405 | }; |
406 | |
407 | /// Checks a tensor for NaN and Inf values. |
408 | /// |
409 | /// When run, reports an `InvalidArgument` error if `tensor` has any values |
410 | /// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input |
411 | /// tensor. |
412 | /// |
413 | /// Example usage: |
414 | /// |
/// ```python
416 | /// a = tf.Variable(1.0) |
417 | /// tf.debugging.check_numerics(a, message='') |
418 | /// |
419 | /// b = tf.Variable(np.nan) |
420 | /// try: |
421 | /// tf.debugging.check_numerics(b, message='Checking b') |
422 | /// except Exception as e: |
423 | /// assert "Checking b : Tensor had NaN values" in e.message |
424 | /// |
425 | /// c = tf.Variable(np.inf) |
426 | /// try: |
427 | /// tf.debugging.check_numerics(c, message='Checking c') |
428 | /// except Exception as e: |
429 | /// assert "Checking c : Tensor had Inf values" in e.message |
430 | /// ``` |
431 | /// |
432 | /// |
433 | /// Args: |
434 | /// * scope: A Scope object |
435 | /// * message: Prefix of the error message. |
436 | /// |
437 | /// Returns: |
438 | /// * `Output`: The output tensor. |
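///
/// A minimal C++ usage sketch (assumes a valid root scope; the message is an
/// arbitrary illustrative prefix):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto t = Const(root, {1.0f, 2.0f});
/// // Returns t unchanged unless it contains NaN or Inf values.
/// auto checked = CheckNumerics(root, t, "Checking t");
/// ```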
439 | class CheckNumerics { |
440 | public: |
441 | CheckNumerics(const ::tensorflow::Scope& scope, ::tensorflow::Input tensor, |
442 | StringPiece message); |
443 | operator ::tensorflow::Output() const { return output; } |
444 | operator ::tensorflow::Input() const { return output; } |
445 | ::tensorflow::Node* node() const { return output.node(); } |
446 | |
447 | Operation operation; |
448 | ::tensorflow::Output output; |
449 | }; |
450 | |
451 | /// Concatenates tensors along one dimension. |
452 | /// |
453 | /// Args: |
454 | /// * scope: A Scope object |
455 | /// * values: List of `N` Tensors to concatenate. Their ranks and types must match, |
456 | /// and their sizes must match in all dimensions except `concat_dim`. |
457 | /// * axis: 0-D. The dimension along which to concatenate. Must be in the |
458 | /// range [-rank(values), rank(values)). |
459 | /// |
460 | /// Returns: |
461 | /// * `Output`: A `Tensor` with the concatenation of values stacked along the |
462 | /// `concat_dim` dimension. This tensor's shape matches that of `values` except |
463 | /// in `concat_dim` where it has the sum of the sizes. |
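///
/// A minimal C++ usage sketch (assumes a valid root scope):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto a = Const(root, {{1, 2}, {3, 4}});  // shape [2, 2]
/// auto b = Const(root, {{5, 6}});          // shape [1, 2]
/// // Concatenating along axis 0 yields shape [3, 2].
/// auto c = Concat(root, {a, b}, 0);
/// ```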
464 | class Concat { |
465 | public: |
466 | Concat(const ::tensorflow::Scope& scope, ::tensorflow::InputList values, |
467 | ::tensorflow::Input axis); |
468 | operator ::tensorflow::Output() const { return output; } |
469 | operator ::tensorflow::Input() const { return output; } |
470 | ::tensorflow::Node* node() const { return output.node(); } |
471 | |
472 | Operation operation; |
473 | ::tensorflow::Output output; |
474 | }; |
475 | |
476 | /// Shuffle dimensions of x according to a permutation and conjugate the result. |
477 | /// |
478 | /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: |
479 | /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` |
480 | /// `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])` |
481 | /// |
482 | /// Args: |
483 | /// * scope: A Scope object |
484 | /// |
485 | /// Returns: |
486 | /// * `Output`: The y tensor. |
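///
/// A minimal C++ usage sketch (assumes a valid root scope; for real-valued
/// input the conjugation is a no-op, so this behaves like a plain transpose):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto x = Const(root, {{1, 2, 3}, {4, 5, 6}});   // shape [2, 3]
/// auto y = ConjugateTranspose(root, x, {1, 0});   // shape [3, 2]
/// ```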
487 | class ConjugateTranspose { |
488 | public: |
489 | ConjugateTranspose(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
490 | ::tensorflow::Input perm); |
491 | operator ::tensorflow::Output() const { return y; } |
492 | operator ::tensorflow::Input() const { return y; } |
493 | ::tensorflow::Node* node() const { return y.node(); } |
494 | |
495 | Operation operation; |
496 | ::tensorflow::Output y; |
497 | }; |
498 | |
499 | /// Identity op for gradient debugging. |
500 | /// |
501 | /// This op is hidden from public in Python. It is used by TensorFlow Debugger to |
502 | /// register gradient tensors for gradient debugging. |
503 | /// This op operates on non-reference-type tensors. |
504 | /// |
505 | /// Args: |
506 | /// * scope: A Scope object |
507 | /// |
508 | /// Returns: |
509 | /// * `Output`: The output tensor. |
510 | class DebugGradientIdentity { |
511 | public: |
512 | DebugGradientIdentity(const ::tensorflow::Scope& scope, ::tensorflow::Input |
513 | input); |
514 | operator ::tensorflow::Output() const { return output; } |
515 | operator ::tensorflow::Input() const { return output; } |
516 | ::tensorflow::Node* node() const { return output.node(); } |
517 | |
518 | Operation operation; |
519 | ::tensorflow::Output output; |
520 | }; |
521 | |
522 | /// Identity op for gradient debugging. |
523 | /// |
524 | /// This op is hidden from public in Python. It is used by TensorFlow Debugger to |
525 | /// register gradient tensors for gradient debugging. |
526 | /// This op operates on reference-type tensors. |
527 | /// |
528 | /// Args: |
529 | /// * scope: A Scope object |
530 | /// |
531 | /// Returns: |
532 | /// * `Output`: The output tensor. |
533 | class DebugGradientRefIdentity { |
534 | public: |
535 | DebugGradientRefIdentity(const ::tensorflow::Scope& scope, ::tensorflow::Input |
536 | input); |
537 | operator ::tensorflow::Output() const { return output; } |
538 | operator ::tensorflow::Input() const { return output; } |
539 | ::tensorflow::Node* node() const { return output.node(); } |
540 | |
541 | Operation operation; |
542 | ::tensorflow::Output output; |
543 | }; |
544 | |
545 | /// Makes a copy of `x`. |
546 | /// |
547 | /// Args: |
548 | /// * scope: A Scope object |
549 | /// * x: The source tensor of type `T`. |
550 | /// |
551 | /// Returns: |
552 | /// * `Output`: y: A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y` |
553 | /// is not an alias of `x`. |
554 | class DeepCopy { |
555 | public: |
556 | DeepCopy(const ::tensorflow::Scope& scope, ::tensorflow::Input x); |
557 | operator ::tensorflow::Output() const { return y; } |
558 | operator ::tensorflow::Input() const { return y; } |
559 | ::tensorflow::Node* node() const { return y.node(); } |
560 | |
561 | Operation operation; |
562 | ::tensorflow::Output y; |
563 | }; |
564 | |
565 | /// DepthToSpace for tensors of type T. |
566 | /// |
567 | /// Rearranges data from depth into blocks of spatial data. |
568 | /// This is the reverse transformation of SpaceToDepth. More specifically, |
569 | /// this op outputs a copy of the input tensor where values from the `depth` |
570 | /// dimension are moved in spatial blocks to the `height` and `width` dimensions. |
571 | /// The attr `block_size` indicates the input block size and how the data is moved. |
572 | /// |
573 | /// * Chunks of data of size `block_size * block_size` from depth are rearranged |
574 | /// into non-overlapping blocks of size `block_size x block_size` |
575 | /// * The width of the output tensor is `input_depth * block_size`, whereas the |
576 | /// height is `input_height * block_size`. |
577 | /// * The Y, X coordinates within each block of the output image are determined |
578 | /// by the high order component of the input channel index. |
579 | /// * The depth of the input tensor must be divisible by |
580 | /// `block_size * block_size`. |
581 | /// |
582 | /// The `data_format` attr specifies the layout of the input and output tensors |
583 | /// with the following options: |
584 | /// "NHWC": `[ batch, height, width, channels ]` |
585 | /// "NCHW": `[ batch, channels, height, width ]` |
586 | /// "NCHW_VECT_C": |
587 | /// `qint8 [ batch, channels / 4, height, width, 4 ]` |
588 | /// |
589 | /// It is useful to consider the operation as transforming a 6-D Tensor. |
/// For example, for data_format = NHWC, each element in the input tensor can
/// be specified via 6 coordinates, ordered by decreasing memory layout
/// significance as: n,iY,iX,bY,bX,oC (where n is the batch index, iY and iX
/// are the Y and X coordinates within the input image, bY and bX are the
/// coordinates within the output block, and oC is the output channel).
596 | /// The output would be the input transposed to the following layout: |
597 | /// n,iY,bY,iX,bX,oC |
598 | /// |
599 | /// This operation is useful for resizing the activations between convolutions |
600 | /// (but keeping all data), e.g. instead of pooling. It is also useful for training |
601 | /// purely convolutional models. |
602 | /// |
603 | /// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and |
604 | /// block_size = 2: |
605 | /// |
606 | /// ``` |
607 | /// x = [[[[1, 2, 3, 4]]]] |
608 | /// |
609 | /// ``` |
610 | /// |
611 | /// This operation will output a tensor of shape `[1, 2, 2, 1]`: |
612 | /// |
613 | /// ``` |
614 | /// [[[[1], [2]], |
615 | /// [[3], [4]]]] |
616 | /// ``` |
617 | /// |
618 | /// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, |
619 | /// the corresponding output will have 2x2 elements and will have a depth of |
620 | /// 1 channel (1 = `4 / (block_size * block_size)`). |
621 | /// The output element shape is `[2, 2, 1]`. |
622 | /// |
623 | /// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. |
624 | /// |
625 | /// ``` |
626 | /// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] |
627 | /// ``` |
628 | /// |
629 | /// This operation, for block size of 2, will return the following tensor of shape |
630 | /// `[1, 2, 2, 3]` |
631 | /// |
632 | /// ``` |
633 | /// [[[[1, 2, 3], [4, 5, 6]], |
634 | /// [[7, 8, 9], [10, 11, 12]]]] |
635 | /// |
636 | /// ``` |
637 | /// |
/// Similarly, for the following input of shape `[1, 2, 2, 4]`, and a block size of 2:
639 | /// |
640 | /// ``` |
641 | /// x = [[[[1, 2, 3, 4], |
642 | /// [5, 6, 7, 8]], |
643 | /// [[9, 10, 11, 12], |
644 | /// [13, 14, 15, 16]]]] |
645 | /// ``` |
646 | /// |
/// the operator will return the following tensor of shape `[1, 4, 4, 1]`:
648 | /// |
649 | /// ``` |
650 | /// x = [[[ [1], [2], [5], [6]], |
651 | /// [ [3], [4], [7], [8]], |
652 | /// [ [9], [10], [13], [14]], |
653 | /// [ [11], [12], [15], [16]]]] |
654 | /// |
655 | /// ``` |
656 | /// |
657 | /// Args: |
658 | /// * scope: A Scope object |
659 | /// * block_size: The size of the spatial block, same as in Space2Depth. |
660 | /// |
661 | /// Returns: |
662 | /// * `Output`: The output tensor. |
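///
/// A minimal C++ usage sketch, mirroring the first example above (assumes a
/// valid root scope):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto x = Const(root, {{{{1, 2, 3, 4}}}});  // shape [1, 1, 1, 4]
/// // block_size = 2 yields an output of shape [1, 2, 2, 1].
/// auto y = DepthToSpace(root, x, 2);
/// ```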
663 | class DepthToSpace { |
664 | public: |
665 | /// Optional attribute setters for DepthToSpace |
666 | struct Attrs { |
667 | /// Defaults to "NHWC" |
668 | TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) { |
669 | Attrs ret = *this; |
670 | ret.data_format_ = x; |
671 | return ret; |
672 | } |
673 | |
674 | StringPiece data_format_ = "NHWC" ; |
675 | }; |
676 | DepthToSpace(const ::tensorflow::Scope& scope, ::tensorflow::Input input, int64 |
677 | block_size); |
678 | DepthToSpace(const ::tensorflow::Scope& scope, ::tensorflow::Input input, int64 |
679 | block_size, const DepthToSpace::Attrs& attrs); |
680 | operator ::tensorflow::Output() const { return output; } |
681 | operator ::tensorflow::Input() const { return output; } |
682 | ::tensorflow::Node* node() const { return output.node(); } |
683 | |
684 | static Attrs DataFormat(StringPiece x) { |
685 | return Attrs().DataFormat(x); |
686 | } |
687 | |
688 | Operation operation; |
689 | ::tensorflow::Output output; |
690 | }; |
691 | |
692 | /// Dequantize the 'input' tensor into a float or bfloat16 Tensor. |
693 | /// |
694 | /// [min_range, max_range] are scalar floats that specify the range for |
695 | /// the output. The 'mode' attribute controls exactly which calculations are |
696 | /// used to convert the float values to their quantized equivalents. |
697 | /// |
698 | /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: |
699 | /// |
700 | /// ``` |
/// if T == qint8: in[i] += (range(T) + 1) / 2.0
/// out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
/// ```
/// where `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
705 | /// |
706 | /// *MIN_COMBINED Mode Example* |
707 | /// |
708 | /// If the input comes from a QuantizedRelu6, the output type is |
709 | /// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is |
710 | /// 0-6. The min_range and max_range values are therefore 0.0 and 6.0. |
711 | /// Dequantize on quint8 will take each value, cast to float, and multiply |
712 | /// by 6 / 255. |
/// Note that if the quantized type is qint8, the operation will additionally
/// add 128 to each value prior to casting.
715 | /// |
716 | /// If the mode is 'MIN_FIRST', then this approach is used: |
717 | /// |
718 | /// ```c++ |
719 | /// num_discrete_values = 1 << (# of bits in T) |
720 | /// range_adjust = num_discrete_values / (num_discrete_values - 1) |
721 | /// range = (range_max - range_min) * range_adjust |
722 | /// range_scale = range / num_discrete_values |
723 | /// const double offset_input = static_cast<double>(input) - lowest_quantized; |
724 | /// result = range_min + ((input - numeric_limits<T>::min()) * range_scale) |
725 | /// ``` |
726 | /// |
727 | /// If the mode is `SCALED`, dequantization is performed by multiplying each |
728 | /// input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). |
729 | /// |
730 | /// The scaling_factor is determined from `min_range`, `max_range`, and |
731 | /// `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}` |
732 | /// and `QuantizeV2`, using the following algorithm: |
733 | /// |
734 | /// ```c++ |
735 | /// |
/// const int min_expected_T = std::numeric_limits<T>::min() +
///   (narrow_range ? 1 : 0);
/// const int max_expected_T = std::numeric_limits<T>::max();
///
/// const float scale_factor =
///     (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)
///                                          : std::max(min_range / min_expected_T,
///                                                     max_range / max_expected_T);
745 | /// ``` |
746 | /// |
747 | /// Args: |
748 | /// * scope: A Scope object |
749 | /// * min_range: The minimum scalar value possibly produced for the input. |
750 | /// * max_range: The maximum scalar value possibly produced for the input. |
751 | /// |
752 | /// Optional attributes (see `Attrs`): |
753 | /// * dtype: Type of the output tensor. Currently Dequantize supports float and bfloat16. |
754 | /// If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. |
755 | /// |
756 | /// Returns: |
757 | /// * `Output`: The output tensor. |
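///
/// A minimal C++ usage sketch showing the optional `Mode` attribute (assumes a
/// valid root scope and a quantized tensor `qinput`, e.g. of type quint8,
/// produced elsewhere):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// // min_range/max_range give the float range represented by qinput.
/// auto out = Dequantize(root, qinput, 0.0f, 6.0f,
///                       Dequantize::Mode("SCALED"));
/// ```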
758 | class Dequantize { |
759 | public: |
760 | /// Optional attribute setters for Dequantize |
761 | struct Attrs { |
762 | /// Defaults to "MIN_COMBINED" |
763 | TF_MUST_USE_RESULT Attrs Mode(StringPiece x) { |
764 | Attrs ret = *this; |
765 | ret.mode_ = x; |
766 | return ret; |
767 | } |
768 | |
769 | /// Defaults to false |
770 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
771 | Attrs ret = *this; |
772 | ret.narrow_range_ = x; |
773 | return ret; |
774 | } |
775 | |
776 | /// Defaults to -1 |
777 | TF_MUST_USE_RESULT Attrs Axis(int64 x) { |
778 | Attrs ret = *this; |
779 | ret.axis_ = x; |
780 | return ret; |
781 | } |
782 | |
783 | /// Type of the output tensor. Currently Dequantize supports float and bfloat16. |
784 | /// If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. |
785 | /// |
786 | /// Defaults to DT_FLOAT |
787 | TF_MUST_USE_RESULT Attrs Dtype(DataType x) { |
788 | Attrs ret = *this; |
789 | ret.dtype_ = x; |
790 | return ret; |
791 | } |
792 | |
793 | StringPiece mode_ = "MIN_COMBINED" ; |
794 | bool narrow_range_ = false; |
795 | int64 axis_ = -1; |
796 | DataType dtype_ = DT_FLOAT; |
797 | }; |
798 | Dequantize(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
799 | ::tensorflow::Input min_range, ::tensorflow::Input max_range); |
800 | Dequantize(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
801 | ::tensorflow::Input min_range, ::tensorflow::Input max_range, const |
802 | Dequantize::Attrs& attrs); |
803 | operator ::tensorflow::Output() const { return output; } |
804 | operator ::tensorflow::Input() const { return output; } |
805 | ::tensorflow::Node* node() const { return output.node(); } |
806 | |
807 | static Attrs Mode(StringPiece x) { |
808 | return Attrs().Mode(x); |
809 | } |
810 | static Attrs NarrowRange(bool x) { |
811 | return Attrs().NarrowRange(x); |
812 | } |
813 | static Attrs Axis(int64 x) { |
814 | return Attrs().Axis(x); |
815 | } |
816 | static Attrs Dtype(DataType x) { |
817 | return Attrs().Dtype(x); |
818 | } |
819 | |
820 | Operation operation; |
821 | ::tensorflow::Output output; |
822 | }; |
823 | |
/// Returns a diagonal tensor with given diagonal values.
825 | /// |
826 | /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and |
827 | /// everything else padded with zeros. The diagonal is computed as follows: |
828 | /// |
829 | /// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of |
830 | /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: |
831 | /// |
832 | /// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. |
833 | /// |
834 | /// For example: |
835 | /// |
836 | /// ``` |
837 | /// # 'diagonal' is [1, 2, 3, 4] |
838 | /// tf.diag(diagonal) ==> [[1, 0, 0, 0] |
839 | /// [0, 2, 0, 0] |
840 | /// [0, 0, 3, 0] |
841 | /// [0, 0, 0, 4]] |
842 | /// ``` |
843 | /// |
844 | /// Args: |
845 | /// * scope: A Scope object |
846 | /// * diagonal: Rank k tensor where k is at most 1. |
847 | /// |
848 | /// Returns: |
849 | /// * `Output`: The output tensor. |
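///
/// A minimal C++ usage sketch, mirroring the example above (assumes a valid
/// root scope):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto diagonal = Const(root, {1, 2, 3, 4});
/// // Produces a 4x4 matrix with 1..4 on the main diagonal, zeros elsewhere.
/// auto d = Diag(root, diagonal);
/// ```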
850 | class Diag { |
851 | public: |
852 | Diag(const ::tensorflow::Scope& scope, ::tensorflow::Input diagonal); |
853 | operator ::tensorflow::Output() const { return output; } |
854 | operator ::tensorflow::Input() const { return output; } |
855 | ::tensorflow::Node* node() const { return output.node(); } |
856 | |
857 | Operation operation; |
858 | ::tensorflow::Output output; |
859 | }; |
860 | |
861 | /// Returns the diagonal part of the tensor. |
862 | /// |
863 | /// This operation returns a tensor with the `diagonal` part |
864 | /// of the `input`. The `diagonal` part is computed as follows: |
865 | /// |
866 | /// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a |
867 | /// tensor of rank `k` with dimensions `[D1,..., Dk]` where: |
868 | /// |
869 | /// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. |
870 | /// |
871 | /// For example: |
872 | /// |
873 | /// ``` |
874 | /// # 'input' is [[1, 0, 0, 0] |
875 | /// [0, 2, 0, 0] |
876 | /// [0, 0, 3, 0] |
877 | /// [0, 0, 0, 4]] |
878 | /// |
879 | /// tf.diag_part(input) ==> [1, 2, 3, 4] |
880 | /// ``` |
881 | /// |
882 | /// Args: |
883 | /// * scope: A Scope object |
884 | /// * input: Rank k tensor where k is even and not zero. |
885 | /// |
886 | /// Returns: |
887 | /// * `Output`: The extracted diagonal. |
888 | class DiagPart { |
889 | public: |
890 | DiagPart(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
891 | operator ::tensorflow::Output() const { return diagonal; } |
892 | operator ::tensorflow::Input() const { return diagonal; } |
893 | ::tensorflow::Node* node() const { return diagonal.node(); } |
894 | |
895 | Operation operation; |
896 | ::tensorflow::Output diagonal; |
897 | }; |
898 | |
899 | /// Computes the (possibly normalized) Levenshtein Edit Distance. |
900 | /// |
901 | /// The inputs are variable-length sequences provided by SparseTensors |
902 | /// (hypothesis_indices, hypothesis_values, hypothesis_shape) |
903 | /// and |
904 | /// (truth_indices, truth_values, truth_shape). |
905 | /// |
906 | /// The inputs are: |
907 | /// |
908 | /// Args: |
909 | /// * scope: A Scope object |
910 | /// * hypothesis_indices: The indices of the hypothesis list SparseTensor. |
911 | /// This is an N x R int64 matrix. |
912 | /// * hypothesis_values: The values of the hypothesis list SparseTensor. |
913 | /// This is an N-length vector. |
914 | /// * hypothesis_shape: The shape of the hypothesis list SparseTensor. |
915 | /// This is an R-length vector. |
916 | /// * truth_indices: The indices of the truth list SparseTensor. |
917 | /// This is an M x R int64 matrix. |
918 | /// * truth_values: The values of the truth list SparseTensor. |
919 | /// This is an M-length vector. |
/// * truth_shape: The shape of the truth list SparseTensor.
/// This is an R-length vector.
921 | /// |
922 | /// Optional attributes (see `Attrs`): |
923 | /// * normalize: boolean (if true, edit distances are normalized by length of truth). |
924 | /// |
925 | /// The output is: |
926 | /// |
927 | /// Returns: |
928 | /// * `Output`: A dense float tensor with rank R - 1. |
929 | /// |
930 | /// For the example input: |
931 | /// |
932 | /// // hypothesis represents a 2x1 matrix with variable-length values: |
933 | /// // (0,0) = ["a"] |
934 | /// // (1,0) = ["b"] |
935 | /// hypothesis_indices = [[0, 0, 0], |
936 | /// [1, 0, 0]] |
937 | /// hypothesis_values = ["a", "b"] |
938 | /// hypothesis_shape = [2, 1, 1] |
939 | /// |
940 | /// // truth represents a 2x2 matrix with variable-length values: |
941 | /// // (0,0) = [] |
942 | /// // (0,1) = ["a"] |
943 | /// // (1,0) = ["b", "c"] |
944 | /// // (1,1) = ["a"] |
945 | /// truth_indices = [[0, 1, 0], |
946 | /// [1, 0, 0], |
947 | /// [1, 0, 1], |
948 | /// [1, 1, 0]] |
949 | /// truth_values = ["a", "b", "c", "a"] |
950 | /// truth_shape = [2, 2, 2] |
951 | /// normalize = true |
952 | /// |
953 | /// The output will be: |
954 | /// |
955 | /// // output is a 2x2 matrix with edit distances normalized by truth lengths. |
956 | /// output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis |
957 | /// [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis |
958 | class EditDistance { |
959 | public: |
960 | /// Optional attribute setters for EditDistance |
961 | struct Attrs { |
962 | /// boolean (if true, edit distances are normalized by length of truth). |
963 | /// |
964 | /// The output is: |
965 | /// |
966 | /// Defaults to true |
967 | TF_MUST_USE_RESULT Attrs Normalize(bool x) { |
968 | Attrs ret = *this; |
969 | ret.normalize_ = x; |
970 | return ret; |
971 | } |
972 | |
973 | bool normalize_ = true; |
974 | }; |
975 | EditDistance(const ::tensorflow::Scope& scope, ::tensorflow::Input |
976 | hypothesis_indices, ::tensorflow::Input hypothesis_values, |
977 | ::tensorflow::Input hypothesis_shape, ::tensorflow::Input |
978 | truth_indices, ::tensorflow::Input truth_values, |
979 | ::tensorflow::Input truth_shape); |
980 | EditDistance(const ::tensorflow::Scope& scope, ::tensorflow::Input |
981 | hypothesis_indices, ::tensorflow::Input hypothesis_values, |
982 | ::tensorflow::Input hypothesis_shape, ::tensorflow::Input |
983 | truth_indices, ::tensorflow::Input truth_values, |
984 | ::tensorflow::Input truth_shape, const EditDistance::Attrs& attrs); |
985 | operator ::tensorflow::Output() const { return output; } |
986 | operator ::tensorflow::Input() const { return output; } |
987 | ::tensorflow::Node* node() const { return output.node(); } |
988 | |
989 | static Attrs Normalize(bool x) { |
990 | return Attrs().Normalize(x); |
991 | } |
992 | |
993 | Operation operation; |
994 | ::tensorflow::Output output; |
995 | }; |
996 | |
997 | /// Creates a tensor with the given shape. |
998 | /// |
999 | /// This operation creates a tensor of `shape` and `dtype`. |
1000 | /// |
1001 | /// Args: |
1002 | /// * scope: A Scope object |
1003 | /// * shape: 1-D. Represents the shape of the output tensor. |
1004 | /// |
1005 | /// Optional attributes (see `Attrs`): |
/// * init: If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initialize the tensor's content.
1007 | /// |
1008 | /// Returns: |
1009 | /// * `Output`: A `Tensor` of type `T`. |
1010 | class Empty { |
1011 | public: |
1012 | /// Optional attribute setters for Empty |
1013 | struct Attrs { |
/// If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initialize the tensor's content.
1015 | /// |
1016 | /// Defaults to false |
1017 | TF_MUST_USE_RESULT Attrs Init(bool x) { |
1018 | Attrs ret = *this; |
1019 | ret.init_ = x; |
1020 | return ret; |
1021 | } |
1022 | |
1023 | bool init_ = false; |
1024 | }; |
1025 | Empty(const ::tensorflow::Scope& scope, ::tensorflow::Input shape, DataType |
1026 | dtype); |
1027 | Empty(const ::tensorflow::Scope& scope, ::tensorflow::Input shape, DataType |
1028 | dtype, const Empty::Attrs& attrs); |
1029 | operator ::tensorflow::Output() const { return output; } |
1030 | operator ::tensorflow::Input() const { return output; } |
1031 | ::tensorflow::Node* node() const { return output.node(); } |
1032 | |
1033 | static Attrs Init(bool x) { |
1034 | return Attrs().Init(x); |
1035 | } |
1036 | |
1037 | Operation operation; |
1038 | ::tensorflow::Output output; |
1039 | }; |
1040 | |
1041 | /// Ensures that the tensor's shape matches the expected shape. |
1042 | /// |
1043 | /// Raises an error if the input tensor's shape does not match the specified shape. |
1044 | /// Returns the input tensor otherwise. |
1045 | /// |
1046 | /// Args: |
1047 | /// * scope: A Scope object |
1048 | /// * input: A tensor, whose shape is to be validated. |
1049 | /// * shape: The expected (possibly partially specified) shape of the input tensor. |
1050 | /// |
1051 | /// Returns: |
1052 | /// * `Output`: A tensor with the same shape and contents as the input tensor or value. |
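///
/// A minimal C++ usage sketch (assumes a valid root scope):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto x = Const(root, {{1.0f, 2.0f}});  // shape [1, 2]
/// // Validates at run time that x has shape [?, 2] (unknown row count).
/// auto y = EnsureShape(root, x, PartialTensorShape({-1, 2}));
/// ```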
1053 | class EnsureShape { |
1054 | public: |
1055 | EnsureShape(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
1056 | PartialTensorShape shape); |
1057 | operator ::tensorflow::Output() const { return output; } |
1058 | operator ::tensorflow::Input() const { return output; } |
1059 | ::tensorflow::Node* node() const { return output.node(); } |
1060 | |
1061 | Operation operation; |
1062 | ::tensorflow::Output output; |
1063 | }; |
1064 | |
1065 | /// Inserts a dimension of 1 into a tensor's shape. |
1066 | /// |
1067 | /// Given a tensor `input`, this operation inserts a dimension of 1 at the |
1068 | /// dimension index `axis` of `input`'s shape. The dimension index `axis` starts at |
1069 | /// zero; if you specify a negative number for `axis` it is counted backward from |
1070 | /// the end. |
1071 | /// |
1072 | /// This operation is useful if you want to add a batch dimension to a single |
1073 | /// element. For example, if you have a single image of shape `[height, width, |
1074 | /// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, |
1075 | /// which will make the shape `[1, height, width, channels]`. |
1076 | /// |
1077 | /// Other examples: |
1078 | /// |
1079 | /// ``` |
1080 | /// # 't' is a tensor of shape [2] |
1081 | /// shape(expand_dims(t, 0)) ==> [1, 2] |
1082 | /// shape(expand_dims(t, 1)) ==> [2, 1] |
1083 | /// shape(expand_dims(t, -1)) ==> [2, 1] |
1084 | /// |
1085 | /// # 't2' is a tensor of shape [2, 3, 5] |
1086 | /// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] |
1087 | /// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] |
1088 | /// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] |
1089 | /// ``` |
1090 | /// |
1091 | /// This operation requires that: |
1092 | /// |
1093 | /// `-1-input.dims() <= dim <= input.dims()` |
1094 | /// |
1095 | /// This operation is related to `squeeze()`, which removes dimensions of |
1096 | /// size 1. |
1097 | /// |
1098 | /// Args: |
1099 | /// * scope: A Scope object |
1100 | /// * axis: 0-D (scalar). Specifies the dimension index at which to |
1101 | /// expand the shape of `input`. Must be in the range |
1102 | /// `[-rank(input) - 1, rank(input)]`. |
1103 | /// |
1104 | /// Returns: |
1105 | /// * `Output`: Contains the same data as `input`, but its shape has an additional |
1106 | /// dimension of size 1 added. |
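///
/// A minimal C++ usage sketch, mirroring the shape examples above (assumes a
/// valid root scope):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto t = Const(root, {1, 2});            // shape [2]
/// auto expanded = ExpandDims(root, t, 0);  // shape [1, 2]
/// ```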
1107 | class ExpandDims { |
1108 | public: |
1109 | ExpandDims(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
1110 | ::tensorflow::Input axis); |
1111 | operator ::tensorflow::Output() const { return output; } |
1112 | operator ::tensorflow::Input() const { return output; } |
1113 | ::tensorflow::Node* node() const { return output.node(); } |
1114 | |
1115 | Operation operation; |
1116 | ::tensorflow::Output output; |
1117 | }; |
1118 | |
1119 | /// Extract `patches` from `images` and put them in the "depth" output dimension. |
1120 | /// |
1121 | /// Args: |
1122 | /// * scope: A Scope object |
1123 | /// * images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. |
1124 | /// * ksizes: The size of the sliding window for each dimension of `images`. |
1125 | /// * strides: How far the centers of two consecutive patches are in |
1126 | /// the images. Must be: `[1, stride_rows, stride_cols, 1]`. |
1127 | /// * rates: Must be: `[1, rate_rows, rate_cols, 1]`. This is the |
1128 | /// input stride, specifying how far two consecutive patch samples are in the |
1129 | /// input. Equivalent to extracting patches with |
1130 | /// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by |
1131 | /// subsampling them spatially by a factor of `rates`. This is equivalent to |
1132 | /// `rate` in dilated (a.k.a. Atrous) convolutions. |
1133 | /// * padding: The type of padding algorithm to use. |
1134 | /// |
1135 | /// Returns: |
1136 | /// * `Output`: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * |
1137 | /// ksize_cols * depth]` containing image patches with size |
1138 | /// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note |
1139 | /// `out_rows` and `out_cols` are the dimensions of the output patches. |
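///
/// A minimal C++ usage sketch (assumes a valid root scope and a float `images`
/// tensor of shape `[batch, in_rows, in_cols, depth]` produced elsewhere):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// // 3x3 patches at every pixel, no dilation, SAME padding.
/// auto patches = ExtractImagePatches(root, images, {1, 3, 3, 1},
///                                    {1, 1, 1, 1}, {1, 1, 1, 1}, "SAME");
/// ```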
class ExtractImagePatches {
 public:
  ExtractImagePatches(const ::tensorflow::Scope& scope, ::tensorflow::Input
                    images, const gtl::ArraySlice<int>& ksizes, const
                    gtl::ArraySlice<int>& strides, const gtl::ArraySlice<int>&
                    rates, StringPiece padding);
  operator ::tensorflow::Output() const { return patches; }
  operator ::tensorflow::Input() const { return patches; }
  ::tensorflow::Node* node() const { return patches.node(); }

  Operation operation;
  ::tensorflow::Output patches;
};
1153 | |
1154 | /// Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension of `extract_image_patches`. |
1155 | /// |
1156 | /// Args: |
1157 | /// * scope: A Scope object |
1158 | /// * input: 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. |
1159 | /// * ksizes: The size of the sliding window for each dimension of `input`. |
1160 | /// * strides: 1-D of length 5. How far the centers of two consecutive patches are in |
1161 | /// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. |
1162 | /// * padding: The type of padding algorithm to use. |
1163 | /// |
1164 | /// The size-related attributes are specified as follows: |
1165 | /// |
1166 | /// ```python |
1167 | /// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] |
1168 | /// strides = [1, stride_planes, strides_rows, strides_cols, 1] |
1169 | /// ``` |
1170 | /// |
1171 | /// Returns: |
1172 | /// * `Output`: 5-D Tensor with shape `[batch, out_planes, out_rows, out_cols, |
1173 | /// ksize_planes * ksize_rows * ksize_cols * depth]` containing patches |
1174 | /// with size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized |
1175 | /// in the "depth" dimension. Note `out_planes`, `out_rows` and `out_cols` |
1176 | /// are the dimensions of the output patches. |
class ExtractVolumePatches {
 public:
  ExtractVolumePatches(const ::tensorflow::Scope& scope, ::tensorflow::Input
                     input, const gtl::ArraySlice<int>& ksizes, const
                     gtl::ArraySlice<int>& strides, StringPiece padding);
  operator ::tensorflow::Output() const { return patches; }
  operator ::tensorflow::Input() const { return patches; }
  ::tensorflow::Node* node() const { return patches.node(); }

  Operation operation;
  ::tensorflow::Output patches;
};
1189 | |
1190 | /// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. |
1191 | /// |
1192 | /// Attributes |
1193 | /// |
1194 | /// * `[min; max]` define the clamping range for the `inputs` data. |
1195 | /// * `inputs` values are quantized into the quantization range ( |
1196 | /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` |
1197 | /// when it is true) and then de-quantized and output as floats in `[min; max]` |
1198 | /// interval. |
1199 | /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. |
1200 | /// |
1201 | /// Before quantization, `min` and `max` values are adjusted with the following |
1202 | /// logic. |
1203 | /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, |
1204 | /// the behavior can be unexpected: |
1205 | /// |
1206 | /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. |
1207 | /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. |
1208 | /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, |
1209 | /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. |
1210 | /// |
1211 | /// Quantization is called fake since the output is still in floating point. |
1212 | /// |
1213 | /// Args: |
1214 | /// * scope: A Scope object |
1215 | /// |
1216 | /// Returns: |
1217 | /// * `Output`: The outputs tensor. |
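///
/// A minimal C++ usage sketch showing the attribute setters (assumes a valid
/// root scope):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto in = Const(root, {-1.0f, 0.5f, 3.0f, 7.0f});
/// // Clamp to [0, 6], quantize to 8 bits, then de-quantize back to float.
/// auto out = FakeQuantWithMinMaxArgs(root, in,
///     FakeQuantWithMinMaxArgs::Min(0.0f).Max(6.0f).NumBits(8));
/// ```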
1218 | class FakeQuantWithMinMaxArgs { |
1219 | public: |
1220 | /// Optional attribute setters for FakeQuantWithMinMaxArgs |
1221 | struct Attrs { |
1222 | /// Defaults to -6 |
1223 | TF_MUST_USE_RESULT Attrs Min(float x) { |
1224 | Attrs ret = *this; |
1225 | ret.min_ = x; |
1226 | return ret; |
1227 | } |
1228 | |
1229 | /// Defaults to 6 |
1230 | TF_MUST_USE_RESULT Attrs Max(float x) { |
1231 | Attrs ret = *this; |
1232 | ret.max_ = x; |
1233 | return ret; |
1234 | } |
1235 | |
1236 | /// Defaults to 8 |
1237 | TF_MUST_USE_RESULT Attrs NumBits(int64 x) { |
1238 | Attrs ret = *this; |
1239 | ret.num_bits_ = x; |
1240 | return ret; |
1241 | } |
1242 | |
1243 | /// Defaults to false |
1244 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
1245 | Attrs ret = *this; |
1246 | ret.narrow_range_ = x; |
1247 | return ret; |
1248 | } |
1249 | |
1250 | float min_ = -6.0f; |
1251 | float max_ = 6.0f; |
1252 | int64 num_bits_ = 8; |
1253 | bool narrow_range_ = false; |
1254 | }; |
1255 | FakeQuantWithMinMaxArgs(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1256 | inputs); |
1257 | FakeQuantWithMinMaxArgs(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1258 | inputs, const FakeQuantWithMinMaxArgs::Attrs& attrs); |
1259 | operator ::tensorflow::Output() const { return outputs; } |
1260 | operator ::tensorflow::Input() const { return outputs; } |
1261 | ::tensorflow::Node* node() const { return outputs.node(); } |
1262 | |
1263 | static Attrs Min(float x) { |
1264 | return Attrs().Min(x); |
1265 | } |
1266 | static Attrs Max(float x) { |
1267 | return Attrs().Max(x); |
1268 | } |
1269 | static Attrs NumBits(int64 x) { |
1270 | return Attrs().NumBits(x); |
1271 | } |
1272 | static Attrs NarrowRange(bool x) { |
1273 | return Attrs().NarrowRange(x); |
1274 | } |
1275 | |
1276 | Operation operation; |
1277 | ::tensorflow::Output outputs; |
1278 | }; |
1279 | |
1280 | /// Compute gradients for a FakeQuantWithMinMaxArgs operation. |
1281 | /// |
1282 | /// Args: |
1283 | /// * scope: A Scope object |
1284 | /// * gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. |
1285 | /// * inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation. |
1286 | /// |
1287 | /// Returns: |
1288 | /// * `Output`: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: |
1289 | /// `gradients * (inputs >= min && inputs <= max)`. |
1290 | class FakeQuantWithMinMaxArgsGradient { |
1291 | public: |
1292 | /// Optional attribute setters for FakeQuantWithMinMaxArgsGradient |
1293 | struct Attrs { |
1294 | /// Defaults to -6 |
1295 | TF_MUST_USE_RESULT Attrs Min(float x) { |
1296 | Attrs ret = *this; |
1297 | ret.min_ = x; |
1298 | return ret; |
1299 | } |
1300 | |
1301 | /// Defaults to 6 |
1302 | TF_MUST_USE_RESULT Attrs Max(float x) { |
1303 | Attrs ret = *this; |
1304 | ret.max_ = x; |
1305 | return ret; |
1306 | } |
1307 | |
1308 | /// Defaults to 8 |
1309 | TF_MUST_USE_RESULT Attrs NumBits(int64 x) { |
1310 | Attrs ret = *this; |
1311 | ret.num_bits_ = x; |
1312 | return ret; |
1313 | } |
1314 | |
1315 | /// Defaults to false |
1316 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
1317 | Attrs ret = *this; |
1318 | ret.narrow_range_ = x; |
1319 | return ret; |
1320 | } |
1321 | |
1322 | float min_ = -6.0f; |
1323 | float max_ = 6.0f; |
1324 | int64 num_bits_ = 8; |
1325 | bool narrow_range_ = false; |
1326 | }; |
1327 | FakeQuantWithMinMaxArgsGradient(const ::tensorflow::Scope& scope, |
1328 | ::tensorflow::Input gradients, |
1329 | ::tensorflow::Input inputs); |
1330 | FakeQuantWithMinMaxArgsGradient(const ::tensorflow::Scope& scope, |
1331 | ::tensorflow::Input gradients, |
1332 | ::tensorflow::Input inputs, const |
1333 | FakeQuantWithMinMaxArgsGradient::Attrs& attrs); |
1334 | operator ::tensorflow::Output() const { return backprops; } |
1335 | operator ::tensorflow::Input() const { return backprops; } |
1336 | ::tensorflow::Node* node() const { return backprops.node(); } |
1337 | |
1338 | static Attrs Min(float x) { |
1339 | return Attrs().Min(x); |
1340 | } |
1341 | static Attrs Max(float x) { |
1342 | return Attrs().Max(x); |
1343 | } |
1344 | static Attrs NumBits(int64 x) { |
1345 | return Attrs().NumBits(x); |
1346 | } |
1347 | static Attrs NarrowRange(bool x) { |
1348 | return Attrs().NarrowRange(x); |
1349 | } |
1350 | |
1351 | Operation operation; |
1352 | ::tensorflow::Output backprops; |
1353 | }; |
1354 | |
1355 | /// Fake-quantize the 'inputs' tensor of type float via global float scalars |
1356 | /// |
1357 | /// Fake-quantize the `inputs` tensor of type float via global float scalars |
1358 | /// `min` and `max` to `outputs` tensor of same shape as `inputs`. |
1359 | /// |
1360 | /// Attributes |
1361 | /// |
1362 | /// * `[min; max]` define the clamping range for the `inputs` data. |
1363 | /// * `inputs` values are quantized into the quantization range ( |
1364 | /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` |
1365 | /// when it is true) and then de-quantized and output as floats in `[min; max]` |
1366 | /// interval. |
1367 | /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. |
1368 | /// |
1369 | /// Before quantization, `min` and `max` values are adjusted with the following |
1370 | /// logic. |
1371 | /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, |
1372 | /// the behavior can be unexpected: |
1373 | /// |
1374 | /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. |
1375 | /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. |
1376 | /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, |
1377 | /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. |
1378 | /// |
1379 | /// This operation has a gradient and thus allows for training `min` and `max` |
1380 | /// values. |
1381 | /// |
1382 | /// Args: |
1383 | /// * scope: A Scope object |
1384 | /// |
1385 | /// Returns: |
1386 | /// * `Output`: The outputs tensor. |
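///
/// A minimal C++ usage sketch (assumes a valid root scope; `min` and `max` are
/// ordinary float inputs here, which is what makes them trainable):
///
/// ```
/// Scope root = Scope::NewRootScope();
/// auto in = Const(root, {-1.0f, 0.5f, 3.0f, 7.0f});
/// auto out = FakeQuantWithMinMaxVars(root, in, 0.0f, 6.0f);
/// ```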
1387 | class FakeQuantWithMinMaxVars { |
1388 | public: |
1389 | /// Optional attribute setters for FakeQuantWithMinMaxVars |
1390 | struct Attrs { |
1391 | /// Defaults to 8 |
1392 | TF_MUST_USE_RESULT Attrs NumBits(int64 x) { |
1393 | Attrs ret = *this; |
1394 | ret.num_bits_ = x; |
1395 | return ret; |
1396 | } |
1397 | |
1398 | /// Defaults to false |
1399 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
1400 | Attrs ret = *this; |
1401 | ret.narrow_range_ = x; |
1402 | return ret; |
1403 | } |
1404 | |
1405 | int64 num_bits_ = 8; |
1406 | bool narrow_range_ = false; |
1407 | }; |
1408 | FakeQuantWithMinMaxVars(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1409 | inputs, ::tensorflow::Input min, ::tensorflow::Input |
1410 | max); |
1411 | FakeQuantWithMinMaxVars(const ::tensorflow::Scope& scope, ::tensorflow::Input |
1412 | inputs, ::tensorflow::Input min, ::tensorflow::Input |
1413 | max, const FakeQuantWithMinMaxVars::Attrs& attrs); |
1414 | operator ::tensorflow::Output() const { return outputs; } |
1415 | operator ::tensorflow::Input() const { return outputs; } |
1416 | ::tensorflow::Node* node() const { return outputs.node(); } |
1417 | |
1418 | static Attrs NumBits(int64 x) { |
1419 | return Attrs().NumBits(x); |
1420 | } |
1421 | static Attrs NarrowRange(bool x) { |
1422 | return Attrs().NarrowRange(x); |
1423 | } |
1424 | |
1425 | Operation operation; |
1426 | ::tensorflow::Output outputs; |
1427 | }; |
1428 | |
1429 | /// Compute gradients for a FakeQuantWithMinMaxVars operation. |
1430 | /// |
1431 | /// Args: |
1432 | /// * scope: A Scope object |
1433 | /// * gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation. |
1434 | /// * inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. |
/// * min, max: Quantization interval, scalar floats.
1436 | /// |
1437 | /// Optional attributes (see `Attrs`): |
1438 | /// * num_bits: The bitwidth of the quantization; between 2 and 8, inclusive. |
1439 | /// * narrow_range: Whether to quantize into 2^num_bits - 1 distinct values. |
1440 | /// |
1441 | /// Returns: |
1442 | /// * `Output` backprops_wrt_input: Backpropagated gradients w.r.t. inputs: |
1443 | /// `gradients * (inputs >= min && inputs <= max)`. |
1444 | /// * `Output` backprop_wrt_min: Backpropagated gradients w.r.t. min parameter: |
1445 | /// `sum(gradients * (inputs < min))`. |
1446 | /// * `Output` backprop_wrt_max: Backpropagated gradients w.r.t. max parameter: |
1447 | /// `sum(gradients * (inputs > max))`. |
1448 | class FakeQuantWithMinMaxVarsGradient { |
1449 | public: |
1450 | /// Optional attribute setters for FakeQuantWithMinMaxVarsGradient |
1451 | struct Attrs { |
1452 | /// The bitwidth of the quantization; between 2 and 8, inclusive. |
1453 | /// |
1454 | /// Defaults to 8 |
1455 | TF_MUST_USE_RESULT Attrs NumBits(int64 x) { |
1456 | Attrs ret = *this; |
1457 | ret.num_bits_ = x; |
1458 | return ret; |
1459 | } |
1460 | |
1461 | /// Whether to quantize into 2^num_bits - 1 distinct values. |
1462 | /// |
1463 | /// Defaults to false |
1464 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
1465 | Attrs ret = *this; |
1466 | ret.narrow_range_ = x; |
1467 | return ret; |
1468 | } |
1469 | |
1470 | int64 num_bits_ = 8; |
1471 | bool narrow_range_ = false; |
1472 | }; |
1473 | FakeQuantWithMinMaxVarsGradient(const ::tensorflow::Scope& scope, |
1474 | ::tensorflow::Input gradients, |
1475 | ::tensorflow::Input inputs, ::tensorflow::Input |
1476 | min, ::tensorflow::Input max); |
1477 | FakeQuantWithMinMaxVarsGradient(const ::tensorflow::Scope& scope, |
1478 | ::tensorflow::Input gradients, |
1479 | ::tensorflow::Input inputs, ::tensorflow::Input |
1480 | min, ::tensorflow::Input max, const |
1481 | FakeQuantWithMinMaxVarsGradient::Attrs& attrs); |
1482 | |
1483 | static Attrs NumBits(int64 x) { |
1484 | return Attrs().NumBits(x); |
1485 | } |
1486 | static Attrs NarrowRange(bool x) { |
1487 | return Attrs().NarrowRange(x); |
1488 | } |
1489 | |
1490 | Operation operation; |
1491 | ::tensorflow::Output backprops_wrt_input; |
1492 | ::tensorflow::Output backprop_wrt_min; |
1493 | ::tensorflow::Output backprop_wrt_max; |
1494 | }; |
1495 | |
1496 | /// Fake-quantize the 'inputs' tensor of type float via per-channel floats |
1497 | /// |
/// Fake-quantize the `inputs` tensor of type float, having one of the shapes
/// `[d]`, `[b, d]`, or `[b, h, w, d]`, via per-channel floats `min` and `max`
/// of shape `[d]`, to an `outputs` tensor of the same shape as `inputs`.
1501 | /// |
1502 | /// Attributes |
1503 | /// |
1504 | /// * `[min; max]` define the clamping range for the `inputs` data. |
/// * `inputs` values are quantized into the quantization range (
/// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
/// when it is true) and then de-quantized and output as floats in the
/// `[min; max]` interval.
1509 | /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. |
1510 | /// |
1511 | /// Before quantization, `min` and `max` values are adjusted with the following |
1512 | /// logic. |
1513 | /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, |
1514 | /// the behavior can be unexpected: |
1515 | /// |
1516 | /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. |
1517 | /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. |
/// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
1519 | /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. |
1520 | /// |
1521 | /// This operation has a gradient and thus allows for training `min` and `max` |
1522 | /// values. |
1523 | /// |
1524 | /// Args: |
1525 | /// * scope: A Scope object |
1526 | /// |
1527 | /// Returns: |
1528 | /// * `Output`: The outputs tensor. |
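///
/// A per-channel C++ sketch (illustrative; reuses the includes and session
/// setup from the FakeQuantWithMinMaxVars sketch above; values are assumed):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// // Shape [b, d] = [2, 2] inputs; per-channel ranges of shape [d] = [2].
/// auto x = Const(root, {{0.3f, 2.0f}, {-0.4f, 0.5f}});
/// auto range_min = Const(root, {-1.0f, 0.0f});
/// auto range_max = Const(root, {1.0f, 1.0f});
/// auto fq = FakeQuantWithMinMaxVarsPerChannel(root, x, range_min, range_max);
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({fq}, &out));  // out[0] has shape [2, 2]
/// ```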
1529 | class FakeQuantWithMinMaxVarsPerChannel { |
1530 | public: |
1531 | /// Optional attribute setters for FakeQuantWithMinMaxVarsPerChannel |
1532 | struct Attrs { |
1533 | /// Defaults to 8 |
1534 | TF_MUST_USE_RESULT Attrs NumBits(int64 x) { |
1535 | Attrs ret = *this; |
1536 | ret.num_bits_ = x; |
1537 | return ret; |
1538 | } |
1539 | |
1540 | /// Defaults to false |
1541 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
1542 | Attrs ret = *this; |
1543 | ret.narrow_range_ = x; |
1544 | return ret; |
1545 | } |
1546 | |
1547 | int64 num_bits_ = 8; |
1548 | bool narrow_range_ = false; |
1549 | }; |
1550 | FakeQuantWithMinMaxVarsPerChannel(const ::tensorflow::Scope& scope, |
1551 | ::tensorflow::Input inputs, |
1552 | ::tensorflow::Input min, ::tensorflow::Input |
1553 | max); |
1554 | FakeQuantWithMinMaxVarsPerChannel(const ::tensorflow::Scope& scope, |
1555 | ::tensorflow::Input inputs, |
1556 | ::tensorflow::Input min, ::tensorflow::Input |
1557 | max, const |
1558 | FakeQuantWithMinMaxVarsPerChannel::Attrs& |
1559 | attrs); |
1560 | operator ::tensorflow::Output() const { return outputs; } |
1561 | operator ::tensorflow::Input() const { return outputs; } |
1562 | ::tensorflow::Node* node() const { return outputs.node(); } |
1563 | |
1564 | static Attrs NumBits(int64 x) { |
1565 | return Attrs().NumBits(x); |
1566 | } |
1567 | static Attrs NarrowRange(bool x) { |
1568 | return Attrs().NarrowRange(x); |
1569 | } |
1570 | |
1571 | Operation operation; |
1572 | ::tensorflow::Output outputs; |
1573 | }; |
1574 | |
1575 | /// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. |
1576 | /// |
1577 | /// Args: |
1578 | /// * scope: A Scope object |
/// * gradients: Backpropagated gradients above the
/// FakeQuantWithMinMaxVarsPerChannel operation, shape one of: `[d]`, `[b, d]`,
/// `[b, h, w, d]`.
/// * inputs: Values passed as inputs to the FakeQuantWithMinMaxVarsPerChannel
/// operation, shape same as `gradients`.
/// * min, max: Quantization interval, floats of shape `[d]`.
1584 | /// |
1585 | /// Optional attributes (see `Attrs`): |
1586 | /// * num_bits: The bitwidth of the quantization; between 2 and 16, inclusive. |
1587 | /// * narrow_range: Whether to quantize into 2^num_bits - 1 distinct values. |
1588 | /// |
1589 | /// Returns: |
1590 | /// * `Output` backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as |
1591 | /// `inputs`: |
1592 | /// `gradients * (inputs >= min && inputs <= max)`. |
1593 | /// * `Output` backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`: |
1594 | /// `sum_per_d(gradients * (inputs < min))`. |
1595 | /// * `Output` backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`: |
1596 | /// `sum_per_d(gradients * (inputs > max))`. |
1597 | class FakeQuantWithMinMaxVarsPerChannelGradient { |
1598 | public: |
1599 | /// Optional attribute setters for FakeQuantWithMinMaxVarsPerChannelGradient |
1600 | struct Attrs { |
1601 | /// The bitwidth of the quantization; between 2 and 16, inclusive. |
1602 | /// |
1603 | /// Defaults to 8 |
1604 | TF_MUST_USE_RESULT Attrs NumBits(int64 x) { |
1605 | Attrs ret = *this; |
1606 | ret.num_bits_ = x; |
1607 | return ret; |
1608 | } |
1609 | |
1610 | /// Whether to quantize into 2^num_bits - 1 distinct values. |
1611 | /// |
1612 | /// Defaults to false |
1613 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
1614 | Attrs ret = *this; |
1615 | ret.narrow_range_ = x; |
1616 | return ret; |
1617 | } |
1618 | |
1619 | int64 num_bits_ = 8; |
1620 | bool narrow_range_ = false; |
1621 | }; |
1622 | FakeQuantWithMinMaxVarsPerChannelGradient(const ::tensorflow::Scope& scope, |
1623 | ::tensorflow::Input gradients, |
1624 | ::tensorflow::Input inputs, |
1625 | ::tensorflow::Input min, |
1626 | ::tensorflow::Input max); |
1627 | FakeQuantWithMinMaxVarsPerChannelGradient(const ::tensorflow::Scope& scope, |
1628 | ::tensorflow::Input gradients, |
1629 | ::tensorflow::Input inputs, |
1630 | ::tensorflow::Input min, |
1631 | ::tensorflow::Input max, const |
1632 | FakeQuantWithMinMaxVarsPerChannelGradient::Attrs& |
1633 | attrs); |
1634 | |
1635 | static Attrs NumBits(int64 x) { |
1636 | return Attrs().NumBits(x); |
1637 | } |
1638 | static Attrs NarrowRange(bool x) { |
1639 | return Attrs().NarrowRange(x); |
1640 | } |
1641 | |
1642 | Operation operation; |
1643 | ::tensorflow::Output backprops_wrt_input; |
1644 | ::tensorflow::Output backprop_wrt_min; |
1645 | ::tensorflow::Output backprop_wrt_max; |
1646 | }; |
1647 | |
1648 | /// Creates a tensor filled with a scalar value. |
1649 | /// |
1650 | /// This operation creates a tensor of shape `dims` and fills it with `value`. |
1651 | /// |
1652 | /// For example: |
1653 | /// |
1654 | /// ``` |
1655 | /// # Output tensor has shape [2, 3]. |
/// fill([2, 3], 9) ==> [[9, 9, 9],
///                      [9, 9, 9]]
1658 | /// ``` |
1659 | /// |
1660 | /// `tf.fill` differs from `tf.constant` in a few ways: |
1661 | /// |
1662 | /// * `tf.fill` only supports scalar contents, whereas `tf.constant` supports |
1663 | /// Tensor values. |
1664 | /// * `tf.fill` creates an Op in the computation graph that constructs the actual |
1665 | /// Tensor value at runtime. This is in contrast to `tf.constant` which embeds |
1666 | /// the entire Tensor into the graph with a `Const` node. |
1667 | /// * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes |
1668 | /// based on other runtime Tensors, unlike `tf.constant`. |
1669 | /// |
1670 | /// Args: |
1671 | /// * scope: A Scope object |
1672 | /// * dims: 1-D. Represents the shape of the output tensor. |
1673 | /// * value: 0-D (scalar). Value to fill the returned tensor. |
1674 | /// |
1675 | /// @compatibility(numpy) |
1676 | /// Equivalent to np.full |
1677 | /// @end_compatibility |
1678 | /// |
1679 | /// Returns: |
1680 | /// * `Output`: The output tensor. |
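///
/// A C++ sketch of the example above (illustrative; reuses the includes and
/// session setup from the FakeQuantWithMinMaxVars sketch):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto filled = Fill(root, Const(root, {2, 3}), Const(root, 9));
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({filled}, &out));  // out[0]: a 2x3 tensor of 9s
/// ```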
1681 | class Fill { |
1682 | public: |
1683 | Fill(const ::tensorflow::Scope& scope, ::tensorflow::Input dims, |
1684 | ::tensorflow::Input value); |
1685 | operator ::tensorflow::Output() const { return output; } |
1686 | operator ::tensorflow::Input() const { return output; } |
1687 | ::tensorflow::Node* node() const { return output.node(); } |
1688 | |
1689 | Operation operation; |
1690 | ::tensorflow::Output output; |
1691 | }; |
1692 | |
1693 | /// Generates fingerprint values. |
1694 | /// |
1695 | /// Generates fingerprint values of `data`. |
1696 | /// |
1697 | /// Fingerprint op considers the first dimension of `data` as the batch dimension, |
1698 | /// and `output[i]` contains the fingerprint value generated from contents in |
1699 | /// `data[i, ...]` for all `i`. |
1700 | /// |
1701 | /// Fingerprint op writes fingerprint values as byte arrays. For example, the |
1702 | /// default method `farmhash64` generates a 64-bit fingerprint value at a time. |
/// This 8-byte value is written out as a `uint8` array of size 8, in little-endian
1704 | /// order. |
1705 | /// |
1706 | /// For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), |
1707 | /// and that the fingerprint method is `farmhash64`. In this case, the output shape |
1708 | /// is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of |
1709 | /// each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in |
1710 | /// `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers |
1711 | /// in `data[1, :, :]`. |
1712 | /// |
/// Note that this op fingerprints the raw underlying buffer, and it does not
/// fingerprint a Tensor's metadata such as data type and/or shape. For example, the
/// fingerprint values are invariant under reshapes and bitcasts as long as the
/// batch dimension remains the same:
1717 | /// |
1718 | /// ``` |
1719 | /// Fingerprint(data) == Fingerprint(Reshape(data, ...)) |
1720 | /// Fingerprint(data) == Fingerprint(Bitcast(data, ...)) |
1721 | /// ``` |
1722 | /// |
1723 | /// For string data, one should expect `Fingerprint(data) != |
1724 | /// Fingerprint(ReduceJoin(data))` in general. |
1725 | /// |
1726 | /// Args: |
1727 | /// * scope: A Scope object |
1728 | /// * data: Must have rank 1 or higher. |
1729 | /// * method: Fingerprint method used by this op. Currently available method is |
1730 | /// `farmhash::fingerprint64`. |
1731 | /// |
1732 | /// Returns: |
1733 | /// * `Output`: A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals to |
1734 | /// `data`'s first dimension, and the second dimension size depends on the |
1735 | /// fingerprint algorithm. |
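///
/// A C++ sketch (illustrative; the method string "farmhash64" selects the
/// default method described above; setup as in the earlier sketches):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto data = Const(root, {{1, 2, 3}, {4, 5, 6}});  // batch of two rows
/// auto fp = Fingerprint(root, data, "farmhash64");
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({fp}, &out));  // out[0]: shape (2, 8), dtype uint8
/// ```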
1736 | class Fingerprint { |
1737 | public: |
1738 | Fingerprint(const ::tensorflow::Scope& scope, ::tensorflow::Input data, |
1739 | ::tensorflow::Input method); |
1740 | operator ::tensorflow::Output() const { return fingerprint; } |
1741 | operator ::tensorflow::Input() const { return fingerprint; } |
1742 | ::tensorflow::Node* node() const { return fingerprint.node(); } |
1743 | |
1744 | Operation operation; |
1745 | ::tensorflow::Output fingerprint; |
1746 | }; |
1747 | |
1748 | /// Gather slices from `params` according to `indices`. |
1749 | /// |
1750 | /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). |
1751 | /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where: |
1752 | /// |
/// ```python
/// # Scalar indices
/// output[:, ..., :] = params[indices, :, ..., :]
///
/// # Vector indices
/// output[i, :, ..., :] = params[indices[i], :, ..., :]
///
/// # Higher rank indices
/// output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]
/// ```
1763 | /// |
1764 | /// If `indices` is a permutation and `len(indices) == params.shape[0]` then |
1765 | /// this operation will permute `params` accordingly. |
1766 | /// |
1767 | /// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in |
1768 | /// `indices` are always validated to be within range. If assigned to GPU, |
1769 | /// out-of-bound indices result in safe but unspecified behavior, which may include |
1770 | /// raising an error. |
1771 | /// |
1772 | /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> |
1773 | /// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> |
1774 | /// </div> |
1775 | /// |
1776 | /// Args: |
1777 | /// * scope: A Scope object |
1778 | /// |
1779 | /// Returns: |
1780 | /// * `Output`: The output tensor. |
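///
/// A C++ sketch (illustrative; setup as in the earlier sketches):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto params = Const(root, {{1, 2}, {3, 4}, {5, 6}});
/// auto g = Gather(root, params, Const(root, {2, 0}));
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({g}, &out));  // out[0]: [[5, 6], [1, 2]]
/// ```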
1781 | class Gather { |
1782 | public: |
1783 | /// Optional attribute setters for Gather |
1784 | struct Attrs { |
1785 | /// Defaults to true |
1786 | TF_MUST_USE_RESULT Attrs ValidateIndices(bool x) { |
1787 | Attrs ret = *this; |
1788 | ret.validate_indices_ = x; |
1789 | return ret; |
1790 | } |
1791 | |
1792 | bool validate_indices_ = true; |
1793 | }; |
1794 | Gather(const ::tensorflow::Scope& scope, ::tensorflow::Input params, |
1795 | ::tensorflow::Input indices); |
1796 | Gather(const ::tensorflow::Scope& scope, ::tensorflow::Input params, |
1797 | ::tensorflow::Input indices, const Gather::Attrs& attrs); |
1798 | operator ::tensorflow::Output() const { return output; } |
1799 | operator ::tensorflow::Input() const { return output; } |
1800 | ::tensorflow::Node* node() const { return output.node(); } |
1801 | |
1802 | static Attrs ValidateIndices(bool x) { |
1803 | return Attrs().ValidateIndices(x); |
1804 | } |
1805 | |
1806 | Operation operation; |
1807 | ::tensorflow::Output output; |
1808 | }; |
1809 | |
1810 | /// Gather slices from `params` into a Tensor with shape specified by `indices`. |
1811 | /// |
1812 | /// `indices` is a K-dimensional integer tensor, best thought of as a |
1813 | /// (K-1)-dimensional tensor of indices into `params`, where each element defines a |
1814 | /// slice of `params`: |
1815 | /// |
/// output[i_0, ..., i_{K-2}] = params[indices[i_0, ..., i_{K-2}]]
1817 | /// |
1818 | /// Whereas in `tf.gather` `indices` defines slices into the `axis` |
1819 | /// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the |
1820 | /// first `N` dimensions of `params`, where `N = indices.shape[-1]`. |
1821 | /// |
1822 | /// The last dimension of `indices` can be at most the rank of |
1823 | /// `params`: |
1824 | /// |
1825 | /// indices.shape[-1] <= params.rank |
1826 | /// |
1827 | /// The last dimension of `indices` corresponds to elements |
1828 | /// (if `indices.shape[-1] == params.rank`) or slices |
1829 | /// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` |
1830 | /// of `params`. The output tensor has shape |
1831 | /// |
1832 | /// indices.shape[:-1] + params.shape[indices.shape[-1]:] |
1833 | /// |
1834 | /// Note that on CPU, if an out of bound index is found, an error is returned. |
1835 | /// On GPU, if an out of bound index is found, a 0 is stored in the |
1836 | /// corresponding output value. |
1837 | /// |
1838 | /// Some examples below. |
1839 | /// |
1840 | /// Simple indexing into a matrix: |
1841 | /// |
1842 | /// ```python |
1843 | /// indices = [[0, 0], [1, 1]] |
1844 | /// params = [['a', 'b'], ['c', 'd']] |
1845 | /// output = ['a', 'd'] |
1846 | /// ``` |
1847 | /// |
1848 | /// Slice indexing into a matrix: |
1849 | /// |
1850 | /// ```python |
1851 | /// indices = [[1], [0]] |
1852 | /// params = [['a', 'b'], ['c', 'd']] |
1853 | /// output = [['c', 'd'], ['a', 'b']] |
1854 | /// ``` |
1855 | /// |
1856 | /// Indexing into a 3-tensor: |
1857 | /// |
1858 | /// ```python |
1859 | /// indices = [[1]] |
1860 | /// params = [[['a0', 'b0'], ['c0', 'd0']], |
1861 | /// [['a1', 'b1'], ['c1', 'd1']]] |
1862 | /// output = [[['a1', 'b1'], ['c1', 'd1']]] |
1863 | /// |
1864 | /// |
1865 | /// indices = [[0, 1], [1, 0]] |
1866 | /// params = [[['a0', 'b0'], ['c0', 'd0']], |
1867 | /// [['a1', 'b1'], ['c1', 'd1']]] |
1868 | /// output = [['c0', 'd0'], ['a1', 'b1']] |
1869 | /// |
1870 | /// |
1871 | /// indices = [[0, 0, 1], [1, 0, 1]] |
1872 | /// params = [[['a0', 'b0'], ['c0', 'd0']], |
1873 | /// [['a1', 'b1'], ['c1', 'd1']]] |
1874 | /// output = ['b0', 'b1'] |
1875 | /// ``` |
1876 | /// |
1877 | /// Batched indexing into a matrix: |
1878 | /// |
1879 | /// ```python |
1880 | /// indices = [[[0, 0]], [[0, 1]]] |
1881 | /// params = [['a', 'b'], ['c', 'd']] |
1882 | /// output = [['a'], ['b']] |
1883 | /// ``` |
1884 | /// |
1885 | /// Batched slice indexing into a matrix: |
1886 | /// |
1887 | /// ```python |
1888 | /// indices = [[[1]], [[0]]] |
1889 | /// params = [['a', 'b'], ['c', 'd']] |
1890 | /// output = [[['c', 'd']], [['a', 'b']]] |
1891 | /// ``` |
1892 | /// |
1893 | /// Batched indexing into a 3-tensor: |
1894 | /// |
1895 | /// ```python |
1896 | /// indices = [[[1]], [[0]]] |
1897 | /// params = [[['a0', 'b0'], ['c0', 'd0']], |
1898 | /// [['a1', 'b1'], ['c1', 'd1']]] |
1899 | /// output = [[[['a1', 'b1'], ['c1', 'd1']]], |
1900 | /// [[['a0', 'b0'], ['c0', 'd0']]]] |
1901 | /// |
1902 | /// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] |
1903 | /// params = [[['a0', 'b0'], ['c0', 'd0']], |
1904 | /// [['a1', 'b1'], ['c1', 'd1']]] |
1905 | /// output = [[['c0', 'd0'], ['a1', 'b1']], |
1906 | /// [['a0', 'b0'], ['c1', 'd1']]] |
1907 | /// |
1908 | /// |
1909 | /// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] |
1910 | /// params = [[['a0', 'b0'], ['c0', 'd0']], |
1911 | /// [['a1', 'b1'], ['c1', 'd1']]] |
1912 | /// output = [['b0', 'b1'], ['d0', 'c1']] |
1913 | /// ``` |
1914 | /// |
1915 | /// See also `tf.gather` and `tf.batch_gather`. |
1916 | /// |
1917 | /// Args: |
1918 | /// * scope: A Scope object |
1919 | /// * params: The tensor from which to gather values. |
1920 | /// * indices: Index tensor. |
1921 | /// |
1922 | /// Returns: |
1923 | /// * `Output`: Values from `params` gathered from indices given by `indices`, with |
1924 | /// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`. |
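///
/// A C++ sketch of the "simple indexing" case above (illustrative; setup as
/// in the earlier sketches):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto params = Const(root, {{1, 2}, {3, 4}});
/// auto indices = Const(root, {{0, 0}, {1, 1}});
/// auto g = GatherNd(root, params, indices);
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({g}, &out));  // out[0]: [1, 4]
/// ```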
1925 | class GatherNd { |
1926 | public: |
1927 | GatherNd(const ::tensorflow::Scope& scope, ::tensorflow::Input params, |
1928 | ::tensorflow::Input indices); |
1929 | operator ::tensorflow::Output() const { return output; } |
1930 | operator ::tensorflow::Input() const { return output; } |
1931 | ::tensorflow::Node* node() const { return output.node(); } |
1932 | |
1933 | Operation operation; |
1934 | ::tensorflow::Output output; |
1935 | }; |
1936 | |
1937 | /// Gather slices from `params` axis `axis` according to `indices`. |
1938 | /// |
1939 | /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). |
1940 | /// Produces an output tensor with shape `params.shape[:axis] + |
1941 | /// indices.shape[batch_dims:] + params.shape[axis + 1:]` where: |
1942 | /// |
1943 | /// ```python |
1944 | /// # Scalar indices (output is rank(params) - 1). |
1945 | /// output[a_0, ..., a_n, b_0, ..., b_n] = |
1946 | /// params[a_0, ..., a_n, indices, b_0, ..., b_n] |
1947 | /// |
1948 | /// # Vector indices (output is rank(params)). |
1949 | /// output[a_0, ..., a_n, i, b_0, ..., b_n] = |
1950 | /// params[a_0, ..., a_n, indices[i], b_0, ..., b_n] |
1951 | /// |
1952 | /// # Higher rank indices (output is rank(params) + rank(indices) - 1). |
/// output[a_0, ..., a_n, i, ..., j, b_0, ..., b_n] =
1954 | /// params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] |
1955 | /// ``` |
1956 | /// |
1957 | /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> |
1958 | /// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> |
1959 | /// </div> |
1960 | /// |
1961 | /// Note that on CPU, if an out of bound index is found, an error is returned. |
1962 | /// On GPU, if an out of bound index is found, a 0 is stored in the |
1963 | /// corresponding output value. |
1964 | /// |
1965 | /// See also `tf.batch_gather` and `tf.gather_nd`. |
1966 | /// |
1967 | /// Args: |
1968 | /// * scope: A Scope object |
1969 | /// * params: The tensor from which to gather values. Must be at least rank |
1970 | /// `axis + 1`. |
1971 | /// * indices: Index tensor. Must be in range `[0, params.shape[axis])`. |
1972 | /// * axis: The axis in `params` to gather `indices` from. Defaults to the first |
1973 | /// dimension. Supports negative indexes. |
1974 | /// |
1975 | /// Returns: |
1976 | /// * `Output`: Values from `params` gathered from indices given by `indices`, with |
1977 | /// shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`. |
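///
/// A C++ sketch gathering along `axis = 1` (illustrative; setup as in the
/// earlier sketches):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto params = Const(root, {{1, 2, 3}, {4, 5, 6}});
/// auto g = GatherV2(root, params, Const(root, {2, 0}), Const(root, 1));
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({g}, &out));  // out[0]: [[3, 1], [6, 4]]
/// ```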
1978 | class GatherV2 { |
1979 | public: |
1980 | /// Optional attribute setters for GatherV2 |
1981 | struct Attrs { |
1982 | /// Defaults to 0 |
1983 | TF_MUST_USE_RESULT Attrs BatchDims(int64 x) { |
1984 | Attrs ret = *this; |
1985 | ret.batch_dims_ = x; |
1986 | return ret; |
1987 | } |
1988 | |
1989 | int64 batch_dims_ = 0; |
1990 | }; |
1991 | GatherV2(const ::tensorflow::Scope& scope, ::tensorflow::Input params, |
1992 | ::tensorflow::Input indices, ::tensorflow::Input axis); |
1993 | GatherV2(const ::tensorflow::Scope& scope, ::tensorflow::Input params, |
1994 | ::tensorflow::Input indices, ::tensorflow::Input axis, const |
1995 | GatherV2::Attrs& attrs); |
1996 | operator ::tensorflow::Output() const { return output; } |
1997 | operator ::tensorflow::Input() const { return output; } |
1998 | ::tensorflow::Node* node() const { return output.node(); } |
1999 | |
2000 | static Attrs BatchDims(int64 x) { |
2001 | return Attrs().BatchDims(x); |
2002 | } |
2003 | |
2004 | Operation operation; |
2005 | ::tensorflow::Output output; |
2006 | }; |
2007 | |
2008 | /// Gives a guarantee to the TF runtime that the input tensor is a constant. |
2009 | /// |
2010 | /// The runtime is then free to make optimizations based on this. |
2011 | /// |
/// Accepts only value-typed tensors as inputs and rejects resource variable
/// handles as input.
2014 | /// |
2015 | /// Returns the input tensor without modification. |
2016 | /// |
2017 | /// Args: |
2018 | /// * scope: A Scope object |
2019 | /// |
2020 | /// Returns: |
2021 | /// * `Output`: The output tensor. |
2022 | class GuaranteeConst { |
2023 | public: |
2024 | GuaranteeConst(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
2025 | operator ::tensorflow::Output() const { return output; } |
2026 | operator ::tensorflow::Input() const { return output; } |
2027 | ::tensorflow::Node* node() const { return output.node(); } |
2028 | |
2029 | Operation operation; |
2030 | ::tensorflow::Output output; |
2031 | }; |
2032 | |
2033 | /// Return a tensor with the same shape and contents as the input tensor or value. |
2034 | /// |
2035 | /// Args: |
2036 | /// * scope: A Scope object |
2037 | /// |
2038 | /// Returns: |
2039 | /// * `Output`: The output tensor. |
2040 | class Identity { |
2041 | public: |
2042 | Identity(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
2043 | operator ::tensorflow::Output() const { return output; } |
2044 | operator ::tensorflow::Input() const { return output; } |
2045 | ::tensorflow::Node* node() const { return output.node(); } |
2046 | |
2047 | Operation operation; |
2048 | ::tensorflow::Output output; |
2049 | }; |
2050 | |
/// Returns a list of tensors with the same shapes and contents as the input
/// tensors.
2054 | /// |
2055 | /// This op can be used to override the gradient for complicated functions. For |
2056 | /// example, suppose y = f(x) and we wish to apply a custom function g for backprop |
2057 | /// such that dx = g(dy). In Python, |
2058 | /// |
2059 | /// ```python |
2060 | /// with tf.get_default_graph().gradient_override_map( |
2061 | /// {'IdentityN': 'OverrideGradientWithG'}): |
2062 | /// y, _ = identity_n([f(x), x]) |
2063 | /// |
2064 | /// @tf.RegisterGradient('OverrideGradientWithG') |
2065 | /// def ApplyG(op, dy, _): |
2066 | /// return [None, g(dy)] # Do not backprop to f(x). |
2067 | /// ``` |
2068 | /// |
2069 | /// Args: |
2070 | /// * scope: A Scope object |
2071 | /// |
2072 | /// Returns: |
2073 | /// * `OutputList`: The output tensor. |
2074 | class IdentityN { |
2075 | public: |
2076 | IdentityN(const ::tensorflow::Scope& scope, ::tensorflow::InputList input); |
2077 | ::tensorflow::Output operator[](size_t index) const { return output[index]; } |
2078 | |
2079 | |
2080 | Operation operation; |
2081 | ::tensorflow::OutputList output; |
2082 | }; |
2083 | |
2084 | /// Returns immutable tensor from memory region. |
2085 | /// |
2086 | /// The current implementation memmaps the tensor from a file. |
2087 | /// |
2088 | /// Args: |
2089 | /// * scope: A Scope object |
2090 | /// * dtype: Type of the returned tensor. |
2091 | /// * shape: Shape of the returned tensor. |
2092 | /// * memory_region_name: Name of readonly memory region used by the tensor, see |
2093 | /// NewReadOnlyMemoryRegionFromFile in tensorflow::Env. |
2094 | /// |
2095 | /// Returns: |
/// * `Output`: The returned tensor.
2097 | class ImmutableConst { |
2098 | public: |
2099 | ImmutableConst(const ::tensorflow::Scope& scope, DataType dtype, |
2100 | PartialTensorShape shape, StringPiece memory_region_name); |
2101 | operator ::tensorflow::Output() const { return tensor; } |
2102 | operator ::tensorflow::Input() const { return tensor; } |
2103 | ::tensorflow::Node* node() const { return tensor.node(); } |
2104 | |
2105 | Operation operation; |
2106 | ::tensorflow::Output tensor; |
2107 | }; |
2108 | |
2109 | /// Adds v into specified rows of x. |
2110 | /// |
2111 | /// Computes y = x; y[i, :] += v; return y. |
2112 | /// |
2113 | /// Args: |
2114 | /// * scope: A Scope object |
2115 | /// * x: A `Tensor` of type T. |
2116 | /// * i: A vector. Indices into the left-most dimension of `x`. |
2117 | /// * v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. |
2118 | /// |
2119 | /// Returns: |
2120 | /// * `Output`: A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`. |
2121 | class InplaceAdd { |
2122 | public: |
2123 | InplaceAdd(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
2124 | ::tensorflow::Input i, ::tensorflow::Input v); |
2125 | operator ::tensorflow::Output() const { return y; } |
2126 | operator ::tensorflow::Input() const { return y; } |
2127 | ::tensorflow::Node* node() const { return y.node(); } |
2128 | |
2129 | Operation operation; |
2130 | ::tensorflow::Output y; |
2131 | }; |
2132 | |
2133 | /// Subtracts `v` into specified rows of `x`. |
2134 | /// |
2135 | /// Computes y = x; y[i, :] -= v; return y. |
2136 | /// |
2137 | /// Args: |
2138 | /// * scope: A Scope object |
2139 | /// * x: A `Tensor` of type T. |
2140 | /// * i: A vector. Indices into the left-most dimension of `x`. |
2141 | /// * v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. |
2142 | /// |
2143 | /// Returns: |
2144 | /// * `Output`: A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`. |
2145 | class InplaceSub { |
2146 | public: |
2147 | InplaceSub(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
2148 | ::tensorflow::Input i, ::tensorflow::Input v); |
2149 | operator ::tensorflow::Output() const { return y; } |
2150 | operator ::tensorflow::Input() const { return y; } |
2151 | ::tensorflow::Node* node() const { return y.node(); } |
2152 | |
2153 | Operation operation; |
2154 | ::tensorflow::Output y; |
2155 | }; |
2156 | |
2157 | /// Updates specified rows 'i' with values 'v'. |
2158 | /// |
2159 | /// Computes `x[i, :] = v; return x`. |
2160 | /// |
/// Originally this function was mutative; to support compilation, this
/// operation instead creates and operates on a copy of `x`.
2163 | /// |
2164 | /// Args: |
2165 | /// * scope: A Scope object |
2166 | /// * x: A tensor of type `T`. |
2167 | /// * i: A vector. Indices into the left-most dimension of `x`. |
2168 | /// * v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. |
2169 | /// |
2170 | /// Returns: |
2171 | /// * `Output`: A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`. |
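///
/// A C++ sketch replacing row 0 of `x` (illustrative; setup as in the
/// earlier sketches):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto x = Const(root, {{1, 2}, {3, 4}, {5, 6}});
/// auto i = Const(root, {0});           // rows to update
/// auto v = Const(root, {{9, 9}});      // one replacement row per index
/// auto y = InplaceUpdate(root, x, i, v);
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({y}, &out));  // out[0]: [[9, 9], [3, 4], [5, 6]]
/// ```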
2172 | class InplaceUpdate { |
2173 | public: |
2174 | InplaceUpdate(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
2175 | ::tensorflow::Input i, ::tensorflow::Input v); |
2176 | operator ::tensorflow::Output() const { return y; } |
2177 | operator ::tensorflow::Input() const { return y; } |
2178 | ::tensorflow::Node* node() const { return y.node(); } |
2179 | |
2180 | Operation operation; |
2181 | ::tensorflow::Output y; |
2182 | }; |
2183 | |
2184 | /// Computes the inverse permutation of a tensor. |
2185 | /// |
2186 | /// This operation computes the inverse of an index permutation. It takes a 1-D |
2187 | /// integer tensor `x`, which represents the indices of a zero-based array, and |
2188 | /// swaps each value with its index position. In other words, for an output tensor |
2189 | /// `y` and an input tensor `x`, this operation computes the following: |
2190 | /// |
2191 | /// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` |
2192 | /// |
2193 | /// The values must include 0. There can be no duplicate values or negative values. |
2194 | /// |
2195 | /// For example: |
2196 | /// |
2197 | /// ``` |
2198 | /// # tensor `x` is [3, 4, 0, 2, 1] |
2199 | /// invert_permutation(x) ==> [2, 4, 3, 0, 1] |
2200 | /// ``` |
2201 | /// |
2202 | /// Args: |
2203 | /// * scope: A Scope object |
2204 | /// * x: 1-D. |
2205 | /// |
2206 | /// Returns: |
2207 | /// * `Output`: 1-D. |
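///
/// A C++ sketch of the example above (illustrative; setup as in the earlier
/// sketches):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto y = InvertPermutation(root, Const(root, {3, 4, 0, 2, 1}));
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({y}, &out));  // out[0]: [2, 4, 3, 0, 1]
/// ```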
2208 | class InvertPermutation { |
2209 | public: |
2210 | InvertPermutation(const ::tensorflow::Scope& scope, ::tensorflow::Input x); |
2211 | operator ::tensorflow::Output() const { return y; } |
2212 | operator ::tensorflow::Input() const { return y; } |
2213 | ::tensorflow::Node* node() const { return y.node(); } |
2214 | |
2215 | Operation operation; |
2216 | ::tensorflow::Output y; |
2217 | }; |
2218 | |
2219 | /// Computes the difference between two lists of numbers or strings. |
2220 | /// |
2221 | /// Given a list `x` and a list `y`, this operation returns a list `out` that |
2222 | /// represents all values that are in `x` but not in `y`. The returned list `out` |
2223 | /// is sorted in the same order that the numbers appear in `x` (duplicates are |
2224 | /// preserved). This operation also returns a list `idx` that represents the |
2225 | /// position of each `out` element in `x`. In other words: |
2226 | /// |
2227 | /// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` |
2228 | /// |
2229 | /// For example, given this input: |
2230 | /// |
2231 | /// ``` |
2232 | /// x = [1, 2, 3, 4, 5, 6] |
2233 | /// y = [1, 3, 5] |
2234 | /// ``` |
2235 | /// |
2236 | /// This operation would return: |
2237 | /// |
2238 | /// ``` |
2239 | /// out ==> [2, 4, 6] |
2240 | /// idx ==> [1, 3, 5] |
2241 | /// ``` |
2242 | /// |
2243 | /// Args: |
2244 | /// * scope: A Scope object |
2245 | /// * x: 1-D. Values to keep. |
2246 | /// * y: 1-D. Values to remove. |
2247 | /// |
2248 | /// Returns: |
2249 | /// * `Output` out: 1-D. Values present in `x` but not in `y`. |
2250 | /// * `Output` idx: 1-D. Positions of `x` values preserved in `out`. |
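///
/// A C++ sketch fetching both outputs (illustrative; setup as in the earlier
/// sketches):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto diff = SetDiff1D(root, Const(root, {1, 2, 3, 4, 5, 6}),
///                       Const(root, {1, 3, 5}));
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({diff.out, diff.idx}, &out));
/// // out[0]: [2, 4, 6]; out[1]: [1, 3, 5]
/// ```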
2251 | class SetDiff1D { |
2252 | public: |
2253 | /// Optional attribute setters for SetDiff1D |
2254 | struct Attrs { |
2255 | /// Defaults to DT_INT32 |
2256 | TF_MUST_USE_RESULT Attrs OutIdx(DataType x) { |
2257 | Attrs ret = *this; |
2258 | ret.out_idx_ = x; |
2259 | return ret; |
2260 | } |
2261 | |
2262 | DataType out_idx_ = DT_INT32; |
2263 | }; |
2264 | SetDiff1D(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
2265 | ::tensorflow::Input y); |
2266 | SetDiff1D(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
2267 | ::tensorflow::Input y, const SetDiff1D::Attrs& attrs); |
2268 | |
2269 | static Attrs OutIdx(DataType x) { |
2270 | return Attrs().OutIdx(x); |
2271 | } |
2272 | |
2273 | Operation operation; |
2274 | ::tensorflow::Output out; |
2275 | ::tensorflow::Output idx; |
2276 | }; |
2277 | |
2278 | /// Copy a tensor setting everything outside a central band in each innermost matrix to zero. |
2279 | /// |
2280 | /// The `band` part is computed as follows: |
2281 | /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a |
2282 | /// tensor with the same shape where |
2283 | /// |
2284 | /// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. |
2285 | /// |
2286 | /// The indicator function |
2287 | /// |
/// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
///                  (num_upper < 0 || (n-m) <= num_upper)`.
2290 | /// |
2291 | /// For example: |
2292 | /// |
2293 | /// ``` |
2294 | /// # if 'input' is [[ 0, 1, 2, 3] |
2295 | /// # [-1, 0, 1, 2] |
2296 | /// # [-2, -1, 0, 1] |
2297 | /// # [-3, -2, -1, 0]], |
2298 | /// |
2299 | /// tf.linalg.band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] |
2300 | /// [-1, 0, 1, 2] |
2301 | /// [ 0, -1, 0, 1] |
2302 | /// [ 0, 0, -1, 0]], |
2303 | /// |
2304 | /// tf.linalg.band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] |
2305 | /// [-1, 0, 1, 0] |
2306 | /// [-2, -1, 0, 1] |
2307 | /// [ 0, -2, -1, 0]] |
2308 | /// ``` |
2309 | /// |
2310 | /// Useful special cases: |
2311 | /// |
2312 | /// ``` |
2313 | /// tf.linalg.band_part(input, 0, -1) ==> Upper triangular part. |
2314 | /// tf.linalg.band_part(input, -1, 0) ==> Lower triangular part. |
2315 | /// tf.linalg.band_part(input, 0, 0) ==> Diagonal. |
2316 | /// ``` |
2317 | /// |
2318 | /// Args: |
2319 | /// * scope: A Scope object |
2320 | /// * input: Rank `k` tensor. |
2321 | /// * num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire |
2322 | /// lower triangle. |
2323 | /// * num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep |
2324 | /// entire upper triangle. |
2325 | /// |
2326 | /// Returns: |
2327 | /// * `Output`: Rank `k` tensor of the same shape as input. The extracted banded tensor. |
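///
/// A C++ sketch of the first example above, keeping one subdiagonal and the
/// entire upper triangle (illustrative; setup as in the earlier sketches):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto input = Const(root, {{ 0,  1,  2,  3},
///                           {-1,  0,  1,  2},
///                           {-2, -1,  0,  1},
///                           {-3, -2, -1,  0}});
/// auto band = MatrixBandPart(root, input, Const(root, 1), Const(root, -1));
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({band}, &out));
/// ```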
2328 | class MatrixBandPart { |
2329 | public: |
2330 | MatrixBandPart(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
2331 | ::tensorflow::Input num_lower, ::tensorflow::Input num_upper); |
2332 | operator ::tensorflow::Output() const { return band; } |
2333 | operator ::tensorflow::Input() const { return band; } |
2334 | ::tensorflow::Node* node() const { return band.node(); } |
2335 | |
2336 | Operation operation; |
2337 | ::tensorflow::Output band; |
2338 | }; |
2339 | |
/// Returns a batched diagonal tensor with given batched diagonal values.
2341 | /// |
2342 | /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and |
2343 | /// everything else padded with zeros. The diagonal is computed as follows: |
2344 | /// |
2345 | /// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a |
/// tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
2347 | /// |
2348 | /// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. |
2349 | /// |
2350 | /// For example: |
2351 | /// |
2352 | /// ``` |
2353 | /// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] |
2354 | /// |
2355 | /// and diagonal.shape = (2, 4) |
2356 | /// |
2357 | /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] |
2358 | /// [0, 2, 0, 0] |
2359 | /// [0, 0, 3, 0] |
2360 | /// [0, 0, 0, 4]], |
2361 | /// [[5, 0, 0, 0] |
2362 | /// [0, 6, 0, 0] |
2363 | /// [0, 0, 7, 0] |
2364 | /// [0, 0, 0, 8]]] |
2365 | /// |
2366 | /// which has shape (2, 4, 4) |
2367 | /// ``` |
2368 | /// |
2369 | /// Args: |
2370 | /// * scope: A Scope object |
2371 | /// * diagonal: Rank `k`, where `k >= 1`. |
2372 | /// |
2373 | /// Returns: |
2374 | /// * `Output`: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`. |
2375 | class MatrixDiag { |
2376 | public: |
2377 | MatrixDiag(const ::tensorflow::Scope& scope, ::tensorflow::Input diagonal); |
2378 | operator ::tensorflow::Output() const { return output; } |
2379 | operator ::tensorflow::Input() const { return output; } |
2380 | ::tensorflow::Node* node() const { return output.node(); } |
2381 | |
2382 | Operation operation; |
2383 | ::tensorflow::Output output; |
2384 | }; |
2385 | |
2386 | /// Returns the batched diagonal part of a batched tensor. |
2387 | /// |
2388 | /// This operation returns a tensor with the `diagonal` part |
2389 | /// of the batched `input`. The `diagonal` part is computed as follows: |
2390 | /// |
2391 | /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a |
2392 | /// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where: |
2393 | /// |
2394 | /// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. |
2395 | /// |
2396 | /// The input must be at least a matrix. |
2397 | /// |
2398 | /// For example: |
2399 | /// |
2400 | /// ``` |
2401 | /// # 'input' is [[[1, 0, 0, 0] |
2402 | /// [0, 2, 0, 0] |
2403 | /// [0, 0, 3, 0] |
2404 | /// [0, 0, 0, 4]], |
2405 | /// [[5, 0, 0, 0] |
2406 | /// [0, 6, 0, 0] |
2407 | /// [0, 0, 7, 0] |
2408 | /// [0, 0, 0, 8]]] |
2409 | /// |
2410 | /// and input.shape = (2, 4, 4) |
2411 | /// |
2412 | /// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] |
2413 | /// |
2414 | /// which has shape (2, 4) |
2415 | /// ``` |
2416 | /// |
2417 | /// Args: |
2418 | /// * scope: A Scope object |
2419 | /// * input: Rank `k` tensor where `k >= 2`. |
2420 | /// |
2421 | /// Returns: |
2422 | /// * `Output`: The extracted diagonal(s) having shape |
2423 | /// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`. |
2424 | class MatrixDiagPart { |
2425 | public: |
2426 | MatrixDiagPart(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
2427 | operator ::tensorflow::Output() const { return diagonal; } |
2428 | operator ::tensorflow::Input() const { return diagonal; } |
2429 | ::tensorflow::Node* node() const { return diagonal.node(); } |
2430 | |
2431 | Operation operation; |
2432 | ::tensorflow::Output diagonal; |
2433 | }; |
2434 | |
2435 | /// Returns the batched diagonal part of a batched tensor. |
2436 | /// |
2437 | /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched |
2438 | /// `input`. |
2439 | /// |
2440 | /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. |
2441 | /// Let `max_diag_len` be the maximum length among all diagonals to be extracted, |
/// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.
2443 | /// Let `num_diags` be the number of diagonals to extract, |
2444 | /// `num_diags = k[1] - k[0] + 1`. |
2445 | /// |
2446 | /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape |
2447 | /// `[I, J, ..., L, max_diag_len]` and values: |
2448 | /// |
2449 | /// ``` |
2450 | /// diagonal[i, j, ..., l, n] |
2451 | /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, |
2452 | /// padding_value ; otherwise. |
2453 | /// ``` |
2454 | /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. |
2455 | /// |
2456 | /// Otherwise, the output tensor has rank `r` with dimensions |
2457 | /// `[I, J, ..., L, num_diags, max_diag_len]` with values: |
2458 | /// |
2459 | /// ``` |
2460 | /// diagonal[i, j, ..., l, m, n] |
2461 | /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, |
2462 | /// padding_value ; otherwise. |
2463 | /// ``` |
2464 | /// where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. |
2465 | /// |
2466 | /// The input must be at least a matrix. |
2467 | /// |
2468 | /// For example: |
2469 | /// |
2470 | /// ``` |
2471 | /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) |
2472 | /// [5, 6, 7, 8], |
2473 | /// [9, 8, 7, 6]], |
2474 | /// [[5, 4, 3, 2], |
2475 | /// [1, 2, 3, 4], |
2476 | /// [5, 6, 7, 8]]]) |
2477 | /// |
2478 | /// # A main diagonal from each batch. |
2479 | /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) |
2480 | /// [5, 2, 7]] |
2481 | /// |
2482 | /// # A superdiagonal from each batch. |
2483 | /// tf.matrix_diag_part(input, k = 1) |
2484 | /// ==> [[2, 7, 6], # Output shape: (2, 3) |
2485 | /// [4, 3, 8]] |
2486 | /// |
2487 | /// # A tridiagonal band from each batch. |
2488 | /// tf.matrix_diag_part(input, k = (-1, 1)) |
2489 | /// ==> [[[2, 7, 6], # Output shape: (2, 3, 3) |
2490 | /// [1, 6, 7], |
2491 | /// [5, 8, 0]], |
2492 | /// [[4, 3, 8], |
2493 | /// [5, 2, 7], |
2494 | /// [1, 6, 0]]] |
2495 | /// |
2496 | /// # Padding value = 9 |
2497 | /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) |
2498 | /// ==> [[[4, 9, 9], # Output shape: (2, 3, 3) |
2499 | /// [3, 8, 9], |
2500 | /// [2, 7, 6]], |
2501 | /// [[2, 9, 9], |
2502 | /// [3, 4, 9], |
2503 | /// [4, 3, 8]]] |
2504 | /// ``` |
2505 | /// |
2506 | /// Args: |
2507 | /// * scope: A Scope object |
2508 | /// * input: Rank `r` tensor where `r >= 2`. |
2509 | /// * k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main |
2510 | /// diagonal, and negative value means subdiagonals. `k` can be a single integer |
2511 | /// (for a single diagonal) or a pair of integers specifying the low and high ends |
2512 | /// of a matrix band. `k[0]` must not be larger than `k[1]`. |
2513 | /// * padding_value: The value to fill the area outside the specified diagonal band with. |
2514 | /// Default is 0. |
2515 | /// |
2516 | /// Returns: |
2517 | /// * `Output`: The extracted diagonal(s). |
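///
/// A C++ sketch extracting the main diagonal from each batch, as in the
/// first example above (illustrative; setup as in the earlier sketches):
///
/// ```c++
/// Scope root = Scope::NewRootScope();
/// auto input = Const(root, {{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 8, 7, 6}},
///                           {{5, 4, 3, 2}, {1, 2, 3, 4}, {5, 6, 7, 8}}});
/// auto diag = MatrixDiagPartV2(root, input, Const(root, 0), Const(root, 0));
/// ClientSession session(root);
/// std::vector<Tensor> out;
/// TF_CHECK_OK(session.Run({diag}, &out));  // out[0]: [[1, 6, 7], [5, 2, 7]]
/// ```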
2518 | class MatrixDiagPartV2 { |
2519 | public: |
2520 | MatrixDiagPartV2(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
2521 | ::tensorflow::Input k, ::tensorflow::Input padding_value); |
2522 | operator ::tensorflow::Output() const { return diagonal; } |
2523 | operator ::tensorflow::Input() const { return diagonal; } |
2524 | ::tensorflow::Node* node() const { return diagonal.node(); } |
2525 | |
2526 | Operation operation; |
2527 | ::tensorflow::Output diagonal; |
2528 | }; |
2529 | |
2530 | /// Returns the batched diagonal part of a batched tensor. |
2531 | /// |
2532 | /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched |
2533 | /// `input`. |
2534 | /// |
2535 | /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. |
2536 | /// Let `max_diag_len` be the maximum length among all diagonals to be extracted, |
/// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.
2538 | /// Let `num_diags` be the number of diagonals to extract, |
2539 | /// `num_diags = k[1] - k[0] + 1`. |
2540 | /// |
2541 | /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape |
2542 | /// `[I, J, ..., L, max_diag_len]` and values: |
2543 | /// |
2544 | /// ``` |
2545 | /// diagonal[i, j, ..., l, n] |
2546 | /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, |
2547 | /// padding_value ; otherwise. |
2548 | /// ``` |
2549 | /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. |
2550 | /// |
2551 | /// Otherwise, the output tensor has rank `r` with dimensions |
2552 | /// `[I, J, ..., L, num_diags, max_diag_len]` with values: |
2553 | /// |
2554 | /// ``` |
2555 | /// diagonal[i, j, ..., l, m, n] |
2556 | /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, |
2557 | /// padding_value ; otherwise. |
2558 | /// ``` |
2559 | /// where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. |
2560 | /// |
2561 | /// `offset` is zero except when the alignment of the diagonal is to the right. |
2562 | /// ``` |
2563 | /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} |
2564 | /// and `d >= 0`) or |
2565 | /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT} |
2566 | /// and `d <= 0`) |
2567 | /// 0 ; otherwise |
2568 | /// ``` |
2569 | /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. |
2570 | /// |
2571 | /// The input must be at least a matrix. |
2572 | /// |
2573 | /// For example: |
2574 | /// |
2575 | /// ``` |
2576 | /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) |
2577 | /// [5, 6, 7, 8], |
2578 | /// [9, 8, 7, 6]], |
2579 | /// [[5, 4, 3, 2], |
2580 | /// [1, 2, 3, 4], |
2581 | /// [5, 6, 7, 8]]]) |
2582 | /// |
2583 | /// # A main diagonal from each batch. |
2584 | /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) |
2585 | /// [5, 2, 7]] |
2586 | /// |
2587 | /// # A superdiagonal from each batch. |
2588 | /// tf.matrix_diag_part(input, k = 1) |
2589 | /// ==> [[2, 7, 6], # Output shape: (2, 3) |
2590 | /// [4, 3, 8]] |
2591 | /// |
2592 | /// # A band from each batch. |
2593 | /// tf.matrix_diag_part(input, k = (-1, 2)) |
2594 | /// ==> [[[0, 3, 8], # Output shape: (2, 4, 3) |
2595 | /// [2, 7, 6], |
2596 | /// [1, 6, 7], |
2597 | /// [5, 8, 0]], |
2598 | /// [[0, 3, 4], |
2599 | /// [4, 3, 8], |
2600 | /// [5, 2, 7], |
2601 | /// [1, 6, 0]]] |
2602 | /// |
2603 | /// # LEFT_RIGHT alignment. |
2604 | /// tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") |
2605 | /// ==> [[[3, 8, 0], # Output shape: (2, 4, 3) |
2606 | /// [2, 7, 6], |
2607 | /// [1, 6, 7], |
2608 | /// [0, 5, 8]], |
2609 | /// [[3, 4, 0], |
2610 | /// [4, 3, 8], |
2611 | /// [5, 2, 7], |
2612 | /// [0, 1, 6]]] |
2613 | /// |
2614 | /// # max_diag_len can be shorter than the main diagonal. |
2615 | /// tf.matrix_diag_part(input, k = (-2, -1)) |
2616 | /// ==> [[[5, 8], |
2617 | /// [9, 0]], |
2618 | /// [[1, 6], |
2619 | /// [5, 0]]] |
2620 | /// |
2621 | /// # padding_value = 9 |
2622 | /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) |
2623 | /// ==> [[[9, 9, 4], # Output shape: (2, 3, 3) |
2624 | /// [9, 3, 8], |
2625 | /// [2, 7, 6]], |
2626 | /// [[9, 9, 2], |
2627 | /// [9, 3, 4], |
2628 | /// [4, 3, 8]]] |
2629 | /// |
2630 | /// ``` |
2631 | /// |
2632 | /// Args: |
2633 | /// * scope: A Scope object |
2634 | /// * input: Rank `r` tensor where `r >= 2`. |
2635 | /// * k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main |
2636 | /// diagonal, and negative value means subdiagonals. `k` can be a single integer |
2637 | /// (for a single diagonal) or a pair of integers specifying the low and high ends |
2638 | /// of a matrix band. `k[0]` must not be larger than `k[1]`. |
2639 | /// * padding_value: The value to fill the area outside the specified diagonal band with. |
2640 | /// Default is 0. |
2641 | /// |
2642 | /// Optional attributes (see `Attrs`): |
2643 | /// * align: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is |
2644 | /// a string specifying how superdiagonals and subdiagonals should be aligned, |
2645 | /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), |
2646 | /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals |
2647 | /// to the right (left-pads the row) and subdiagonals to the left (right-pads the |
2648 | /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is |
2649 | /// the opposite alignment. |
2650 | /// |
2651 | /// Returns: |
2652 | /// * `Output`: The extracted diagonal(s). |
2653 | class MatrixDiagPartV3 { |
2654 | public: |
2655 | /// Optional attribute setters for MatrixDiagPartV3 |
2656 | struct Attrs { |
2657 | /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is |
2658 | /// a string specifying how superdiagonals and subdiagonals should be aligned, |
2659 | /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), |
2660 | /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals |
2661 | /// to the right (left-pads the row) and subdiagonals to the left (right-pads the |
2662 | /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is |
2663 | /// the opposite alignment. |
2664 | /// |
2665 | /// Defaults to "RIGHT_LEFT" |
2666 | TF_MUST_USE_RESULT Attrs Align(StringPiece x) { |
2667 | Attrs ret = *this; |
2668 | ret.align_ = x; |
2669 | return ret; |
2670 | } |
2671 | |
    StringPiece align_ = "RIGHT_LEFT";
2673 | }; |
2674 | MatrixDiagPartV3(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
2675 | ::tensorflow::Input k, ::tensorflow::Input padding_value); |
2676 | MatrixDiagPartV3(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
2677 | ::tensorflow::Input k, ::tensorflow::Input padding_value, |
2678 | const MatrixDiagPartV3::Attrs& attrs); |
2679 | operator ::tensorflow::Output() const { return diagonal; } |
2680 | operator ::tensorflow::Input() const { return diagonal; } |
2681 | ::tensorflow::Node* node() const { return diagonal.node(); } |
2682 | |
2683 | static Attrs Align(StringPiece x) { |
2684 | return Attrs().Align(x); |
2685 | } |
2686 | |
2687 | Operation operation; |
2688 | ::tensorflow::Output diagonal; |
2689 | }; |
2690 | |
2691 | /// Returns a batched diagonal tensor with given batched diagonal values. |
2692 | /// |
2693 | /// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th |
2694 | /// diagonals of a matrix, with everything else padded with `padding`. `num_rows` |
2695 | /// and `num_cols` specify the dimension of the innermost matrix of the output. If |
/// neither is specified, the op assumes the innermost matrix is square and infers
2697 | /// its size from `k` and the innermost dimension of `diagonal`. If only one of them |
2698 | /// is specified, the op assumes the unspecified value is the smallest possible |
2699 | /// based on other criteria. |
2700 | /// |
2701 | /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has |
2702 | /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one |
2703 | /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank |
2704 | /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`. |
2705 | /// |
2706 | /// The second innermost dimension of `diagonal` has double meaning. |
2707 | /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size |
/// `[I, J, ..., M]`, and the output tensor is:
2709 | /// |
2710 | /// ``` |
2711 | /// output[i, j, ..., l, m, n] |
2712 | /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper |
2713 | /// padding_value ; otherwise |
2714 | /// ``` |
2715 | /// |
2716 | /// Otherwise, `M` is treated as the number of diagonals for the matrix in the |
2717 | /// same batch (`M = k[1]-k[0]+1`), and the output tensor is: |
2718 | /// |
2719 | /// ``` |
2720 | /// output[i, j, ..., l, m, n] |
2721 | /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] |
2722 | /// padding_value ; otherwise |
2723 | /// ``` |
2724 | /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. |
2725 | /// |
2726 | /// For example: |
2727 | /// |
2728 | /// ``` |
2729 | /// # The main diagonal. |
2730 | /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) |
2731 | /// [5, 6, 7, 8]]) |
2732 | /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) |
2733 | /// [0, 2, 0, 0], |
2734 | /// [0, 0, 3, 0], |
2735 | /// [0, 0, 0, 4]], |
2736 | /// [[5, 0, 0, 0], |
2737 | /// [0, 6, 0, 0], |
2738 | /// [0, 0, 7, 0], |
2739 | /// [0, 0, 0, 8]]] |
2740 | /// |
2741 | /// # A superdiagonal (per batch). |
2742 | /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) |
2743 | /// [4, 5, 6]]) |
2744 | /// tf.matrix_diag(diagonal, k = 1) |
2745 | /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) |
2746 | /// [0, 0, 2, 0], |
2747 | /// [0, 0, 0, 3], |
2748 | /// [0, 0, 0, 0]], |
2749 | /// [[0, 4, 0, 0], |
2750 | /// [0, 0, 5, 0], |
2751 | /// [0, 0, 0, 6], |
2752 | /// [0, 0, 0, 0]]] |
2753 | /// |
2754 | /// # A band of diagonals. |
2755 | /// diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) |
2756 | /// [4, 5, 0]], |
2757 | /// [[6, 7, 9], |
2758 | /// [9, 1, 0]]]) |
2759 | /// tf.matrix_diag(diagonals, k = (-1, 0)) |
2760 | /// ==> [[[1, 0, 0], # Output shape: (2, 3, 3) |
2761 | /// [4, 2, 0], |
2762 | /// [0, 5, 3]], |
2763 | /// [[6, 0, 0], |
2764 | /// [9, 7, 0], |
2765 | /// [0, 1, 9]]] |
2766 | /// |
2767 | /// # Rectangular matrix. |
2768 | /// diagonal = np.array([1, 2]) # Input shape: (2) |
2769 | /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) |
2770 | /// ==> [[0, 0, 0, 0], # Output shape: (3, 4) |
2771 | /// [1, 0, 0, 0], |
2772 | /// [0, 2, 0, 0]] |
2773 | /// |
2774 | /// # Rectangular matrix with inferred num_cols and padding_value = 9. |
2775 | /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) |
2776 | /// ==> [[9, 9], # Output shape: (3, 2) |
2777 | /// [1, 9], |
2778 | /// [9, 2]] |
2779 | /// ``` |
2780 | /// |
2781 | /// Args: |
2782 | /// * scope: A Scope object |
2783 | /// * diagonal: Rank `r`, where `r >= 1` |
2784 | /// * k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main |
2785 | /// diagonal, and negative value means subdiagonals. `k` can be a single integer |
2786 | /// (for a single diagonal) or a pair of integers specifying the low and high ends |
2787 | /// of a matrix band. `k[0]` must not be larger than `k[1]`. |
2788 | /// * num_rows: The number of rows of the output matrix. If it is not provided, the op assumes |
2789 | /// the output matrix is a square matrix and infers the matrix size from k and the |
2790 | /// innermost dimension of `diagonal`. |
2791 | /// * num_cols: The number of columns of the output matrix. If it is not provided, the op |
2792 | /// assumes the output matrix is a square matrix and infers the matrix size from |
2793 | /// k and the innermost dimension of `diagonal`. |
2794 | /// * padding_value: The number to fill the area outside the specified diagonal band with. |
2795 | /// Default is 0. |
2796 | /// |
2797 | /// Returns: |
2798 | /// * `Output`: Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise. |
2799 | class MatrixDiagV2 { |
2800 | public: |
2801 | MatrixDiagV2(const ::tensorflow::Scope& scope, ::tensorflow::Input diagonal, |
2802 | ::tensorflow::Input k, ::tensorflow::Input num_rows, |
2803 | ::tensorflow::Input num_cols, ::tensorflow::Input padding_value); |
2804 | operator ::tensorflow::Output() const { return output; } |
2805 | operator ::tensorflow::Input() const { return output; } |
2806 | ::tensorflow::Node* node() const { return output.node(); } |
2807 | |
2808 | Operation operation; |
2809 | ::tensorflow::Output output; |
2810 | }; |
2811 | |
2812 | /// Returns a batched diagonal tensor with given batched diagonal values. |
2813 | /// |
/// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
/// diagonals of a matrix, with everything else padded with `padding_value`.
/// `num_rows` and `num_cols` specify the dimension of the innermost matrix of the
/// output. If neither is specified, the op assumes the innermost matrix is square
/// and infers its size from `k` and the innermost dimension of `diagonal`. If only
/// one of them is specified, the op assumes the unspecified value is the smallest
/// possible based on other criteria.
2821 | /// |
2822 | /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has |
2823 | /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one |
2824 | /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank |
2825 | /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`. |
2826 | /// |
2827 | /// The second innermost dimension of `diagonal` has double meaning. |
2828 | /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size |
2829 | /// [I, J, ..., M], and the output tensor is: |
2830 | /// |
2831 | /// ``` |
2832 | /// output[i, j, ..., l, m, n] |
2833 | /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper |
2834 | /// padding_value ; otherwise |
2835 | /// ``` |
2836 | /// |
2837 | /// Otherwise, `M` is treated as the number of diagonals for the matrix in the |
2838 | /// same batch (`M = k[1]-k[0]+1`), and the output tensor is: |
2839 | /// |
2840 | /// ``` |
2841 | /// output[i, j, ..., l, m, n] |
2842 | /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] |
2843 | /// padding_value ; otherwise |
2844 | /// ``` |
/// where `d = n - m`, `diag_index = k[1] - d`, and
2846 | /// `index_in_diag = n - max(d, 0) + offset`. |
2847 | /// |
2848 | /// `offset` is zero except when the alignment of the diagonal is to the right. |
2849 | /// ``` |
2850 | /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} |
2851 | /// and `d >= 0`) or |
2852 | /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT} |
2853 | /// and `d <= 0`) |
2854 | /// 0 ; otherwise |
2855 | /// ``` |
2856 | /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. |
2857 | /// |
2858 | /// For example: |
2859 | /// |
2860 | /// ``` |
2861 | /// # The main diagonal. |
2862 | /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) |
2863 | /// [5, 6, 7, 8]]) |
2864 | /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) |
2865 | /// [0, 2, 0, 0], |
2866 | /// [0, 0, 3, 0], |
2867 | /// [0, 0, 0, 4]], |
2868 | /// [[5, 0, 0, 0], |
2869 | /// [0, 6, 0, 0], |
2870 | /// [0, 0, 7, 0], |
2871 | /// [0, 0, 0, 8]]] |
2872 | /// |
2873 | /// # A superdiagonal (per batch). |
2874 | /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) |
2875 | /// [4, 5, 6]]) |
2876 | /// tf.matrix_diag(diagonal, k = 1) |
2877 | /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) |
2878 | /// [0, 0, 2, 0], |
2879 | /// [0, 0, 0, 3], |
2880 | /// [0, 0, 0, 0]], |
2881 | /// [[0, 4, 0, 0], |
2882 | /// [0, 0, 5, 0], |
2883 | /// [0, 0, 0, 6], |
2884 | /// [0, 0, 0, 0]]] |
2885 | /// |
2886 | /// # A tridiagonal band (per batch). |
2887 | /// diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) |
2888 | /// [1, 2, 3], |
2889 | /// [4, 5, 0]], |
2890 | /// [[0, 2, 3], |
2891 | /// [6, 7, 9], |
2892 | /// [9, 1, 0]]]) |
2893 | /// tf.matrix_diag(diagonals, k = (-1, 1)) |
2894 | /// ==> [[[1, 8, 0], # Output shape: (2, 3, 3) |
2895 | /// [4, 2, 9], |
2896 | /// [0, 5, 3]], |
2897 | /// [[6, 2, 0], |
2898 | /// [9, 7, 3], |
2899 | /// [0, 1, 9]]] |
2900 | /// |
2901 | /// # LEFT_RIGHT alignment. |
2902 | /// diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) |
2903 | /// [1, 2, 3], |
2904 | /// [0, 4, 5]], |
2905 | /// [[2, 3, 0], |
2906 | /// [6, 7, 9], |
2907 | /// [0, 9, 1]]]) |
2908 | /// tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT") |
2909 | /// ==> [[[1, 8, 0], # Output shape: (2, 3, 3) |
2910 | /// [4, 2, 9], |
2911 | /// [0, 5, 3]], |
2912 | /// [[6, 2, 0], |
2913 | /// [9, 7, 3], |
2914 | /// [0, 1, 9]]] |
2915 | /// |
2916 | /// # Rectangular matrix. |
2917 | /// diagonal = np.array([1, 2]) # Input shape: (2) |
2918 | /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) |
2919 | /// ==> [[0, 0, 0, 0], # Output shape: (3, 4) |
2920 | /// [1, 0, 0, 0], |
2921 | /// [0, 2, 0, 0]] |
2922 | /// |
2923 | /// # Rectangular matrix with inferred num_cols and padding_value = 9. |
2924 | /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) |
2925 | /// ==> [[9, 9], # Output shape: (3, 2) |
2926 | /// [1, 9], |
2927 | /// [9, 2]] |
2928 | /// |
2929 | /// ``` |
2930 | /// |
2931 | /// Args: |
2932 | /// * scope: A Scope object |
2933 | /// * diagonal: Rank `r`, where `r >= 1` |
2934 | /// * k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main |
2935 | /// diagonal, and negative value means subdiagonals. `k` can be a single integer |
2936 | /// (for a single diagonal) or a pair of integers specifying the low and high ends |
2937 | /// of a matrix band. `k[0]` must not be larger than `k[1]`. |
2938 | /// * num_rows: The number of rows of the output matrix. If it is not provided, the op assumes |
2939 | /// the output matrix is a square matrix and infers the matrix size from k and the |
2940 | /// innermost dimension of `diagonal`. |
2941 | /// * num_cols: The number of columns of the output matrix. If it is not provided, the op |
2942 | /// assumes the output matrix is a square matrix and infers the matrix size from |
2943 | /// k and the innermost dimension of `diagonal`. |
2944 | /// * padding_value: The number to fill the area outside the specified diagonal band with. |
2945 | /// Default is 0. |
2946 | /// |
2947 | /// Optional attributes (see `Attrs`): |
2948 | /// * align: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is |
2949 | /// a string specifying how superdiagonals and subdiagonals should be aligned, |
2950 | /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), |
2951 | /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals |
2952 | /// to the right (left-pads the row) and subdiagonals to the left (right-pads the |
2953 | /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is |
2954 | /// the opposite alignment. |
2955 | /// |
2956 | /// Returns: |
2957 | /// * `Output`: Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise. |
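///
/// A minimal C++ usage sketch (names such as `root` are illustrative; the band
/// layout matches the default "RIGHT_LEFT" alignment, set here explicitly):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// // Band with k = (-1, 0): row 0 is the main diagonal, row 1 the subdiagonal.
/// auto diagonals = tensorflow::ops::Const(root, {{1, 2, 3}, {4, 5, 0}});
/// auto k = tensorflow::ops::Const(root, {-1, 0});
/// auto m = tensorflow::ops::MatrixDiagV3(
///     root, diagonals, k, /*num_rows=*/-1, /*num_cols=*/-1,
///     /*padding_value=*/0, tensorflow::ops::MatrixDiagV3::Align("RIGHT_LEFT"));
/// ```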
2958 | class MatrixDiagV3 { |
2959 | public: |
2960 | /// Optional attribute setters for MatrixDiagV3 |
2961 | struct Attrs { |
2962 | /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is |
2963 | /// a string specifying how superdiagonals and subdiagonals should be aligned, |
2964 | /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), |
2965 | /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals |
2966 | /// to the right (left-pads the row) and subdiagonals to the left (right-pads the |
2967 | /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is |
2968 | /// the opposite alignment. |
2969 | /// |
2970 | /// Defaults to "RIGHT_LEFT" |
2971 | TF_MUST_USE_RESULT Attrs Align(StringPiece x) { |
2972 | Attrs ret = *this; |
2973 | ret.align_ = x; |
2974 | return ret; |
2975 | } |
2976 | |
2977 | StringPiece align_ = "RIGHT_LEFT" ; |
2978 | }; |
2979 | MatrixDiagV3(const ::tensorflow::Scope& scope, ::tensorflow::Input diagonal, |
2980 | ::tensorflow::Input k, ::tensorflow::Input num_rows, |
2981 | ::tensorflow::Input num_cols, ::tensorflow::Input padding_value); |
2982 | MatrixDiagV3(const ::tensorflow::Scope& scope, ::tensorflow::Input diagonal, |
2983 | ::tensorflow::Input k, ::tensorflow::Input num_rows, |
2984 | ::tensorflow::Input num_cols, ::tensorflow::Input padding_value, |
2985 | const MatrixDiagV3::Attrs& attrs); |
2986 | operator ::tensorflow::Output() const { return output; } |
2987 | operator ::tensorflow::Input() const { return output; } |
2988 | ::tensorflow::Node* node() const { return output.node(); } |
2989 | |
2990 | static Attrs Align(StringPiece x) { |
2991 | return Attrs().Align(x); |
2992 | } |
2993 | |
2994 | Operation operation; |
2995 | ::tensorflow::Output output; |
2996 | }; |
2997 | |
2998 | /// Returns a batched matrix tensor with new batched diagonal values. |
2999 | /// |
3000 | /// Given `input` and `diagonal`, this operation returns a tensor with the |
3001 | /// same shape and values as `input`, except for the main diagonal of the |
3002 | /// innermost matrices. These will be overwritten by the values in `diagonal`. |
3003 | /// |
3004 | /// The output is computed as follows: |
3005 | /// |
3006 | /// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has |
3007 | /// `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a |
3008 | /// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where: |
3009 | /// |
3010 | /// * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`. |
3011 | /// * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`. |
3012 | /// |
3013 | /// Args: |
3014 | /// * scope: A Scope object |
3015 | /// * input: Rank `k+1`, where `k >= 1`. |
3016 | /// * diagonal: Rank `k`, where `k >= 1`. |
3017 | /// |
3018 | /// Returns: |
3019 | /// * `Output`: Rank `k+1`, with `output.shape = input.shape`. |
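///
/// A minimal C++ usage sketch (names such as `root` are illustrative):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto input = tensorflow::ops::Const(root, {{7, 7, 7}, {7, 7, 7}, {7, 7, 7}});
/// auto diagonal = tensorflow::ops::Const(root, {1, 2, 3});
/// // Overwrites the main diagonal; the off-diagonal 7s are kept.
/// auto s = tensorflow::ops::MatrixSetDiag(root, input, diagonal);
/// ```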
3020 | class MatrixSetDiag { |
3021 | public: |
3022 | MatrixSetDiag(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
3023 | ::tensorflow::Input diagonal); |
3024 | operator ::tensorflow::Output() const { return output; } |
3025 | operator ::tensorflow::Input() const { return output; } |
3026 | ::tensorflow::Node* node() const { return output.node(); } |
3027 | |
3028 | Operation operation; |
3029 | ::tensorflow::Output output; |
3030 | }; |
3031 | |
3032 | /// Returns a batched matrix tensor with new batched diagonal values. |
3033 | /// |
3034 | /// Given `input` and `diagonal`, this operation returns a tensor with the |
3035 | /// same shape and values as `input`, except for the specified diagonals of the |
3036 | /// innermost matrices. These will be overwritten by the values in `diagonal`. |
3037 | /// |
3038 | /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or |
3039 | /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. |
3040 | /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. |
3041 | /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. |
3042 | /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, |
3043 | /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` |
3044 | /// |
/// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
3046 | /// If `k` is scalar or `k[0] == k[1]`: |
3047 | /// |
3048 | /// ``` |
3049 | /// output[i, j, ..., l, m, n] |
3050 | /// = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] |
3051 | /// input[i, j, ..., l, m, n] ; otherwise |
3052 | /// ``` |
3053 | /// |
3054 | /// Otherwise, |
3055 | /// |
3056 | /// ``` |
3057 | /// output[i, j, ..., l, m, n] |
3058 | /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] |
3059 | /// input[i, j, ..., l, m, n] ; otherwise |
3060 | /// ``` |
3061 | /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. |
3062 | /// |
3063 | /// For example: |
3064 | /// |
3065 | /// ``` |
3066 | /// # The main diagonal. |
3067 | /// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) |
3068 | /// [7, 7, 7, 7], |
3069 | /// [7, 7, 7, 7]], |
3070 | /// [[7, 7, 7, 7], |
3071 | /// [7, 7, 7, 7], |
3072 | /// [7, 7, 7, 7]]]) |
3073 | /// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) |
3074 | /// [4, 5, 6]]) |
/// tf.matrix_set_diag(input, diagonal)
///     ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
///          [7, 2, 7, 7],
///          [7, 7, 3, 7]],
///         [[4, 7, 7, 7],
///          [7, 5, 7, 7],
///          [7, 7, 6, 7]]]
3081 | /// |
3082 | /// # A superdiagonal (per batch). |
/// tf.matrix_set_diag(input, diagonal, k = 1)
3084 | /// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) |
3085 | /// [7, 7, 2, 7], |
3086 | /// [7, 7, 7, 3]], |
3087 | /// [[7, 4, 7, 7], |
3088 | /// [7, 7, 5, 7], |
3089 | /// [7, 7, 7, 6]]] |
3090 | /// |
3091 | /// # A band of diagonals. |
3092 | /// diagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3) |
3093 | /// [4, 5, 0]], |
3094 | /// [[6, 1, 2], |
3095 | /// [3, 4, 0]]]) |
/// tf.matrix_set_diag(input, diagonals, k = (-1, 0))
3097 | /// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) |
3098 | /// [4, 2, 7, 7], |
3099 | /// [0, 5, 3, 7]], |
3100 | /// [[6, 7, 7, 7], |
3101 | /// [3, 1, 7, 7], |
3102 | /// [7, 4, 2, 7]]] |
3103 | /// |
3104 | /// ``` |
3105 | /// |
3106 | /// Args: |
3107 | /// * scope: A Scope object |
3108 | /// * input: Rank `r+1`, where `r >= 1`. |
/// * diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
/// `r >= 1`.
3111 | /// * k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main |
3112 | /// diagonal, and negative value means subdiagonals. `k` can be a single integer |
3113 | /// (for a single diagonal) or a pair of integers specifying the low and high ends |
3114 | /// of a matrix band. `k[0]` must not be larger than `k[1]`. |
3115 | /// |
3116 | /// Returns: |
3117 | /// * `Output`: Rank `r+1`, with `output.shape = input.shape`. |
3118 | class MatrixSetDiagV2 { |
3119 | public: |
3120 | MatrixSetDiagV2(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
3121 | ::tensorflow::Input diagonal, ::tensorflow::Input k); |
3122 | operator ::tensorflow::Output() const { return output; } |
3123 | operator ::tensorflow::Input() const { return output; } |
3124 | ::tensorflow::Node* node() const { return output.node(); } |
3125 | |
3126 | Operation operation; |
3127 | ::tensorflow::Output output; |
3128 | }; |
3129 | |
3130 | /// Returns a batched matrix tensor with new batched diagonal values. |
3131 | /// |
3132 | /// Given `input` and `diagonal`, this operation returns a tensor with the |
3133 | /// same shape and values as `input`, except for the specified diagonals of the |
3134 | /// innermost matrices. These will be overwritten by the values in `diagonal`. |
3135 | /// |
3136 | /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or |
3137 | /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. |
3138 | /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. |
3139 | /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. |
3140 | /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, |
3141 | /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` |
3142 | /// |
/// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
3144 | /// If `k` is scalar or `k[0] == k[1]`: |
3145 | /// |
3146 | /// ``` |
3147 | /// output[i, j, ..., l, m, n] |
3148 | /// = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] |
3149 | /// input[i, j, ..., l, m, n] ; otherwise |
3150 | /// ``` |
3151 | /// |
3152 | /// Otherwise, |
3153 | /// |
3154 | /// ``` |
3155 | /// output[i, j, ..., l, m, n] |
3156 | /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] |
3157 | /// input[i, j, ..., l, m, n] ; otherwise |
3158 | /// ``` |
3159 | /// where `d = n - m`, `diag_index = k[1] - d`, and |
3160 | /// `index_in_diag = n - max(d, 0) + offset`. |
3161 | /// |
3162 | /// `offset` is zero except when the alignment of the diagonal is to the right. |
3163 | /// ``` |
3164 | /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} |
3165 | /// and `d >= 0`) or |
3166 | /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT} |
3167 | /// and `d <= 0`) |
3168 | /// 0 ; otherwise |
3169 | /// ``` |
3170 | /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. |
3171 | /// |
3172 | /// For example: |
3173 | /// |
3174 | /// ``` |
3175 | /// # The main diagonal. |
3176 | /// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) |
3177 | /// [7, 7, 7, 7], |
3178 | /// [7, 7, 7, 7]], |
3179 | /// [[7, 7, 7, 7], |
3180 | /// [7, 7, 7, 7], |
3181 | /// [7, 7, 7, 7]]]) |
3182 | /// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) |
3183 | /// [4, 5, 6]]) |
3184 | /// tf.matrix_set_diag(input, diagonal) |
3185 | /// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) |
3186 | /// [7, 2, 7, 7], |
3187 | /// [7, 7, 3, 7]], |
3188 | /// [[4, 7, 7, 7], |
3189 | /// [7, 5, 7, 7], |
3190 | /// [7, 7, 6, 7]]] |
3191 | /// |
3192 | /// # A superdiagonal (per batch). |
3193 | /// tf.matrix_set_diag(input, diagonal, k = 1) |
3194 | /// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) |
3195 | /// [7, 7, 2, 7], |
3196 | /// [7, 7, 7, 3]], |
3197 | /// [[7, 4, 7, 7], |
3198 | /// [7, 7, 5, 7], |
3199 | /// [7, 7, 7, 6]]] |
3200 | /// |
3201 | /// # A band of diagonals. |
3202 | /// diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) |
3203 | /// [6, 5, 8], |
3204 | /// [1, 2, 3], |
3205 | /// [4, 5, 0]], |
3206 | /// [[0, 1, 2], |
3207 | /// [5, 6, 4], |
3208 | /// [6, 1, 2], |
3209 | /// [3, 4, 0]]]) |
3210 | /// tf.matrix_set_diag(input, diagonals, k = (-1, 2)) |
3211 | /// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) |
3212 | /// [4, 2, 5, 1], |
3213 | /// [7, 5, 3, 8]], |
3214 | /// [[6, 5, 1, 7], |
3215 | /// [3, 1, 6, 2], |
3216 | /// [7, 4, 2, 4]]] |
3217 | /// |
3218 | /// # LEFT_RIGHT alignment. |
3219 | /// diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) |
3220 | /// [6, 5, 8], |
3221 | /// [1, 2, 3], |
3222 | /// [0, 4, 5]], |
3223 | /// [[1, 2, 0], |
3224 | /// [5, 6, 4], |
3225 | /// [6, 1, 2], |
3226 | /// [0, 3, 4]]]) |
3227 | /// tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT") |
3228 | /// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) |
3229 | /// [4, 2, 5, 1], |
3230 | /// [7, 5, 3, 8]], |
3231 | /// [[6, 5, 1, 7], |
3232 | /// [3, 1, 6, 2], |
3233 | /// [7, 4, 2, 4]]] |
3234 | /// |
3235 | /// ``` |
3236 | /// |
3237 | /// Args: |
3238 | /// * scope: A Scope object |
3239 | /// * input: Rank `r+1`, where `r >= 1`. |
/// * diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
/// `r >= 1`.
3242 | /// * k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main |
3243 | /// diagonal, and negative value means subdiagonals. `k` can be a single integer |
3244 | /// (for a single diagonal) or a pair of integers specifying the low and high ends |
3245 | /// of a matrix band. `k[0]` must not be larger than `k[1]`. |
3246 | /// |
3247 | /// Optional attributes (see `Attrs`): |
3248 | /// * align: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is |
3249 | /// a string specifying how superdiagonals and subdiagonals should be aligned, |
3250 | /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), |
3251 | /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals |
3252 | /// to the right (left-pads the row) and subdiagonals to the left (right-pads the |
3253 | /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is |
3254 | /// the opposite alignment. |
3255 | /// |
3256 | /// Returns: |
3257 | /// * `Output`: Rank `r+1`, with `output.shape = input.shape`. |
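///
/// A minimal C++ usage sketch (names such as `root` are illustrative; the band
/// layout assumes the default "RIGHT_LEFT" alignment):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto input = tensorflow::ops::Const(
///     root, {{7, 7, 7, 7}, {7, 7, 7, 7}, {7, 7, 7, 7}});
/// // Rows are the k = 0 and k = -1 diagonals; the short subdiagonal is
/// // left-aligned under "RIGHT_LEFT".
/// auto diagonals = tensorflow::ops::Const(root, {{1, 2, 3}, {4, 5, 0}});
/// auto k = tensorflow::ops::Const(root, {-1, 0});
/// auto s = tensorflow::ops::MatrixSetDiagV3(root, input, diagonals, k);
/// ```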
3258 | class MatrixSetDiagV3 { |
3259 | public: |
3260 | /// Optional attribute setters for MatrixSetDiagV3 |
3261 | struct Attrs { |
3262 | /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is |
3263 | /// a string specifying how superdiagonals and subdiagonals should be aligned, |
3264 | /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), |
3265 | /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals |
3266 | /// to the right (left-pads the row) and subdiagonals to the left (right-pads the |
3267 | /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is |
3268 | /// the opposite alignment. |
3269 | /// |
3270 | /// Defaults to "RIGHT_LEFT" |
3271 | TF_MUST_USE_RESULT Attrs Align(StringPiece x) { |
3272 | Attrs ret = *this; |
3273 | ret.align_ = x; |
3274 | return ret; |
3275 | } |
3276 | |
3277 | StringPiece align_ = "RIGHT_LEFT" ; |
3278 | }; |
3279 | MatrixSetDiagV3(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
3280 | ::tensorflow::Input diagonal, ::tensorflow::Input k); |
3281 | MatrixSetDiagV3(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
3282 | ::tensorflow::Input diagonal, ::tensorflow::Input k, const |
3283 | MatrixSetDiagV3::Attrs& attrs); |
3284 | operator ::tensorflow::Output() const { return output; } |
3285 | operator ::tensorflow::Input() const { return output; } |
3286 | ::tensorflow::Node* node() const { return output.node(); } |
3287 | |
3288 | static Attrs Align(StringPiece x) { |
3289 | return Attrs().Align(x); |
3290 | } |
3291 | |
3292 | Operation operation; |
3293 | ::tensorflow::Output output; |
3294 | }; |
3295 | |
3296 | /// Pads a tensor with mirrored values. |
3297 | /// |
/// This operation pads `input` with mirrored values according to the `paddings`
3299 | /// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is |
3300 | /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates |
3301 | /// how many values to add before the contents of `input` in that dimension, and |
3302 | /// `paddings[D, 1]` indicates how many values to add after the contents of `input` |
/// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
/// than `input.dim_size(D)` in `SYMMETRIC` mode (which copies the border), and no
/// greater than `input.dim_size(D) - 1` in `REFLECT` mode (which does not).
3306 | /// |
3307 | /// The padded size of each dimension D of the output is: |
3308 | /// |
3309 | /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` |
3310 | /// |
3311 | /// For example: |
3312 | /// |
3313 | /// ``` |
3314 | /// # 't' is [[1, 2, 3], [4, 5, 6]]. |
/// # 'paddings' is [[1, 1], [2, 2]].
3316 | /// # 'mode' is SYMMETRIC. |
3317 | /// # rank of 't' is 2. |
3318 | /// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] |
3319 | /// [2, 1, 1, 2, 3, 3, 2] |
3320 | /// [5, 4, 4, 5, 6, 6, 5] |
3321 | /// [5, 4, 4, 5, 6, 6, 5]] |
3322 | /// ``` |
3323 | /// |
3324 | /// Args: |
3325 | /// * scope: A Scope object |
3326 | /// * input: The input tensor to be padded. |
3327 | /// * paddings: A two-column matrix specifying the padding sizes. The number of |
3328 | /// rows must be the same as the rank of `input`. |
3329 | /// * mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions |
3330 | /// do not include the borders, while in symmetric mode the padded regions |
3331 | /// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` |
3332 | /// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and |
3333 | /// it is `[1, 2, 3, 3, 2]` in symmetric mode. |
3334 | /// |
3335 | /// Returns: |
3336 | /// * `Output`: The padded tensor. |
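///
/// A minimal C++ usage sketch mirroring the example above (names such as
/// `root` are illustrative):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto t = tensorflow::ops::Const(root, {{1, 2, 3}, {4, 5, 6}});
/// auto paddings = tensorflow::ops::Const(root, {{1, 1}, {2, 2}});
/// // SYMMETRIC padding includes the border, yielding the 4x7 result above.
/// auto padded = tensorflow::ops::MirrorPad(root, t, paddings, "SYMMETRIC");
/// ```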
3337 | class MirrorPad { |
3338 | public: |
3339 | MirrorPad(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
3340 | ::tensorflow::Input paddings, StringPiece mode); |
3341 | operator ::tensorflow::Output() const { return output; } |
3342 | operator ::tensorflow::Input() const { return output; } |
3343 | ::tensorflow::Node* node() const { return output.node(); } |
3344 | |
3345 | Operation operation; |
3346 | ::tensorflow::Output output; |
3347 | }; |
3348 | |
3349 | /// Returns a one-hot tensor. |
3350 | /// |
3351 | /// The locations represented by indices in `indices` take value `on_value`, |
3352 | /// while all other locations take value `off_value`. |
3353 | /// |
/// If the input `indices` is rank `N`, the output will have rank `N+1`.
3355 | /// The new axis is created at dimension `axis` (default: the new axis is |
3356 | /// appended at the end). |
3357 | /// |
/// If `indices` is a scalar, the output shape will be a vector of length `depth`.
3359 | /// |
3360 | /// If `indices` is a vector of length `features`, the output shape will be: |
3361 | /// ``` |
3362 | /// features x depth if axis == -1 |
3363 | /// depth x features if axis == 0 |
3364 | /// ``` |
3365 | /// |
3366 | /// If `indices` is a matrix (batch) with shape `[batch, features]`, |
3367 | /// the output shape will be: |
3368 | /// ``` |
3369 | /// batch x features x depth if axis == -1 |
3370 | /// batch x depth x features if axis == 1 |
3371 | /// depth x batch x features if axis == 0 |
3372 | /// ``` |
3373 | /// |
3374 | /// |
3375 | /// Examples |
3376 | /// ========= |
3377 | /// |
3378 | /// Suppose that |
3379 | /// ``` |
3380 | /// indices = [0, 2, -1, 1] |
3381 | /// depth = 3 |
3382 | /// on_value = 5.0 |
3383 | /// off_value = 0.0 |
3384 | /// axis = -1 |
3385 | /// ``` |
3386 | /// |
3387 | /// Then output is `[4 x 3]`: |
3388 | /// ``` |
3389 | /// output = |
3390 | /// [5.0 0.0 0.0] // one_hot(0) |
3391 | /// [0.0 0.0 5.0] // one_hot(2) |
3392 | /// [0.0 0.0 0.0] // one_hot(-1) |
3393 | /// [0.0 5.0 0.0] // one_hot(1) |
3394 | /// ``` |
3395 | /// |
3396 | /// Suppose that |
3397 | /// ``` |
3398 | /// indices = [0, 2, -1, 1] |
3399 | /// depth = 3 |
3400 | /// on_value = 0.0 |
3401 | /// off_value = 3.0 |
3402 | /// axis = 0 |
3403 | /// ``` |
3404 | /// |
/// Then output is `[3 x 4]`:
/// ```
/// output =
///   [0.0 3.0 3.0 3.0]
///   [3.0 3.0 3.0 0.0]
///   [3.0 0.0 3.0 3.0]
/// // ^                 one_hot(0)
/// //     ^             one_hot(2)
/// //         ^         one_hot(-1)
/// //             ^     one_hot(1)
3416 | /// ``` |
3417 | /// |
3418 | /// Suppose that |
3419 | /// ``` |
3420 | /// indices = [[0, 2], [1, -1]] |
3421 | /// depth = 3 |
3422 | /// on_value = 1.0 |
3423 | /// off_value = 0.0 |
3424 | /// axis = -1 |
3425 | /// ``` |
3426 | /// |
3427 | /// Then output is `[2 x 2 x 3]`: |
3428 | /// ``` |
3429 | /// output = |
3430 | /// [ |
3431 | /// [1.0, 0.0, 0.0] // one_hot(0) |
3432 | /// [0.0, 0.0, 1.0] // one_hot(2) |
3433 | /// ][ |
3434 | /// [0.0, 1.0, 0.0] // one_hot(1) |
3435 | /// [0.0, 0.0, 0.0] // one_hot(-1) |
3436 | /// ] |
3437 | /// ``` |
3438 | /// |
3439 | /// Args: |
3440 | /// * scope: A Scope object |
3441 | /// * indices: A tensor of indices. |
3442 | /// * depth: A scalar defining the depth of the one hot dimension. |
3443 | /// * on_value: A scalar defining the value to fill in output when `indices[j] = i`. |
3444 | /// * off_value: A scalar defining the value to fill in output when `indices[j] != i`. |
3445 | /// |
3446 | /// Optional attributes (see `Attrs`): |
3447 | /// * axis: The axis to fill (default: -1, a new inner-most axis). |
3448 | /// |
3449 | /// Returns: |
3450 | /// * `Output`: The one-hot tensor. |
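///
/// A minimal C++ usage sketch reproducing the first example above (names such
/// as `root` are illustrative):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto indices = tensorflow::ops::Const(root, {0, 2, -1, 1});
/// // depth = 3, on_value = 5.0, off_value = 0.0, default axis = -1.
/// auto one_hot = tensorflow::ops::OneHot(root, indices, /*depth=*/3,
///                                        /*on_value=*/5.0f,
///                                        /*off_value=*/0.0f);
/// ```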
3451 | class OneHot { |
3452 | public: |
3453 | /// Optional attribute setters for OneHot |
3454 | struct Attrs { |
3455 | /// The axis to fill (default: -1, a new inner-most axis). |
3456 | /// |
3457 | /// Defaults to -1 |
3458 | TF_MUST_USE_RESULT Attrs Axis(int64 x) { |
3459 | Attrs ret = *this; |
3460 | ret.axis_ = x; |
3461 | return ret; |
3462 | } |
3463 | |
3464 | int64 axis_ = -1; |
3465 | }; |
3466 | OneHot(const ::tensorflow::Scope& scope, ::tensorflow::Input indices, |
3467 | ::tensorflow::Input depth, ::tensorflow::Input on_value, |
3468 | ::tensorflow::Input off_value); |
3469 | OneHot(const ::tensorflow::Scope& scope, ::tensorflow::Input indices, |
3470 | ::tensorflow::Input depth, ::tensorflow::Input on_value, |
3471 | ::tensorflow::Input off_value, const OneHot::Attrs& attrs); |
3472 | operator ::tensorflow::Output() const { return output; } |
3473 | operator ::tensorflow::Input() const { return output; } |
3474 | ::tensorflow::Node* node() const { return output.node(); } |
3475 | |
3476 | static Attrs Axis(int64 x) { |
3477 | return Attrs().Axis(x); |
3478 | } |
3479 | |
3480 | Operation operation; |
3481 | ::tensorflow::Output output; |
3482 | }; |
3483 | |
3484 | /// Returns a tensor of ones with the same shape and type as x. |
3485 | /// |
3486 | /// Args: |
3487 | /// * scope: A Scope object |
3488 | /// * x: a tensor of type T. |
3489 | /// |
3490 | /// Returns: |
3491 | /// * `Output`: a tensor of the same shape and type as x but filled with ones. |
3492 | class OnesLike { |
3493 | public: |
3494 | OnesLike(const ::tensorflow::Scope& scope, ::tensorflow::Input x); |
3495 | operator ::tensorflow::Output() const { return y; } |
3496 | operator ::tensorflow::Input() const { return y; } |
3497 | ::tensorflow::Node* node() const { return y.node(); } |
3498 | |
3499 | Operation operation; |
3500 | ::tensorflow::Output y; |
3501 | }; |
3502 | |
3503 | /// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. |
3504 | /// |
3505 | /// Packs the `N` tensors in `values` into a tensor with rank one higher than each |
3506 | /// tensor in `values`, by packing them along the `axis` dimension. |
/// Given a list of tensors of shape `(A, B, C)`:
3508 | /// |
3509 | /// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. |
3510 | /// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. |
3511 | /// Etc. |
3512 | /// |
3513 | /// For example: |
3514 | /// |
3515 | /// ``` |
3516 | /// # 'x' is [1, 4] |
3517 | /// # 'y' is [2, 5] |
3518 | /// # 'z' is [3, 6] |
3519 | /// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. |
3520 | /// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] |
3521 | /// ``` |
3522 | /// |
3523 | /// This is the opposite of `unpack`. |
3524 | /// |
3525 | /// Args: |
3526 | /// * scope: A Scope object |
/// * values: Must all be of the same shape and type.
3528 | /// |
3529 | /// Optional attributes (see `Attrs`): |
3530 | /// * axis: Dimension along which to pack. Negative values wrap around, so the |
3531 | /// valid range is `[-(R+1), R+1)`. |
3532 | /// |
3533 | /// Returns: |
3534 | /// * `Output`: The packed tensor. |
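///
/// A minimal C++ usage sketch mirroring the example above (names such as
/// `root` are illustrative):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto x = tensorflow::ops::Const(root, {1, 4});
/// auto y = tensorflow::ops::Const(root, {2, 5});
/// auto z = tensorflow::ops::Const(root, {3, 6});
/// // Pack along the second dimension: [[1, 2, 3], [4, 5, 6]].
/// auto packed = tensorflow::ops::Stack(root, {x, y, z},
///                                      tensorflow::ops::Stack::Axis(1));
/// ```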
3535 | class Stack { |
3536 | public: |
3537 | /// Optional attribute setters for Stack |
3538 | struct Attrs { |
3539 | /// Dimension along which to pack. Negative values wrap around, so the |
3540 | /// valid range is `[-(R+1), R+1)`. |
3541 | /// |
3542 | /// Defaults to 0 |
3543 | TF_MUST_USE_RESULT Attrs Axis(int64 x) { |
3544 | Attrs ret = *this; |
3545 | ret.axis_ = x; |
3546 | return ret; |
3547 | } |
3548 | |
3549 | int64 axis_ = 0; |
3550 | }; |
3551 | Stack(const ::tensorflow::Scope& scope, ::tensorflow::InputList values); |
3552 | Stack(const ::tensorflow::Scope& scope, ::tensorflow::InputList values, const |
3553 | Stack::Attrs& attrs); |
3554 | operator ::tensorflow::Output() const { return output; } |
3555 | operator ::tensorflow::Input() const { return output; } |
3556 | ::tensorflow::Node* node() const { return output.node(); } |
3557 | |
3558 | static Attrs Axis(int64 x) { |
3559 | return Attrs().Axis(x); |
3560 | } |
3561 | |
3562 | Operation operation; |
3563 | ::tensorflow::Output output; |
3564 | }; |
3565 | |
3566 | /// Pads a tensor with zeros. |
3567 | /// |
/// This operation pads `input` with zeros according to the `paddings` you
/// specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is the
3570 | /// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates |
3571 | /// how many zeros to add before the contents of `input` in that dimension, and |
3572 | /// `paddings[D, 1]` indicates how many zeros to add after the contents of `input` |
3573 | /// in that dimension. |
3574 | /// |
3575 | /// The padded size of each dimension D of the output is: |
3576 | /// |
3577 | /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` |
3578 | /// |
3579 | /// For example: |
3580 | /// |
3581 | /// ``` |
3582 | /// # 't' is [[1, 1], [2, 2]] |
3583 | /// # 'paddings' is [[1, 1], [2, 2]] |
3584 | /// # rank of 't' is 2 |
3585 | /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] |
3586 | /// [0, 0, 1, 1, 0, 0] |
3587 | /// [0, 0, 2, 2, 0, 0] |
3588 | /// [0, 0, 0, 0, 0, 0]] |
3589 | /// ``` |
3590 | /// |
3591 | /// |
3592 | /// Args: |
3593 | /// * scope: A Scope object |
3594 | /// |
3595 | /// Returns: |
3596 | /// * `Output`: The output tensor. |
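///
/// A minimal C++ usage sketch mirroring the example above (names such as
/// `root` are illustrative):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto t = tensorflow::ops::Const(root, {{1, 1}, {2, 2}});
/// auto paddings = tensorflow::ops::Const(root, {{1, 1}, {2, 2}});
/// // Yields the 4x6 zero-padded tensor shown above.
/// auto padded = tensorflow::ops::Pad(root, t, paddings);
/// ```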
3597 | class Pad { |
3598 | public: |
3599 | Pad(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
3600 | ::tensorflow::Input paddings); |
3601 | operator ::tensorflow::Output() const { return output; } |
3602 | operator ::tensorflow::Input() const { return output; } |
3603 | ::tensorflow::Node* node() const { return output.node(); } |
3604 | |
3605 | Operation operation; |
3606 | ::tensorflow::Output output; |
3607 | }; |
3608 | |
3609 | /// Pads a tensor. |
3610 | /// |
3611 | /// This operation pads `input` according to the `paddings` and `constant_values` |
/// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
3613 | /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates |
3614 | /// how many padding values to add before the contents of `input` in that dimension, |
3615 | /// and `paddings[D, 1]` indicates how many padding values to add after the contents |
3616 | /// of `input` in that dimension. `constant_values` is a scalar tensor of the same |
3617 | /// type as `input` that indicates the value to use for padding `input`. |
3618 | /// |
3619 | /// The padded size of each dimension D of the output is: |
3620 | /// |
3621 | /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` |
3622 | /// |
3623 | /// For example: |
3624 | /// |
3625 | /// ``` |
3626 | /// # 't' is [[1, 1], [2, 2]] |
3627 | /// # 'paddings' is [[1, 1], [2, 2]] |
3628 | /// # 'constant_values' is 0 |
3629 | /// # rank of 't' is 2 |
3630 | /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] |
3631 | /// [0, 0, 1, 1, 0, 0] |
3632 | /// [0, 0, 2, 2, 0, 0] |
3633 | /// [0, 0, 0, 0, 0, 0]] |
3634 | /// ``` |
3635 | /// |
3636 | /// Args: |
3637 | /// * scope: A Scope object |
3638 | /// |
3639 | /// Returns: |
3640 | /// * `Output`: The output tensor. |
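///
/// A minimal C++ usage sketch (names such as `root` are illustrative; the only
/// difference from `Pad` is the explicit fill value):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto t = tensorflow::ops::Const(root, {{1, 1}, {2, 2}});
/// auto paddings = tensorflow::ops::Const(root, {{1, 1}, {2, 2}});
/// // Pads with 9s instead of zeros.
/// auto padded = tensorflow::ops::PadV2(root, t, paddings,
///                                      /*constant_values=*/9);
/// ```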
3641 | class PadV2 { |
3642 | public: |
3643 | PadV2(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
3644 | ::tensorflow::Input paddings, ::tensorflow::Input constant_values); |
3645 | operator ::tensorflow::Output() const { return output; } |
3646 | operator ::tensorflow::Input() const { return output; } |
3647 | ::tensorflow::Node* node() const { return output.node(); } |
3648 | |
3649 | Operation operation; |
3650 | ::tensorflow::Output output; |
3651 | }; |
3652 | |
3653 | /// Concatenates a list of `N` tensors along the first dimension. |
3654 | /// |
3655 | /// The input tensors are all required to have size 1 in the first dimension. |
3656 | /// |
3657 | /// For example: |
3658 | /// |
3659 | /// ``` |
3660 | /// # 'x' is [[1, 4]] |
3661 | /// # 'y' is [[2, 5]] |
3662 | /// # 'z' is [[3, 6]] |
3663 | /// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. |
3664 | /// ``` |
3665 | /// |
/// The difference between concat and parallel_concat is that concat requires all
/// of the inputs to be computed before the operation begins, but doesn't require
/// that the input shapes be known during graph construction. Parallel concat
/// will copy pieces of the input into the output as they become available; in
/// some situations this can provide a performance benefit.
3671 | /// |
3672 | /// Args: |
3673 | /// * scope: A Scope object |
3674 | /// * values: Tensors to be concatenated. All must have size 1 in the first dimension |
3675 | /// and same shape. |
/// * shape: the final shape of the result; should match the shape of each input
/// except that the first dimension is the number of input values.
3678 | /// |
3679 | /// Returns: |
3680 | /// * `Output`: The concatenated tensor. |
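///
/// A minimal C++ usage sketch (names such as `root` are illustrative; note the
/// result shape must be spelled out at construction time):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto x = tensorflow::ops::Const(root, {{1, 4}});
/// auto y = tensorflow::ops::Const(root, {{2, 5}});
/// auto z = tensorflow::ops::Const(root, {{3, 6}});
/// auto out = tensorflow::ops::ParallelConcat(
///     root, {x, y, z}, tensorflow::PartialTensorShape({3, 2}));
/// ```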
3681 | class ParallelConcat { |
3682 | public: |
3683 | ParallelConcat(const ::tensorflow::Scope& scope, ::tensorflow::InputList |
3684 | values, PartialTensorShape shape); |
3685 | operator ::tensorflow::Output() const { return output; } |
3686 | operator ::tensorflow::Input() const { return output; } |
3687 | ::tensorflow::Node* node() const { return output.node(); } |
3688 | |
3689 | Operation operation; |
3690 | ::tensorflow::Output output; |
3691 | }; |
3692 | |
3693 | /// A placeholder op for a value that will be fed into the computation. |
3694 | /// |
3695 | /// N.B. This operation will fail with an error if it is executed. It is |
3696 | /// intended as a way to represent a value that will always be fed, and to |
3697 | /// provide attrs that enable the fed value to be checked at runtime. |
3698 | /// |
3699 | /// Args: |
3700 | /// * scope: A Scope object |
3701 | /// * dtype: The type of elements in the tensor. |
3702 | /// |
3703 | /// Optional attributes (see `Attrs`): |
3704 | /// * shape: (Optional) The shape of the tensor. If the shape has 0 dimensions, the |
3705 | /// shape is unconstrained. |
3706 | /// |
3707 | /// Returns: |
3708 | /// * `Output`: A placeholder tensor that must be replaced using the feed mechanism. |
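///
/// A minimal C++ sketch (names such as `root` are illustrative; assumes a
/// `ClientSession` from "tensorflow/cc/client/client_session.h", which this
/// header does not include itself):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto x = tensorflow::ops::Placeholder(root, tensorflow::DT_FLOAT);
/// auto y = tensorflow::ops::OnesLike(root, x);
/// tensorflow::ClientSession session(root);
/// std::vector<tensorflow::Tensor> outputs;
/// // The placeholder must be fed, or Run() returns an error.
/// TF_CHECK_OK(session.Run({{x, {1.0f, 2.0f}}}, {y}, &outputs));
/// ```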
3709 | class Placeholder { |
3710 | public: |
3711 | /// Optional attribute setters for Placeholder |
3712 | struct Attrs { |
3713 | /// (Optional) The shape of the tensor. If the shape has 0 dimensions, the |
3714 | /// shape is unconstrained. |
3715 | /// |
3716 | /// Defaults to <unknown> |
3717 | TF_MUST_USE_RESULT Attrs Shape(PartialTensorShape x) { |
3718 | Attrs ret = *this; |
3719 | ret.shape_ = x; |
3720 | return ret; |
3721 | } |
3722 | |
3723 | PartialTensorShape shape_ = ::tensorflow::PartialTensorShape() /* unknown */; |
3724 | }; |
3725 | Placeholder(const ::tensorflow::Scope& scope, DataType dtype); |
3726 | Placeholder(const ::tensorflow::Scope& scope, DataType dtype, const |
3727 | Placeholder::Attrs& attrs); |
3728 | operator ::tensorflow::Output() const { return output; } |
3729 | operator ::tensorflow::Input() const { return output; } |
3730 | ::tensorflow::Node* node() const { return output.node(); } |
3731 | |
3732 | static Attrs Shape(PartialTensorShape x) { |
3733 | return Attrs().Shape(x); |
3734 | } |
3735 | |
3736 | Operation operation; |
3737 | ::tensorflow::Output output; |
3738 | }; |
3739 | |
3740 | /// A placeholder op that passes through `input` when its output is not fed. |
3741 | /// |
3742 | /// Args: |
3743 | /// * scope: A Scope object |
3744 | /// * input: The default value to produce when `output` is not fed. |
3745 | /// * shape: The (possibly partial) shape of the tensor. |
3746 | /// |
3747 | /// Returns: |
3748 | /// * `Output`: A placeholder tensor that defaults to `input` if it is not fed. |
3749 | class PlaceholderWithDefault { |
3750 | public: |
3751 | PlaceholderWithDefault(const ::tensorflow::Scope& scope, ::tensorflow::Input |
3752 | input, PartialTensorShape shape); |
3753 | operator ::tensorflow::Output() const { return output; } |
3754 | operator ::tensorflow::Input() const { return output; } |
3755 | ::tensorflow::Node* node() const { return output.node(); } |
3756 | |
3757 | Operation operation; |
3758 | ::tensorflow::Output output; |
3759 | }; |
3760 | |
3761 | /// An identity op that triggers an error if a gradient is requested. |
3762 | /// |
3763 | /// When executed in a graph, this op outputs its input tensor as-is. |
3764 | /// |
/// When building ops to compute gradients, the TensorFlow gradient system
/// will return an error when trying to look up the gradient of this op,
/// because no gradient may ever be registered for it. This op exists to
/// prevent subtle bugs from silently returning unimplemented gradients in
/// some corner cases.
3770 | /// |
3771 | /// Args: |
3772 | /// * scope: A Scope object |
3773 | /// * input: any tensor. |
3774 | /// |
3775 | /// Optional attributes (see `Attrs`): |
3776 | /// * message: Will be printed in the error when anyone tries to differentiate |
3777 | /// this operation. |
3778 | /// |
3779 | /// Returns: |
3780 | /// * `Output`: the same input tensor. |
3781 | class PreventGradient { |
3782 | public: |
3783 | /// Optional attribute setters for PreventGradient |
3784 | struct Attrs { |
3785 | /// Will be printed in the error when anyone tries to differentiate |
3786 | /// this operation. |
3787 | /// |
3788 | /// Defaults to "" |
3789 | TF_MUST_USE_RESULT Attrs Message(StringPiece x) { |
3790 | Attrs ret = *this; |
3791 | ret.message_ = x; |
3792 | return ret; |
3793 | } |
3794 | |
3795 | StringPiece message_ = "" ; |
3796 | }; |
3797 | PreventGradient(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
3798 | PreventGradient(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
3799 | const PreventGradient::Attrs& attrs); |
3800 | operator ::tensorflow::Output() const { return output; } |
3801 | operator ::tensorflow::Input() const { return output; } |
3802 | ::tensorflow::Node* node() const { return output.node(); } |
3803 | |
3804 | static Attrs Message(StringPiece x) { |
3805 | return Attrs().Message(x); |
3806 | } |
3807 | |
3808 | Operation operation; |
3809 | ::tensorflow::Output output; |
3810 | }; |
3811 | |
3812 | /// Quantizes then dequantizes a tensor. |
3813 | /// |
3814 | /// This op simulates the precision loss from the quantized forward pass by: |
3815 | /// |
3816 | /// 1. Quantizing the tensor to fixed point numbers, which should match the target |
3817 | /// quantization method when it is used in inference. |
3818 | /// 2. Dequantizing it back to floating point numbers for the following ops, most |
3819 | /// likely matmul. |
3820 | /// |
3821 | /// There are different ways to quantize. This version uses only scaling, so 0.0 |
3822 | /// maps to 0. |
3823 | /// |
3824 | /// From the specified 'num_bits' in the quantized output type, it determines |
3825 | /// minimum and maximum representable quantized values. |
3826 | /// |
3827 | /// e.g. |
3828 | /// |
3829 | /// * [-128, 127] for signed, num_bits = 8, or |
3830 | /// * [0, 255] for unsigned, num_bits = 8. |
3831 | /// |
/// If range_given == False, the initial input_min, input_max will be determined
/// automatically as the minimum and maximum values in the input tensor; otherwise,
/// the specified values of input_min, input_max are used.
3835 | /// |
3836 | /// Note: If the input_min, input_max are specified, they do not need to equal the |
3837 | /// actual minimum and maximum values in the tensor. e.g. in some cases it may be |
3838 | /// beneficial to specify these values such that the low probability extremes of the |
3839 | /// input distribution are clipped. |
3840 | /// |
3841 | /// This op determines the maximum scale_factor that would map the initial |
3842 | /// [input_min, input_max] range to a range that lies within the representable |
3843 | /// quantized range. |
3844 | /// |
3845 | /// It determines the scale from one of input_min and input_max, then updates the |
3846 | /// other one to maximize the representable range. |
3847 | /// |
3848 | /// e.g. |
3849 | /// |
/// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
/// 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
/// would update input_max to be 127 / 12.8 = 9.921875.
/// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
/// 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
/// would update input_min to be -128.0 / 12.7 = -10.07874.
3856 | /// * if the output is unsigned, input_min is forced to be 0, and only the |
3857 | /// specified input_max is used. |
3858 | /// |
3859 | /// After determining the scale_factor and updating the input range, it applies the |
3860 | /// following to each value in the 'input' tensor. |
3861 | /// |
3862 | /// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor. |
3863 | /// |
3864 | /// The above round function rounds the value based on the given round_mode. |
3865 | /// |
3866 | /// |
3867 | /// Args: |
3868 | /// * scope: A Scope object |
3869 | /// * input: Tensor to quantize and then dequantize. |
3870 | /// * input_min: If `range_given == True`, this specifies the minimum input value that needs to |
3871 | /// be represented, otherwise it is determined from the min value of the `input` |
3872 | /// tensor. |
3873 | /// * input_max: If `range_given == True`, this specifies the maximum input value that needs to |
3874 | /// be represented, otherwise it is determined from the max value of the `input` |
3875 | /// tensor. |
3876 | /// |
3877 | /// Optional attributes (see `Attrs`): |
3878 | /// * signed_input: Whether the quantization is signed or unsigned. (actually this parameter should |
3879 | /// have been called <b>`signed_output`</b>) |
3880 | /// * num_bits: The bitwidth of the quantization. |
3881 | /// * range_given: Whether the range is given or should be determined from the `input` tensor. |
3882 | /// * round_mode: The 'round_mode' attribute controls which rounding tie-breaking algorithm is |
3883 | /// used when rounding float values to their quantized equivalents. The following |
3884 | /// rounding modes are currently supported: |
3885 | /// |
3886 | /// * HALF_TO_EVEN: this is the default round_mode. |
3887 | /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 |
3888 | /// rounds up to -7. |
3889 | /// |
3890 | /// * narrow_range: If True, then the absolute value of the quantized minimum value is the same as |
3891 | /// the quantized maximum value, instead of 1 greater. |
3892 | /// i.e. for 8 bit quantization, the minimum value is -127 instead of -128. |
3893 | /// * axis: If specified, this axis is treated as a channel or slice axis, and a separate |
3894 | /// quantization range is used for each channel or slice along this axis. |
3895 | /// |
3896 | /// Returns: |
3897 | /// * `Output`: The output tensor. |
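///
/// A minimal C++ usage sketch (names such as `root` are illustrative; the
/// range is fixed via the chained `Attrs` setters):
///
/// ```
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto input = tensorflow::ops::Const(root, {-10.0f, -4.2f, 0.0f, 5.0f});
/// auto qdq = tensorflow::ops::QuantizeAndDequantizeV2(
///     root, input, /*input_min=*/-10.0f, /*input_max=*/5.0f,
///     tensorflow::ops::QuantizeAndDequantizeV2::RangeGiven(true).NumBits(8));
/// ```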
3898 | class QuantizeAndDequantizeV2 { |
3899 | public: |
3900 | /// Optional attribute setters for QuantizeAndDequantizeV2 |
3901 | struct Attrs { |
3902 | /// Whether the quantization is signed or unsigned. (actually this parameter should |
3903 | /// have been called <b>`signed_output`</b>) |
3904 | /// |
3905 | /// Defaults to true |
3906 | TF_MUST_USE_RESULT Attrs SignedInput(bool x) { |
3907 | Attrs ret = *this; |
3908 | ret.signed_input_ = x; |
3909 | return ret; |
3910 | } |
3911 | |
3912 | /// The bitwidth of the quantization. |
3913 | /// |
3914 | /// Defaults to 8 |
3915 | TF_MUST_USE_RESULT Attrs NumBits(int64 x) { |
3916 | Attrs ret = *this; |
3917 | ret.num_bits_ = x; |
3918 | return ret; |
3919 | } |
3920 | |
3921 | /// Whether the range is given or should be determined from the `input` tensor. |
3922 | /// |
3923 | /// Defaults to false |
3924 | TF_MUST_USE_RESULT Attrs RangeGiven(bool x) { |
3925 | Attrs ret = *this; |
3926 | ret.range_given_ = x; |
3927 | return ret; |
3928 | } |
3929 | |
3930 | /// The 'round_mode' attribute controls which rounding tie-breaking algorithm is |
3931 | /// used when rounding float values to their quantized equivalents. The following |
3932 | /// rounding modes are currently supported: |
3933 | /// |
3934 | /// * HALF_TO_EVEN: this is the default round_mode. |
3935 | /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 |
3936 | /// rounds up to -7. |
3937 | /// |
3938 | /// |
3939 | /// Defaults to "HALF_TO_EVEN" |
3940 | TF_MUST_USE_RESULT Attrs RoundMode(StringPiece x) { |
3941 | Attrs ret = *this; |
3942 | ret.round_mode_ = x; |
3943 | return ret; |
3944 | } |
3945 | |
3946 | /// If True, then the absolute value of the quantized minimum value is the same as |
3947 | /// the quantized maximum value, instead of 1 greater. |
3948 | /// i.e. for 8 bit quantization, the minimum value is -127 instead of -128. |
3949 | /// |
3950 | /// Defaults to false |
3951 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
3952 | Attrs ret = *this; |
3953 | ret.narrow_range_ = x; |
3954 | return ret; |
3955 | } |
3956 | |
3957 | /// If specified, this axis is treated as a channel or slice axis, and a separate |
3958 | /// quantization range is used for each channel or slice along this axis. |
3959 | /// |
3960 | /// Defaults to -1 |
3961 | TF_MUST_USE_RESULT Attrs Axis(int64 x) { |
3962 | Attrs ret = *this; |
3963 | ret.axis_ = x; |
3964 | return ret; |
3965 | } |
3966 | |
3967 | bool signed_input_ = true; |
3968 | int64 num_bits_ = 8; |
3969 | bool range_given_ = false; |
3970 | StringPiece round_mode_ = "HALF_TO_EVEN" ; |
3971 | bool narrow_range_ = false; |
3972 | int64 axis_ = -1; |
3973 | }; |
3974 | QuantizeAndDequantizeV2(const ::tensorflow::Scope& scope, ::tensorflow::Input |
3975 | input, ::tensorflow::Input input_min, |
3976 | ::tensorflow::Input input_max); |
3977 | QuantizeAndDequantizeV2(const ::tensorflow::Scope& scope, ::tensorflow::Input |
3978 | input, ::tensorflow::Input input_min, |
3979 | ::tensorflow::Input input_max, const |
3980 | QuantizeAndDequantizeV2::Attrs& attrs); |
3981 | operator ::tensorflow::Output() const { return output; } |
3982 | operator ::tensorflow::Input() const { return output; } |
3983 | ::tensorflow::Node* node() const { return output.node(); } |
3984 | |
3985 | static Attrs SignedInput(bool x) { |
3986 | return Attrs().SignedInput(x); |
3987 | } |
3988 | static Attrs NumBits(int64 x) { |
3989 | return Attrs().NumBits(x); |
3990 | } |
3991 | static Attrs RangeGiven(bool x) { |
3992 | return Attrs().RangeGiven(x); |
3993 | } |
3994 | static Attrs RoundMode(StringPiece x) { |
3995 | return Attrs().RoundMode(x); |
3996 | } |
3997 | static Attrs NarrowRange(bool x) { |
3998 | return Attrs().NarrowRange(x); |
3999 | } |
4000 | static Attrs Axis(int64 x) { |
4001 | return Attrs().Axis(x); |
4002 | } |
4003 | |
4004 | Operation operation; |
4005 | ::tensorflow::Output output; |
4006 | }; |
4007 | |
4008 | /// Quantizes then dequantizes a tensor. |
4009 | /// |
4010 | /// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a |
4011 | /// tensor, so its value can change during training. |
4012 | /// |
4013 | /// Args: |
4014 | /// * scope: A Scope object |
4015 | /// |
4016 | /// Returns: |
4017 | /// * `Output`: The output tensor. |
4018 | class QuantizeAndDequantizeV3 { |
4019 | public: |
4020 | /// Optional attribute setters for QuantizeAndDequantizeV3 |
4021 | struct Attrs { |
4022 | /// Defaults to true |
4023 | TF_MUST_USE_RESULT Attrs SignedInput(bool x) { |
4024 | Attrs ret = *this; |
4025 | ret.signed_input_ = x; |
4026 | return ret; |
4027 | } |
4028 | |
4029 | /// Defaults to true |
4030 | TF_MUST_USE_RESULT Attrs RangeGiven(bool x) { |
4031 | Attrs ret = *this; |
4032 | ret.range_given_ = x; |
4033 | return ret; |
4034 | } |
4035 | |
4036 | /// Defaults to false |
4037 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
4038 | Attrs ret = *this; |
4039 | ret.narrow_range_ = x; |
4040 | return ret; |
4041 | } |
4042 | |
4043 | /// Defaults to -1 |
4044 | TF_MUST_USE_RESULT Attrs Axis(int64 x) { |
4045 | Attrs ret = *this; |
4046 | ret.axis_ = x; |
4047 | return ret; |
4048 | } |
4049 | |
4050 | bool signed_input_ = true; |
4051 | bool range_given_ = true; |
4052 | bool narrow_range_ = false; |
4053 | int64 axis_ = -1; |
4054 | }; |
4055 | QuantizeAndDequantizeV3(const ::tensorflow::Scope& scope, ::tensorflow::Input |
4056 | input, ::tensorflow::Input input_min, |
4057 | ::tensorflow::Input input_max, ::tensorflow::Input |
4058 | num_bits); |
4059 | QuantizeAndDequantizeV3(const ::tensorflow::Scope& scope, ::tensorflow::Input |
4060 | input, ::tensorflow::Input input_min, |
4061 | ::tensorflow::Input input_max, ::tensorflow::Input |
4062 | num_bits, const QuantizeAndDequantizeV3::Attrs& attrs); |
4063 | operator ::tensorflow::Output() const { return output; } |
4064 | operator ::tensorflow::Input() const { return output; } |
4065 | ::tensorflow::Node* node() const { return output.node(); } |
4066 | |
4067 | static Attrs SignedInput(bool x) { |
4068 | return Attrs().SignedInput(x); |
4069 | } |
4070 | static Attrs RangeGiven(bool x) { |
4071 | return Attrs().RangeGiven(x); |
4072 | } |
4073 | static Attrs NarrowRange(bool x) { |
4074 | return Attrs().NarrowRange(x); |
4075 | } |
4076 | static Attrs Axis(int64 x) { |
4077 | return Attrs().Axis(x); |
4078 | } |
4079 | |
4080 | Operation operation; |
4081 | ::tensorflow::Output output; |
4082 | }; |
4083 | |
4084 | /// Quantizes then dequantizes a tensor. |
4085 | /// |
4086 | /// This is almost identical to QuantizeAndDequantizeV2, except that it returns a |
4087 | /// gradient of 1 for inputs that are within the quantization range, or 0 otherwise. |
4088 | /// |
4089 | /// Args: |
4090 | /// * scope: A Scope object |
4091 | /// * input: Tensor to quantize and then dequantize. |
4092 | /// * input_min: If `range_given == True`, this specifies the minimum input value that needs to |
4093 | /// be represented, otherwise it is determined from the min value of the `input` |
4094 | /// tensor. |
4095 | /// * input_max: If `range_given == True`, this specifies the maximum input value that needs to |
4096 | /// be represented, otherwise it is determined from the max value of the `input` |
4097 | /// tensor. |
4098 | /// |
4099 | /// Optional attributes (see `Attrs`): |
4100 | /// * signed_input: Whether the quantization is signed or unsigned. (actually this parameter should |
4101 | /// have been called <b>`signed_output`</b>) |
4102 | /// * num_bits: The bitwidth of the quantization. |
4103 | /// * range_given: Whether the range is given or should be determined from the `input` tensor. |
4104 | /// * round_mode: The 'round_mode' attribute controls which rounding tie-breaking algorithm is |
4105 | /// used when rounding float values to their quantized equivalents. The following |
4106 | /// rounding modes are currently supported: |
4107 | /// |
4108 | /// * HALF_TO_EVEN: this is the default round_mode. |
4109 | /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 |
4110 | /// rounds up to -7. |
4111 | /// |
4112 | /// * narrow_range: If True, then the absolute value of the quantized minimum value is the same as |
4113 | /// the quantized maximum value, instead of 1 greater. |
4114 | /// i.e. for 8 bit quantization, the minimum value is -127 instead of -128. |
4115 | /// * axis: If specified, this axis is treated as a channel or slice axis, and a separate |
4116 | /// quantization range is used for each channel or slice along this axis. |
4117 | /// |
4118 | /// Returns: |
4119 | /// * `Output`: The output tensor. |
4120 | class QuantizeAndDequantizeV4 { |
4121 | public: |
4122 | /// Optional attribute setters for QuantizeAndDequantizeV4 |
4123 | struct Attrs { |
4124 | /// Whether the quantization is signed or unsigned. (actually this parameter should |
4125 | /// have been called <b>`signed_output`</b>) |
4126 | /// |
4127 | /// Defaults to true |
4128 | TF_MUST_USE_RESULT Attrs SignedInput(bool x) { |
4129 | Attrs ret = *this; |
4130 | ret.signed_input_ = x; |
4131 | return ret; |
4132 | } |
4133 | |
4134 | /// The bitwidth of the quantization. |
4135 | /// |
4136 | /// Defaults to 8 |
4137 | TF_MUST_USE_RESULT Attrs NumBits(int64 x) { |
4138 | Attrs ret = *this; |
4139 | ret.num_bits_ = x; |
4140 | return ret; |
4141 | } |
4142 | |
4143 | /// Whether the range is given or should be determined from the `input` tensor. |
4144 | /// |
4145 | /// Defaults to false |
4146 | TF_MUST_USE_RESULT Attrs RangeGiven(bool x) { |
4147 | Attrs ret = *this; |
4148 | ret.range_given_ = x; |
4149 | return ret; |
4150 | } |
4151 | |
4152 | /// The 'round_mode' attribute controls which rounding tie-breaking algorithm is |
4153 | /// used when rounding float values to their quantized equivalents. The following |
4154 | /// rounding modes are currently supported: |
4155 | /// |
4156 | /// * HALF_TO_EVEN: this is the default round_mode. |
/// * HALF_UP: round half values towards positive infinity. In this mode 7.5
/// rounds up to 8 and -7.5 rounds up to -7.
4159 | /// |
4160 | /// |
4161 | /// Defaults to "HALF_TO_EVEN" |
4162 | TF_MUST_USE_RESULT Attrs RoundMode(StringPiece x) { |
4163 | Attrs ret = *this; |
4164 | ret.round_mode_ = x; |
4165 | return ret; |
4166 | } |
4167 | |
4168 | /// If True, then the absolute value of the quantized minimum value is the same as |
4169 | /// the quantized maximum value, instead of 1 greater. |
/// i.e. for 8-bit quantization, the minimum value is -127 instead of -128.
4171 | /// |
4172 | /// Defaults to false |
4173 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
4174 | Attrs ret = *this; |
4175 | ret.narrow_range_ = x; |
4176 | return ret; |
4177 | } |
4178 | |
4179 | /// If specified, this axis is treated as a channel or slice axis, and a separate |
4180 | /// quantization range is used for each channel or slice along this axis. |
4181 | /// |
4182 | /// Defaults to -1 |
4183 | TF_MUST_USE_RESULT Attrs Axis(int64 x) { |
4184 | Attrs ret = *this; |
4185 | ret.axis_ = x; |
4186 | return ret; |
4187 | } |
4188 | |
4189 | bool signed_input_ = true; |
4190 | int64 num_bits_ = 8; |
4191 | bool range_given_ = false; |
StringPiece round_mode_ = "HALF_TO_EVEN";
4193 | bool narrow_range_ = false; |
4194 | int64 axis_ = -1; |
4195 | }; |
4196 | QuantizeAndDequantizeV4(const ::tensorflow::Scope& scope, ::tensorflow::Input |
4197 | input, ::tensorflow::Input input_min, |
4198 | ::tensorflow::Input input_max); |
4199 | QuantizeAndDequantizeV4(const ::tensorflow::Scope& scope, ::tensorflow::Input |
4200 | input, ::tensorflow::Input input_min, |
4201 | ::tensorflow::Input input_max, const |
4202 | QuantizeAndDequantizeV4::Attrs& attrs); |
4203 | operator ::tensorflow::Output() const { return output; } |
4204 | operator ::tensorflow::Input() const { return output; } |
4205 | ::tensorflow::Node* node() const { return output.node(); } |
4206 | |
4207 | static Attrs SignedInput(bool x) { |
4208 | return Attrs().SignedInput(x); |
4209 | } |
4210 | static Attrs NumBits(int64 x) { |
4211 | return Attrs().NumBits(x); |
4212 | } |
4213 | static Attrs RangeGiven(bool x) { |
4214 | return Attrs().RangeGiven(x); |
4215 | } |
4216 | static Attrs RoundMode(StringPiece x) { |
4217 | return Attrs().RoundMode(x); |
4218 | } |
4219 | static Attrs NarrowRange(bool x) { |
4220 | return Attrs().NarrowRange(x); |
4221 | } |
4222 | static Attrs Axis(int64 x) { |
4223 | return Attrs().Axis(x); |
4224 | } |
4225 | |
4226 | Operation operation; |
4227 | ::tensorflow::Output output; |
4228 | }; |
4229 | |
4230 | /// Returns the gradient of `QuantizeAndDequantizeV4`. |
4231 | /// |
4232 | /// Returns a gradient of 1 for inputs that are within the quantization range, |
4233 | /// or 0 otherwise. |
4234 | /// |
4235 | /// Args: |
4236 | /// * scope: A Scope object |
4237 | /// |
4238 | /// Returns: |
4239 | /// * `Output` input_backprop |
4240 | /// * `Output` input_min_backprop |
4241 | /// * `Output` input_max_backprop |
4242 | class QuantizeAndDequantizeV4Grad { |
4243 | public: |
4244 | /// Optional attribute setters for QuantizeAndDequantizeV4Grad |
4245 | struct Attrs { |
4246 | /// Defaults to -1 |
4247 | TF_MUST_USE_RESULT Attrs Axis(int64 x) { |
4248 | Attrs ret = *this; |
4249 | ret.axis_ = x; |
4250 | return ret; |
4251 | } |
4252 | |
4253 | int64 axis_ = -1; |
4254 | }; |
4255 | QuantizeAndDequantizeV4Grad(const ::tensorflow::Scope& scope, |
4256 | ::tensorflow::Input gradients, ::tensorflow::Input |
4257 | input, ::tensorflow::Input input_min, |
4258 | ::tensorflow::Input input_max); |
4259 | QuantizeAndDequantizeV4Grad(const ::tensorflow::Scope& scope, |
4260 | ::tensorflow::Input gradients, ::tensorflow::Input |
4261 | input, ::tensorflow::Input input_min, |
4262 | ::tensorflow::Input input_max, const |
4263 | QuantizeAndDequantizeV4Grad::Attrs& attrs); |
4264 | |
4265 | static Attrs Axis(int64 x) { |
4266 | return Attrs().Axis(x); |
4267 | } |
4268 | |
4269 | Operation operation; |
4270 | ::tensorflow::Output input_backprop; |
4271 | ::tensorflow::Output input_min_backprop; |
4272 | ::tensorflow::Output input_max_backprop; |
4273 | }; |
4274 | |
4275 | /// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. |
4276 | /// |
4277 | /// [min_range, max_range] are scalar floats that specify the range for |
4278 | /// the 'input' data. The 'mode' attribute controls exactly which calculations are |
4279 | /// used to convert the float values to their quantized equivalents. The |
4280 | /// 'round_mode' attribute controls which rounding tie-breaking algorithm is used |
4281 | /// when rounding float values to their quantized equivalents. |
4282 | /// |
4283 | /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: |
4284 | /// |
4285 | /// ``` |
4286 | /// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) |
4287 | /// if T == qint8: out[i] -= (range(T) + 1) / 2.0 |
4288 | /// ``` |
4289 | /// |
/// where `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
4291 | /// |
4292 | /// *MIN_COMBINED Mode Example* |
4293 | /// |
4294 | /// Assume the input is type float and has a possible range of [0.0, 6.0] and the |
4295 | /// output type is quint8 ([0, 255]). The min_range and max_range values should be |
4296 | /// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each |
4297 | /// value of the input by 255/6 and cast to quint8. |
4298 | /// |
/// If the output type were qint8 ([-128, 127]), the operation would additionally
/// subtract 128 from each value prior to casting, so that the range of values
/// aligns with the range of qint8.
4302 | /// |
4303 | /// If the mode is 'MIN_FIRST', then this approach is used: |
4304 | /// |
4305 | /// ``` |
4306 | /// num_discrete_values = 1 << (# of bits in T) |
4307 | /// range_adjust = num_discrete_values / (num_discrete_values - 1) |
4308 | /// range = (range_max - range_min) * range_adjust |
4309 | /// range_scale = num_discrete_values / range |
4310 | /// quantized = round(input * range_scale) - round(range_min * range_scale) + |
4311 | /// numeric_limits<T>::min() |
4312 | /// quantized = max(quantized, numeric_limits<T>::min()) |
4313 | /// quantized = min(quantized, numeric_limits<T>::max()) |
4314 | /// ``` |
4315 | /// |
/// The biggest difference between this and MIN_COMBINED is that the minimum range
/// is rounded first, before it's subtracted from the rounded value. With
/// MIN_COMBINED, a small bias is introduced, and repeated iterations of quantizing
/// and dequantizing accumulate a larger and larger error.
4320 | /// |
4321 | /// *SCALED mode Example* |
4322 | /// |
4323 | /// `SCALED` mode matches the quantization approach used in |
4324 | /// `QuantizeAndDequantize{V2|V3}`. |
4325 | /// |
4326 | /// If the mode is `SCALED`, the quantization is performed by multiplying each |
4327 | /// input value by a scaling_factor. |
4328 | /// The scaling_factor is determined from `min_range` and `max_range` to be as large |
4329 | /// as possible such that the range from `min_range` to `max_range` is representable |
4330 | /// within values of type T. |
4331 | /// |
4332 | /// ```c++ |
4333 | /// |
4334 | /// const int min_T = std::numeric_limits<T>::min(); |
4335 | /// const int max_T = std::numeric_limits<T>::max(); |
4336 | /// const float max_float = std::numeric_limits<float>::max(); |
4337 | /// |
4338 | /// const float scale_factor_from_min_side = |
4339 | /// (min_T * min_range > 0) ? min_T / min_range : max_float; |
4340 | /// const float scale_factor_from_max_side = |
4341 | /// (max_T * max_range > 0) ? max_T / max_range : max_float; |
4342 | /// |
4343 | /// const float scale_factor = std::min(scale_factor_from_min_side, |
4344 | /// scale_factor_from_max_side); |
4345 | /// ``` |
4346 | /// |
4347 | /// We next use the scale_factor to adjust min_range and max_range as follows: |
4348 | /// |
4349 | /// ```c++ |
4350 | /// min_range = min_T / scale_factor; |
4351 | /// max_range = max_T / scale_factor; |
4352 | /// ``` |
4353 | /// |
4354 | /// |
/// e.g. if T = qint8, and initially min_range = -10 and max_range = 9, we would
/// compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scale_factor = 12.8.
/// In this case, min_range would remain -10, but max_range would be adjusted to
/// 127 / 12.8 = 9.921875.
4359 | /// |
4360 | /// So we will quantize input values in the range (-10, 9.921875) to (-128, 127). |
4361 | /// |
4362 | /// The input tensor can now be quantized by clipping values to the range |
4363 | /// `min_range` to `max_range`, then multiplying by scale_factor as follows: |
4364 | /// |
4365 | /// ```c++ |
4366 | /// result = round(min(max_range, max(min_range, input)) * scale_factor) |
4367 | /// ``` |
4368 | /// |
4369 | /// The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of |
4370 | /// this operation. These outputs should be used as the range for any further |
4371 | /// calculations. |
4372 | /// |
4373 | /// |
4374 | /// *narrow_range (bool) attribute* |
4375 | /// |
/// If true, we do not use the minimum quantized value; i.e. for int8 quantized
/// output, values would be restricted to the range -127..127 instead of the
/// full -128..127 range.
4379 | /// This is provided for compatibility with certain inference backends. |
4380 | /// (Only applies to SCALED mode) |
4381 | /// |
4382 | /// |
4383 | /// *axis (int) attribute* |
4384 | /// |
4385 | /// An optional `axis` attribute can specify a dimension index of the input tensor, |
4386 | /// such that quantization ranges will be calculated and applied separately for each |
4387 | /// slice of the tensor along that dimension. This is useful for per-channel |
4388 | /// quantization. |
4389 | /// |
/// If `axis` is specified, `min_range` and `max_range` must be 1-D tensors whose
/// size matches the `axis` dimension of the input and output tensors.
///
/// If `axis` is not specified (the default of -1), per-tensor quantization is
/// performed as normal.
4393 | /// |
4394 | /// |
4395 | /// *ensure_minimum_range (float) attribute* |
4396 | /// |
4397 | /// Ensures the minimum quantization range is at least this value. |
4398 | /// The legacy default value for this is 0.01, but it is strongly suggested to |
4399 | /// set it to 0 for new uses. |
4400 | /// |
4401 | /// |
4402 | /// Args: |
4403 | /// * scope: A Scope object |
4404 | /// * min_range: The minimum value of the quantization range. This value may be adjusted by the |
4405 | /// op depending on other parameters. The adjusted value is written to `output_min`. |
4406 | /// If the `axis` attribute is specified, this must be a 1-D tensor whose size |
4407 | /// matches the `axis` dimension of the input and output tensors. |
4408 | /// * max_range: The maximum value of the quantization range. This value may be adjusted by the |
4409 | /// op depending on other parameters. The adjusted value is written to `output_max`. |
4410 | /// If the `axis` attribute is specified, this must be a 1-D tensor whose size |
4411 | /// matches the `axis` dimension of the input and output tensors. |
4412 | /// |
4413 | /// Returns: |
4414 | /// * `Output` output: The quantized data produced from the float input. |
4415 | /// * `Output` output_min: The final quantization range minimum, used to clip input values before scaling |
4416 | /// and rounding them to quantized values. |
4417 | /// If the `axis` attribute is specified, this will be a 1-D tensor whose size |
4418 | /// matches the `axis` dimension of the input and output tensors. |
4419 | /// * `Output` output_max: The final quantization range maximum, used to clip input values before scaling |
4420 | /// and rounding them to quantized values. |
4421 | /// If the `axis` attribute is specified, this will be a 1-D tensor whose size |
4422 | /// matches the `axis` dimension of the input and output tensors. |
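///
/// A minimal C++ usage sketch (an editorial addition; it assumes
/// "tensorflow/cc/client/client_session.h" and "tensorflow/cc/ops/const_op.h",
/// and the names `root` and `q` are hypothetical):
///
/// ```c++
/// // Quantize floats with the known range [0, 6] to quint8 (MIN_COMBINED mode).
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto input = tensorflow::ops::Const(root, {0.0f, 1.5f, 3.0f, 6.0f});
/// auto q = tensorflow::ops::QuantizeV2(
///     root, input,
///     /*min_range=*/tensorflow::ops::Const(root, 0.0f),
///     /*max_range=*/tensorflow::ops::Const(root, 6.0f),
///     tensorflow::DT_QUINT8);
/// tensorflow::ClientSession session(root);
/// std::vector<tensorflow::Tensor> outputs;
/// // Fetch all three outputs: the quantized data and the (possibly adjusted)
/// // range actually used.
/// TF_CHECK_OK(session.Run({q.output, q.output_min, q.output_max}, &outputs));
/// ```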
4423 | class QuantizeV2 { |
4424 | public: |
4425 | /// Optional attribute setters for QuantizeV2 |
4426 | struct Attrs { |
4427 | /// Defaults to "MIN_COMBINED" |
4428 | TF_MUST_USE_RESULT Attrs Mode(StringPiece x) { |
4429 | Attrs ret = *this; |
4430 | ret.mode_ = x; |
4431 | return ret; |
4432 | } |
4433 | |
4434 | /// Defaults to "HALF_AWAY_FROM_ZERO" |
4435 | TF_MUST_USE_RESULT Attrs RoundMode(StringPiece x) { |
4436 | Attrs ret = *this; |
4437 | ret.round_mode_ = x; |
4438 | return ret; |
4439 | } |
4440 | |
4441 | /// Defaults to false |
4442 | TF_MUST_USE_RESULT Attrs NarrowRange(bool x) { |
4443 | Attrs ret = *this; |
4444 | ret.narrow_range_ = x; |
4445 | return ret; |
4446 | } |
4447 | |
4448 | /// Defaults to -1 |
4449 | TF_MUST_USE_RESULT Attrs Axis(int64 x) { |
4450 | Attrs ret = *this; |
4451 | ret.axis_ = x; |
4452 | return ret; |
4453 | } |
4454 | |
4455 | /// Defaults to 0.01 |
4456 | TF_MUST_USE_RESULT Attrs EnsureMinimumRange(float x) { |
4457 | Attrs ret = *this; |
4458 | ret.ensure_minimum_range_ = x; |
4459 | return ret; |
4460 | } |
4461 | |
StringPiece mode_ = "MIN_COMBINED";
StringPiece round_mode_ = "HALF_AWAY_FROM_ZERO";
4464 | bool narrow_range_ = false; |
4465 | int64 axis_ = -1; |
4466 | float ensure_minimum_range_ = 0.01f; |
4467 | }; |
4468 | QuantizeV2(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
4469 | ::tensorflow::Input min_range, ::tensorflow::Input max_range, |
4470 | DataType T); |
4471 | QuantizeV2(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
4472 | ::tensorflow::Input min_range, ::tensorflow::Input max_range, |
4473 | DataType T, const QuantizeV2::Attrs& attrs); |
4474 | |
4475 | static Attrs Mode(StringPiece x) { |
4476 | return Attrs().Mode(x); |
4477 | } |
4478 | static Attrs RoundMode(StringPiece x) { |
4479 | return Attrs().RoundMode(x); |
4480 | } |
4481 | static Attrs NarrowRange(bool x) { |
4482 | return Attrs().NarrowRange(x); |
4483 | } |
4484 | static Attrs Axis(int64 x) { |
4485 | return Attrs().Axis(x); |
4486 | } |
4487 | static Attrs EnsureMinimumRange(float x) { |
4488 | return Attrs().EnsureMinimumRange(x); |
4489 | } |
4490 | |
4491 | Operation operation; |
4492 | ::tensorflow::Output output; |
4493 | ::tensorflow::Output output_min; |
4494 | ::tensorflow::Output output_max; |
4495 | }; |
4496 | |
4497 | /// Concatenates quantized tensors along one dimension. |
4498 | /// |
4499 | /// Args: |
4500 | /// * scope: A Scope object |
4501 | /// * concat_dim: 0-D. The dimension along which to concatenate. Must be in the |
4502 | /// range [0, rank(values)). |
4503 | /// * values: The `N` Tensors to concatenate. Their ranks and types must match, |
4504 | /// and their sizes must match in all dimensions except `concat_dim`. |
4505 | /// * input_mins: The minimum scalar values for each of the input tensors. |
4506 | /// * input_maxes: The maximum scalar values for each of the input tensors. |
4507 | /// |
4508 | /// Returns: |
4509 | /// * `Output` output: A `Tensor` with the concatenation of values stacked along the |
4510 | /// `concat_dim` dimension. This tensor's shape matches that of `values` except |
4511 | /// in `concat_dim` where it has the sum of the sizes. |
4512 | /// * `Output` output_min: The float value that the minimum quantized output value represents. |
4513 | /// * `Output` output_max: The float value that the maximum quantized output value represents. |
4514 | class QuantizedConcat { |
4515 | public: |
4516 | QuantizedConcat(const ::tensorflow::Scope& scope, ::tensorflow::Input |
4517 | concat_dim, ::tensorflow::InputList values, |
4518 | ::tensorflow::InputList input_mins, ::tensorflow::InputList |
4519 | input_maxes); |
4520 | |
4521 | Operation operation; |
4522 | ::tensorflow::Output output; |
4523 | ::tensorflow::Output output_min; |
4524 | ::tensorflow::Output output_max; |
4525 | }; |
4526 | |
4527 | /// Quantized Instance normalization. |
4528 | /// |
4529 | /// Args: |
4530 | /// * scope: A Scope object |
4531 | /// * x: A 4D input Tensor. |
4532 | /// * x_min: The value represented by the lowest quantized input. |
4533 | /// * x_max: The value represented by the highest quantized input. |
4534 | /// |
4535 | /// Optional attributes (see `Attrs`): |
/// * output_range_given: If True, `given_y_min` and `given_y_max` are used as the
/// output range. Otherwise, the implementation computes the output range.
/// * given_y_min: The value to output in `y_min` if `output_range_given` is True.
/// * given_y_max: The value to output in `y_max` if `output_range_given` is True.
4541 | /// * variance_epsilon: A small float number to avoid dividing by 0. |
4542 | /// * min_separation: Minimum value of `y_max - y_min` |
4543 | /// |
4544 | /// Returns: |
4545 | /// * `Output` y: A 4D Tensor. |
4546 | /// * `Output` y_min: The value represented by the lowest quantized output. |
4547 | /// * `Output` y_max: The value represented by the highest quantized output. |
4548 | class QuantizedInstanceNorm { |
4549 | public: |
4550 | /// Optional attribute setters for QuantizedInstanceNorm |
4551 | struct Attrs { |
/// If True, `given_y_min` and `given_y_max` are used as the output range.
/// Otherwise, the implementation computes the output range.
4555 | /// |
4556 | /// Defaults to false |
4557 | TF_MUST_USE_RESULT Attrs OutputRangeGiven(bool x) { |
4558 | Attrs ret = *this; |
4559 | ret.output_range_given_ = x; |
4560 | return ret; |
4561 | } |
4562 | |
/// The value to output in `y_min` if `output_range_given` is True.
4564 | /// |
4565 | /// Defaults to 0 |
4566 | TF_MUST_USE_RESULT Attrs GivenYMin(float x) { |
4567 | Attrs ret = *this; |
4568 | ret.given_y_min_ = x; |
4569 | return ret; |
4570 | } |
4571 | |
/// The value to output in `y_max` if `output_range_given` is True.
4573 | /// |
4574 | /// Defaults to 0 |
4575 | TF_MUST_USE_RESULT Attrs GivenYMax(float x) { |
4576 | Attrs ret = *this; |
4577 | ret.given_y_max_ = x; |
4578 | return ret; |
4579 | } |
4580 | |
4581 | /// A small float number to avoid dividing by 0. |
4582 | /// |
4583 | /// Defaults to 1e-05 |
4584 | TF_MUST_USE_RESULT Attrs VarianceEpsilon(float x) { |
4585 | Attrs ret = *this; |
4586 | ret.variance_epsilon_ = x; |
4587 | return ret; |
4588 | } |
4589 | |
4590 | /// Minimum value of `y_max - y_min` |
4591 | /// |
4592 | /// Defaults to 0.001 |
4593 | TF_MUST_USE_RESULT Attrs MinSeparation(float x) { |
4594 | Attrs ret = *this; |
4595 | ret.min_separation_ = x; |
4596 | return ret; |
4597 | } |
4598 | |
4599 | bool output_range_given_ = false; |
4600 | float given_y_min_ = 0.0f; |
4601 | float given_y_max_ = 0.0f; |
4602 | float variance_epsilon_ = 1e-05f; |
4603 | float min_separation_ = 0.001f; |
4604 | }; |
4605 | QuantizedInstanceNorm(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
4606 | ::tensorflow::Input x_min, ::tensorflow::Input x_max); |
4607 | QuantizedInstanceNorm(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
4608 | ::tensorflow::Input x_min, ::tensorflow::Input x_max, |
4609 | const QuantizedInstanceNorm::Attrs& attrs); |
4610 | |
4611 | static Attrs OutputRangeGiven(bool x) { |
4612 | return Attrs().OutputRangeGiven(x); |
4613 | } |
4614 | static Attrs GivenYMin(float x) { |
4615 | return Attrs().GivenYMin(x); |
4616 | } |
4617 | static Attrs GivenYMax(float x) { |
4618 | return Attrs().GivenYMax(x); |
4619 | } |
4620 | static Attrs VarianceEpsilon(float x) { |
4621 | return Attrs().VarianceEpsilon(x); |
4622 | } |
4623 | static Attrs MinSeparation(float x) { |
4624 | return Attrs().MinSeparation(x); |
4625 | } |
4626 | |
4627 | Operation operation; |
4628 | ::tensorflow::Output y; |
4629 | ::tensorflow::Output y_min; |
4630 | ::tensorflow::Output y_max; |
4631 | }; |
4632 | |
4633 | /// Reshapes a quantized tensor as per the Reshape op. |
4634 | /// |
4635 | /// ``` |
4636 | /// |
4637 | /// Args: |
4638 | /// * scope: A Scope object |
4639 | /// * shape: Defines the shape of the output tensor. |
4640 | /// * input_min: The minimum value of the input. |
4641 | /// * input_max: The maximum value of the input. |
4642 | /// |
4643 | /// Returns: |
4644 | /// * `Output` output |
4645 | /// * `Output` output_min: This value is copied from input_min. |
4646 | /// * `Output` output_max: This value is copied from input_max. |
4647 | class QuantizedReshape { |
4648 | public: |
4649 | QuantizedReshape(const ::tensorflow::Scope& scope, ::tensorflow::Input tensor, |
4650 | ::tensorflow::Input shape, ::tensorflow::Input input_min, |
4651 | ::tensorflow::Input input_max); |
4652 | |
4653 | Operation operation; |
4654 | ::tensorflow::Output output; |
4655 | ::tensorflow::Output output_min; |
4656 | ::tensorflow::Output output_max; |
4657 | }; |
4658 | |
4659 | /// Returns the rank of a tensor. |
4660 | /// |
4661 | /// This operation returns an integer representing the rank of `input`. |
4662 | /// |
4663 | /// For example: |
4664 | /// |
4665 | /// ``` |
4666 | /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] |
4667 | /// # shape of tensor 't' is [2, 2, 3] |
4668 | /// rank(t) ==> 3 |
4669 | /// ``` |
4670 | /// |
4671 | /// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank |
4672 | /// of a tensor is the number of indices required to uniquely select each element |
4673 | /// of the tensor. Rank is also known as "order", "degree", or "ndims." |
4674 | /// |
4675 | /// Args: |
4676 | /// * scope: A Scope object |
4677 | /// |
4678 | /// Returns: |
4679 | /// * `Output`: The output tensor. |
4680 | class Rank { |
4681 | public: |
4682 | Rank(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
4683 | operator ::tensorflow::Output() const { return output; } |
4684 | operator ::tensorflow::Input() const { return output; } |
4685 | ::tensorflow::Node* node() const { return output.node(); } |
4686 | |
4687 | Operation operation; |
4688 | ::tensorflow::Output output; |
4689 | }; |
4690 | |
4691 | /// Reshapes a tensor. |
4692 | /// |
4693 | /// Given `tensor`, this operation returns a tensor that has the same values |
4694 | /// as `tensor` with shape `shape`. |
4695 | /// |
4696 | /// If one component of 1-D tensor `shape` is the special value -1, the size of that |
4697 | /// dimension is computed so that the total size remains constant. In particular, a |
4698 | /// `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be |
4699 | /// unknown. |
4700 | /// |
4701 | /// The `shape` must be 1-D and the operation returns a tensor with shape |
4702 | /// `shape` filled with the values of `tensor`. In this case, the number of elements |
4703 | /// implied by `shape` must be the same as the number of elements in `tensor`. |
4704 | /// |
4705 | /// It is an error if `shape` is not 1-D. |
4706 | /// |
4707 | /// For example: |
4708 | /// |
4709 | /// ``` |
4710 | /// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] |
4711 | /// # tensor 't' has shape [9] |
4712 | /// reshape(t, [3, 3]) ==> [[1, 2, 3], |
4713 | /// [4, 5, 6], |
4714 | /// [7, 8, 9]] |
4715 | /// |
4716 | /// # tensor 't' is [[[1, 1], [2, 2]], |
4717 | /// # [[3, 3], [4, 4]]] |
4718 | /// # tensor 't' has shape [2, 2, 2] |
4719 | /// reshape(t, [2, 4]) ==> [[1, 1, 2, 2], |
4720 | /// [3, 3, 4, 4]] |
4721 | /// |
4722 | /// # tensor 't' is [[[1, 1, 1], |
4723 | /// # [2, 2, 2]], |
4724 | /// # [[3, 3, 3], |
4725 | /// # [4, 4, 4]], |
4726 | /// # [[5, 5, 5], |
4727 | /// # [6, 6, 6]]] |
4728 | /// # tensor 't' has shape [3, 2, 3] |
4729 | /// # pass '[-1]' to flatten 't' |
4730 | /// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] |
4731 | /// |
4732 | /// # -1 can also be used to infer the shape |
4733 | /// |
4734 | /// # -1 is inferred to be 9: |
4735 | /// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], |
4736 | /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] |
4737 | /// # -1 is inferred to be 2: |
4738 | /// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], |
4739 | /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] |
4740 | /// # -1 is inferred to be 3: |
4741 | /// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], |
4742 | /// [2, 2, 2], |
4743 | /// [3, 3, 3]], |
4744 | /// [[4, 4, 4], |
4745 | /// [5, 5, 5], |
4746 | /// [6, 6, 6]]] |
4747 | /// |
4748 | /// # tensor 't' is [7] |
4749 | /// # shape `[]` reshapes to a scalar |
4750 | /// reshape(t, []) ==> 7 |
4751 | /// ``` |
4752 | /// |
4753 | /// Args: |
4754 | /// * scope: A Scope object |
4755 | /// * shape: Defines the shape of the output tensor. |
4756 | /// |
4757 | /// Returns: |
4758 | /// * `Output`: The output tensor. |
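///
/// A minimal C++ usage sketch (an editorial addition; it assumes
/// "tensorflow/cc/client/client_session.h" and "tensorflow/cc/ops/const_op.h",
/// and the names `root`, `t`, `r`, and `flat` are hypothetical):
///
/// ```c++
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto t = tensorflow::ops::Const(root, {1, 2, 3, 4, 5, 6, 7, 8, 9});
/// auto r = tensorflow::ops::Reshape(root, t, {3, 3});   // explicit 3x3 shape
/// auto flat = tensorflow::ops::Reshape(root, r, {-1});  // -1 flattens back to [9]
/// tensorflow::ClientSession session(root);
/// std::vector<tensorflow::Tensor> outputs;
/// TF_CHECK_OK(session.Run({r, flat}, &outputs));
/// ```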
4759 | class Reshape { |
4760 | public: |
4761 | Reshape(const ::tensorflow::Scope& scope, ::tensorflow::Input tensor, |
4762 | ::tensorflow::Input shape); |
4763 | operator ::tensorflow::Output() const { return output; } |
4764 | operator ::tensorflow::Input() const { return output; } |
4765 | ::tensorflow::Node* node() const { return output.node(); } |
4766 | |
4767 | Operation operation; |
4768 | ::tensorflow::Output output; |
4769 | }; |
4770 | |
4771 | /// Assign `value` to the sliced l-value reference of `ref`. |
4772 | /// |
4773 | /// The values of `value` are assigned to the positions in the variable |
4774 | /// `ref` that are selected by the slice parameters. The slice parameters |
/// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
4776 | /// |
/// NOTE: this op currently does not support broadcasting and so `value`'s
4778 | /// shape must be exactly the shape produced by the slice of `ref`. |
4779 | /// |
4780 | /// Args: |
4781 | /// * scope: A Scope object |
4782 | /// |
4783 | /// Returns: |
4784 | /// * the created `Operation` |
4785 | class ResourceStridedSliceAssign { |
4786 | public: |
4787 | /// Optional attribute setters for ResourceStridedSliceAssign |
4788 | struct Attrs { |
4789 | /// Defaults to 0 |
4790 | TF_MUST_USE_RESULT Attrs BeginMask(int64 x) { |
4791 | Attrs ret = *this; |
4792 | ret.begin_mask_ = x; |
4793 | return ret; |
4794 | } |
4795 | |
4796 | /// Defaults to 0 |
4797 | TF_MUST_USE_RESULT Attrs EndMask(int64 x) { |
4798 | Attrs ret = *this; |
4799 | ret.end_mask_ = x; |
4800 | return ret; |
4801 | } |
4802 | |
4803 | /// Defaults to 0 |
4804 | TF_MUST_USE_RESULT Attrs EllipsisMask(int64 x) { |
4805 | Attrs ret = *this; |
4806 | ret.ellipsis_mask_ = x; |
4807 | return ret; |
4808 | } |
4809 | |
4810 | /// Defaults to 0 |
4811 | TF_MUST_USE_RESULT Attrs NewAxisMask(int64 x) { |
4812 | Attrs ret = *this; |
4813 | ret.new_axis_mask_ = x; |
4814 | return ret; |
4815 | } |
4816 | |
4817 | /// Defaults to 0 |
4818 | TF_MUST_USE_RESULT Attrs ShrinkAxisMask(int64 x) { |
4819 | Attrs ret = *this; |
4820 | ret.shrink_axis_mask_ = x; |
4821 | return ret; |
4822 | } |
4823 | |
4824 | int64 begin_mask_ = 0; |
4825 | int64 end_mask_ = 0; |
4826 | int64 ellipsis_mask_ = 0; |
4827 | int64 new_axis_mask_ = 0; |
4828 | int64 shrink_axis_mask_ = 0; |
4829 | }; |
4830 | ResourceStridedSliceAssign(const ::tensorflow::Scope& scope, |
4831 | ::tensorflow::Input ref, ::tensorflow::Input begin, |
4832 | ::tensorflow::Input end, ::tensorflow::Input |
4833 | strides, ::tensorflow::Input value); |
4834 | ResourceStridedSliceAssign(const ::tensorflow::Scope& scope, |
4835 | ::tensorflow::Input ref, ::tensorflow::Input begin, |
4836 | ::tensorflow::Input end, ::tensorflow::Input |
4837 | strides, ::tensorflow::Input value, const |
4838 | ResourceStridedSliceAssign::Attrs& attrs); |
4839 | operator ::tensorflow::Operation() const { return operation; } |
4840 | |
4841 | static Attrs BeginMask(int64 x) { |
4842 | return Attrs().BeginMask(x); |
4843 | } |
4844 | static Attrs EndMask(int64 x) { |
4845 | return Attrs().EndMask(x); |
4846 | } |
4847 | static Attrs EllipsisMask(int64 x) { |
4848 | return Attrs().EllipsisMask(x); |
4849 | } |
4850 | static Attrs NewAxisMask(int64 x) { |
4851 | return Attrs().NewAxisMask(x); |
4852 | } |
4853 | static Attrs ShrinkAxisMask(int64 x) { |
4854 | return Attrs().ShrinkAxisMask(x); |
4855 | } |
4856 | |
4857 | Operation operation; |
4858 | }; |
4859 | |
4860 | /// Reverses variable length slices. |
4861 | /// |
4862 | /// This op first slices `input` along the dimension `batch_dim`, and for each |
4863 | /// slice `i`, reverses the first `seq_lengths[i]` elements along |
4864 | /// the dimension `seq_dim`. |
4865 | /// |
4866 | /// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, |
4867 | /// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. |
4868 | /// |
4869 | /// The output slice `i` along dimension `batch_dim` is then given by input |
4870 | /// slice `i`, with the first `seq_lengths[i]` slices along dimension |
4871 | /// `seq_dim` reversed. |
4872 | /// |
4873 | /// For example: |
4874 | /// |
4875 | /// ``` |
4876 | /// # Given this: |
4877 | /// batch_dim = 0 |
4878 | /// seq_dim = 1 |
4879 | /// input.dims = (4, 8, ...) |
4880 | /// seq_lengths = [7, 2, 3, 5] |
4881 | /// |
4882 | /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: |
/// output[0, 0:7, :, ...] = input[0, 6::-1, :, ...]
/// output[1, 0:2, :, ...] = input[1, 1::-1, :, ...]
/// output[2, 0:3, :, ...] = input[2, 2::-1, :, ...]
/// output[3, 0:5, :, ...] = input[3, 4::-1, :, ...]
///
/// # while entries past seq_lens are copied through:
/// output[0, 7:, :, ...] = input[0, 7:, :, ...]
/// output[1, 2:, :, ...] = input[1, 2:, :, ...]
/// output[2, 3:, :, ...] = input[2, 3:, :, ...]
/// output[3, 5:, :, ...] = input[3, 5:, :, ...]
4893 | /// ``` |
4894 | /// |
4895 | /// In contrast, if: |
4896 | /// |
4897 | /// ``` |
4898 | /// # Given this: |
4899 | /// batch_dim = 2 |
4900 | /// seq_dim = 0 |
4901 | /// input.dims = (8, ?, 4, ...) |
4902 | /// seq_lengths = [7, 2, 3, 5] |
4903 | /// |
4904 | /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: |
/// output[0:7, :, 0, :, ...] = input[6::-1, :, 0, :, ...]
/// output[0:2, :, 1, :, ...] = input[1::-1, :, 1, :, ...]
/// output[0:3, :, 2, :, ...] = input[2::-1, :, 2, :, ...]
/// output[0:5, :, 3, :, ...] = input[4::-1, :, 3, :, ...]
///
/// # while entries past seq_lens are copied through:
/// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
/// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
/// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
/// output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
4915 | /// ``` |
4916 | /// |
4917 | /// Args: |
4918 | /// * scope: A Scope object |
4919 | /// * input: The input to reverse. |
4920 | /// * seq_lengths: 1-D with length `input.dims(batch_dim)` and |
4921 | /// `max(seq_lengths) <= input.dims(seq_dim)` |
4922 | /// * seq_dim: The dimension which is partially reversed. |
4923 | /// |
4924 | /// Optional attributes (see `Attrs`): |
4925 | /// * batch_dim: The dimension along which reversal is performed. |
4926 | /// |
4927 | /// Returns: |
4928 | /// * `Output`: The partially reversed input. It has the same shape as `input`. |
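///
/// A minimal C++ usage sketch (an editorial addition; it assumes
/// "tensorflow/cc/client/client_session.h" and "tensorflow/cc/ops/const_op.h",
/// and the names `root` and `rev` are hypothetical):
///
/// ```c++
/// // Reverse the first seq_lengths[i] elements of row i along dimension 1.
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto input = tensorflow::ops::Const(root, {{1, 2, 3, 4}, {5, 6, 7, 8}});
/// auto seq_lengths = tensorflow::ops::Const(root, {3, 2});
/// auto rev = tensorflow::ops::ReverseSequence(root, input, seq_lengths,
///                                             /*seq_dim=*/1);
/// tensorflow::ClientSession session(root);
/// std::vector<tensorflow::Tensor> outputs;
/// TF_CHECK_OK(session.Run({rev}, &outputs));
/// // outputs[0] ==> [[3, 2, 1, 4], [6, 5, 7, 8]]
/// ```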
4929 | class ReverseSequence { |
4930 | public: |
4931 | /// Optional attribute setters for ReverseSequence |
4932 | struct Attrs { |
4933 | /// The dimension along which reversal is performed. |
4934 | /// |
4935 | /// Defaults to 0 |
4936 | TF_MUST_USE_RESULT Attrs BatchDim(int64 x) { |
4937 | Attrs ret = *this; |
4938 | ret.batch_dim_ = x; |
4939 | return ret; |
4940 | } |
4941 | |
4942 | int64 batch_dim_ = 0; |
4943 | }; |
4944 | ReverseSequence(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
4945 | ::tensorflow::Input seq_lengths, int64 seq_dim); |
4946 | ReverseSequence(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
4947 | ::tensorflow::Input seq_lengths, int64 seq_dim, const |
4948 | ReverseSequence::Attrs& attrs); |
4949 | operator ::tensorflow::Output() const { return output; } |
4950 | operator ::tensorflow::Input() const { return output; } |
4951 | ::tensorflow::Node* node() const { return output.node(); } |
4952 | |
4953 | static Attrs BatchDim(int64 x) { |
4954 | return Attrs().BatchDim(x); |
4955 | } |
4956 | |
4957 | Operation operation; |
4958 | ::tensorflow::Output output; |
4959 | }; |
4960 | |
4961 | /// Reverses specific dimensions of a tensor. |
4962 | /// |
/// Given a `tensor` and an `int32` tensor `axis` representing the set of
/// dimensions of `tensor` to reverse, this operation reverses each dimension
/// `i` for which there exists `j` s.t. `axis[j] == i`.
///
/// `tensor` can have up to 8 dimensions. `axis` may contain zero or more
/// entries. If an index is specified more than once, an InvalidArgument error
/// is raised.
4970 | /// |
4971 | /// For example: |
4972 | /// |
4973 | /// ``` |
4974 | /// # tensor 't' is [[[[ 0, 1, 2, 3], |
4975 | /// # [ 4, 5, 6, 7], |
4976 | /// # [ 8, 9, 10, 11]], |
4977 | /// # [[12, 13, 14, 15], |
4978 | /// # [16, 17, 18, 19], |
4979 | /// # [20, 21, 22, 23]]]] |
4980 | /// # tensor 't' shape is [1, 2, 3, 4] |
4981 | /// |
4982 | /// # 'dims' is [3] or 'dims' is [-1] |
4983 | /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], |
4984 | /// [ 7, 6, 5, 4], |
4985 | /// [ 11, 10, 9, 8]], |
4986 | /// [[15, 14, 13, 12], |
4987 | /// [19, 18, 17, 16], |
4988 | /// [23, 22, 21, 20]]]] |
4989 | /// |
4990 | /// # 'dims' is '[1]' (or 'dims' is '[-3]') |
/// reverse(t, dims) ==> [[[[12, 13, 14, 15],
///                         [16, 17, 18, 19],
///                         [20, 21, 22, 23]],
///                        [[ 0,  1,  2,  3],
///                         [ 4,  5,  6,  7],
///                         [ 8,  9, 10, 11]]]]
4997 | /// |
4998 | /// # 'dims' is '[2]' (or 'dims' is '[-2]') |
/// reverse(t, dims) ==> [[[[8, 9, 10, 11],
///                         [4, 5, 6, 7],
///                         [0, 1, 2, 3]],
///                        [[20, 21, 22, 23],
///                         [16, 17, 18, 19],
///                         [12, 13, 14, 15]]]]
5005 | /// ``` |
5006 | /// |
5007 | /// Args: |
5008 | /// * scope: A Scope object |
5009 | /// * tensor: Up to 8-D. |
5010 | /// * axis: 1-D. The indices of the dimensions to reverse. Must be in the range |
5011 | /// `[-rank(tensor), rank(tensor))`. |
5012 | /// |
5013 | /// Returns: |
5014 | /// * `Output`: The same shape as `tensor`. |
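///
/// A minimal C++ usage sketch (an editorial addition; it assumes
/// "tensorflow/cc/client/client_session.h" and "tensorflow/cc/ops/const_op.h",
/// and the names `root` and `rev` are hypothetical):
///
/// ```c++
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto t = tensorflow::ops::Const(root, {{1, 2, 3}, {4, 5, 6}});
/// auto rev = tensorflow::ops::Reverse(root, t, /*axis=*/{1});  // reverse columns
/// tensorflow::ClientSession session(root);
/// std::vector<tensorflow::Tensor> outputs;
/// TF_CHECK_OK(session.Run({rev}, &outputs));
/// // outputs[0] ==> [[3, 2, 1], [6, 5, 4]]
/// ```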
5015 | class Reverse { |
5016 | public: |
5017 | Reverse(const ::tensorflow::Scope& scope, ::tensorflow::Input tensor, |
5018 | ::tensorflow::Input axis); |
5019 | operator ::tensorflow::Output() const { return output; } |
5020 | operator ::tensorflow::Input() const { return output; } |
5021 | ::tensorflow::Node* node() const { return output.node(); } |
5022 | |
5023 | Operation operation; |
5024 | ::tensorflow::Output output; |
5025 | }; |
5026 | |
5027 | /// Scatters `updates` into a tensor of shape `shape` according to `indices`. |
5028 | /// |
5029 | /// Scatter sparse `updates` according to individual values at the specified |
5030 | /// `indices`. This op returns an output tensor with the `shape` you specify. This |
5031 | /// op is the inverse of the `tf.gather_nd` operator which extracts values or slices |
5032 | /// from a given tensor. |
5033 | /// |
5034 | /// This operation is similar to `tf.tensor_scatter_nd_add`, except that the tensor |
5035 | /// is zero-initialized. Calling `tf.scatter_nd(indices, updates, shape)` |
5036 | /// is identical to calling |
5037 | /// `tf.tensor_scatter_nd_add(tf.zeros(shape, updates.dtype), indices, updates)` |
5038 | /// |
5039 | /// If `indices` contains duplicates, the associated `updates` are accumulated |
5040 | /// (summed) into the output tensor. |
5041 | /// |
5042 | /// **WARNING**: For floating-point data types, the output may be nondeterministic. |
5043 | /// This is because the order in which the updates are applied is nondeterministic |
5044 | /// and when floating-point numbers are added in different orders the resulting |
5045 | /// numerical approximation error can be slightly different. However, the output |
5046 | /// will be deterministic if op determinism is enabled via |
5047 | /// `tf.config.experimental.enable_op_determinism`. |
5048 | /// |
5049 | /// `indices` is an integer tensor containing indices into the output tensor. The |
5050 | /// last dimension of `indices` can be at most the rank of `shape`: |
5051 | /// |
5052 | /// indices.shape[-1] <= shape.rank |
5053 | /// |
5054 | /// The last dimension of `indices` corresponds to indices of elements |
5055 | /// (if `indices.shape[-1] = shape.rank`) or slices |
5056 | /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of |
5057 | /// `shape`. |
5058 | /// |
5059 | /// `updates` is a tensor with shape: |
5060 | /// |
5061 | /// indices.shape[:-1] + shape[indices.shape[-1]:] |
5062 | /// |
5063 | /// The simplest form of the scatter op is to insert individual elements in |
5064 | /// a tensor by index. Consider an example where you want to insert 4 scattered |
5065 | /// elements in a rank-1 tensor with 8 elements. |
5066 | /// |
5067 | /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> |
5068 | /// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt> |
5069 | /// </div> |
5070 | /// |
5071 | /// In Python, this scatter operation would look like this: |
5072 | /// |
5073 | /// ```python |
5074 | /// indices = tf.constant([[4], [3], [1], [7]]) |
5075 | /// updates = tf.constant([9, 10, 11, 12]) |
5076 | /// shape = tf.constant([8]) |
5077 | /// scatter = tf.scatter_nd(indices, updates, shape) |
5078 | /// print(scatter) |
5079 | /// ``` |
5080 | /// |
5081 | /// The resulting tensor would look like this: |
5082 | /// |
5083 | /// [0, 11, 0, 10, 9, 0, 0, 12] |
5084 | /// |
5085 | /// You can also insert entire slices of a higher rank tensor all at once. For |
5086 | /// example, you can insert two slices in the first dimension of a rank-3 tensor |
5087 | /// with two matrices of new values. |
5088 | /// |
5089 | /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> |
5090 | /// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt> |
5091 | /// </div> |
5092 | /// |
5093 | /// In Python, this scatter operation would look like this: |
5094 | /// |
5095 | /// ```python |
5096 | /// indices = tf.constant([[1], [3]]) |
5097 | /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], |
5098 | /// [7, 7, 7, 7], [8, 8, 8, 8]], |
5099 | /// [[5, 5, 5, 5], [6, 6, 6, 6], |
5100 | /// [7, 7, 7, 7], [8, 8, 8, 8]]]) |
5101 | /// shape = tf.constant([4, 4, 4]) |
5102 | /// scatter = tf.scatter_nd(indices, updates, shape) |
5103 | /// print(scatter) |
5104 | /// ``` |
5105 | /// |
5106 | /// The resulting tensor would look like this: |
5107 | /// |
5108 | /// [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], |
5109 | /// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], |
5110 | /// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], |
5111 | /// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]] |
5112 | /// |
/// Note that on CPU, if an out-of-bounds index is found, an error is returned.
/// On GPU, if an out-of-bounds index is found, the index is ignored.
5115 | /// |
5116 | /// Args: |
5117 | /// * scope: A Scope object |
5118 | /// * indices: Tensor of indices. |
5119 | /// * updates: Values to scatter into the output tensor. |
5120 | /// * shape: 1-D. The shape of the output tensor. |
5121 | /// |
5122 | /// Returns: |
5123 | /// * `Output`: A new tensor with the given shape and updates applied according |
5124 | /// to the indices. |
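///
/// A C++ equivalent of the first Python example above (an editorial addition;
/// it assumes "tensorflow/cc/client/client_session.h" and
/// "tensorflow/cc/ops/const_op.h", and the names `root` and `scatter` are
/// hypothetical):
///
/// ```c++
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto indices = tensorflow::ops::Const(root, {{4}, {3}, {1}, {7}});
/// auto updates = tensorflow::ops::Const(root, {9, 10, 11, 12});
/// auto shape = tensorflow::ops::Const(root, {8});
/// auto scatter = tensorflow::ops::ScatterNd(root, indices, updates, shape);
/// tensorflow::ClientSession session(root);
/// std::vector<tensorflow::Tensor> outputs;
/// TF_CHECK_OK(session.Run({scatter}, &outputs));
/// // outputs[0] ==> [0, 11, 0, 10, 9, 0, 0, 12]
/// ```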
5125 | class ScatterNd { |
5126 | public: |
5127 | ScatterNd(const ::tensorflow::Scope& scope, ::tensorflow::Input indices, |
5128 | ::tensorflow::Input updates, ::tensorflow::Input shape); |
5129 | operator ::tensorflow::Output() const { return output; } |
5130 | operator ::tensorflow::Input() const { return output; } |
5131 | ::tensorflow::Node* node() const { return output.node(); } |
5132 | |
5133 | Operation operation; |
5134 | ::tensorflow::Output output; |
5135 | }; |
5136 | |
5137 | /// Applies sparse addition to `input` using individual values or slices |
5138 | /// |
/// from `updates` according to `indices`. The updates are non-aliasing:
5140 | /// `input` is only modified in-place if no other operations will use it. |
5141 | /// Otherwise, a copy of `input` is made. This operation has a gradient with |
5142 | /// respect to both `input` and `updates`. |
5143 | /// |
5144 | /// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. |
5145 | /// |
/// `indices` must be an integer tensor, containing indices into `input`.
/// It must have shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
5148 | /// |
5149 | /// The innermost dimension of `indices` (with length `K`) corresponds to |
5150 | /// indices into elements (if `K = P`) or `(P-K)`-dimensional slices |
5151 | /// (if `K < P`) along the `K`th dimension of `input`. |
5152 | /// |
/// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
5154 | /// |
5155 | /// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ |
5156 | /// |
/// For example, say we want to add 4 scattered elements to a rank-1 tensor with
/// 8 elements. In Python, that addition would look like this:
5159 | /// |
/// ```python
/// input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
/// indices = tf.constant([[4], [3], [1], [7]])
/// updates = tf.constant([9, 10, 11, 12])
/// output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
/// with tf.Session() as sess:
///   print(sess.run(output))
/// ```
5166 | /// |
5167 | /// The resulting value `output` would look like this: |
5168 | /// |
5169 | /// [1, 13, 3, 14, 14, 6, 7, 20] |
5170 | /// |
5171 | /// See `tf.scatter_nd` for more details about how to make updates to slices. |
5172 | /// |
5173 | /// Args: |
5174 | /// * scope: A Scope object |
5175 | /// * input: A Tensor. |
5176 | /// * indices: A Tensor. Must be one of the following types: `int32`, `int64`. |
5177 | /// A tensor of indices into `input`. |
/// * updates: A Tensor. Must have the same type as `input`. A tensor of updated values
5179 | /// to add to `input`. |
5180 | /// |
5181 | /// Returns: |
5182 | /// * `Output`: A `Tensor` with the same shape as `input`, containing values of `input` |
5183 | /// updated with `updates`. |
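///
/// A C++ equivalent of the Python example above (an editorial addition; it
/// assumes "tensorflow/cc/client/client_session.h" and
/// "tensorflow/cc/ops/const_op.h", and the names `root` and `out` are
/// hypothetical):
///
/// ```c++
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto input = tensorflow::ops::Const(root, {1, 2, 3, 4, 5, 6, 7, 8});
/// auto indices = tensorflow::ops::Const(root, {{4}, {3}, {1}, {7}});
/// auto updates = tensorflow::ops::Const(root, {9, 10, 11, 12});
/// auto out = tensorflow::ops::ScatterNdNonAliasingAdd(root, input, indices,
///                                                     updates);
/// tensorflow::ClientSession session(root);
/// std::vector<tensorflow::Tensor> outputs;
/// TF_CHECK_OK(session.Run({out}, &outputs));
/// // outputs[0] ==> [1, 13, 3, 14, 14, 6, 7, 20]
/// ```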
5184 | class ScatterNdNonAliasingAdd { |
5185 | public: |
5186 | ScatterNdNonAliasingAdd(const ::tensorflow::Scope& scope, ::tensorflow::Input |
5187 | input, ::tensorflow::Input indices, ::tensorflow::Input |
5188 | updates); |
5189 | operator ::tensorflow::Output() const { return output; } |
5190 | operator ::tensorflow::Input() const { return output; } |
5191 | ::tensorflow::Node* node() const { return output.node(); } |
5192 | |
5193 | Operation operation; |
5194 | ::tensorflow::Output output; |
5195 | }; |
5196 | |
5197 | /// Returns the shape of a tensor. |
5198 | /// |
5199 | /// This operation returns a 1-D integer tensor representing the shape of `input`. |
5200 | /// |
5201 | /// For example: |
5202 | /// |
5203 | /// ``` |
5204 | /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] |
5205 | /// shape(t) ==> [2, 2, 3] |
5206 | /// ``` |
5207 | /// |
5208 | /// Args: |
5209 | /// * scope: A Scope object |
5210 | /// |
5211 | /// Returns: |
5212 | /// * `Output`: The output tensor. |
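///
/// A minimal C++ usage sketch (an editorial addition; it assumes
/// "tensorflow/cc/ops/const_op.h", and the names `root`, `s`, and `s64` are
/// hypothetical):
///
/// ```c++
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto t = tensorflow::ops::Const(root, {{{1, 1, 1}, {2, 2, 2}},
///                                        {{3, 3, 3}, {4, 4, 4}}});
/// auto s = tensorflow::ops::Shape(root, t);  // ==> [2, 2, 3] as int32
/// // Request int64 output via the OutType attribute instead:
/// auto s64 = tensorflow::ops::Shape(
///     root, t, tensorflow::ops::Shape::OutType(tensorflow::DT_INT64));
/// ```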
5213 | class Shape { |
5214 | public: |
5215 | /// Optional attribute setters for Shape |
5216 | struct Attrs { |
5217 | /// Defaults to DT_INT32 |
5218 | TF_MUST_USE_RESULT Attrs OutType(DataType x) { |
5219 | Attrs ret = *this; |
5220 | ret.out_type_ = x; |
5221 | return ret; |
5222 | } |
5223 | |
5224 | DataType out_type_ = DT_INT32; |
5225 | }; |
5226 | Shape(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
5227 | Shape(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const |
5228 | Shape::Attrs& attrs); |
5229 | operator ::tensorflow::Output() const { return output; } |
5230 | operator ::tensorflow::Input() const { return output; } |
5231 | ::tensorflow::Node* node() const { return output.node(); } |
5232 | |
5233 | static Attrs OutType(DataType x) { |
5234 | return Attrs().OutType(x); |
5235 | } |
5236 | |
5237 | Operation operation; |
5238 | ::tensorflow::Output output; |
5239 | }; |
5240 | |
5241 | /// Returns shape of tensors. |
5242 | /// |
/// This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
5244 | /// |
5245 | /// Args: |
5246 | /// * scope: A Scope object |
5247 | /// |
5248 | /// Returns: |
5249 | /// * `OutputList`: The output tensor. |
5250 | class ShapeN { |
5251 | public: |
5252 | /// Optional attribute setters for ShapeN |
5253 | struct Attrs { |
5254 | /// Defaults to DT_INT32 |
5255 | TF_MUST_USE_RESULT Attrs OutType(DataType x) { |
5256 | Attrs ret = *this; |
5257 | ret.out_type_ = x; |
5258 | return ret; |
5259 | } |
5260 | |
5261 | DataType out_type_ = DT_INT32; |
5262 | }; |
5263 | ShapeN(const ::tensorflow::Scope& scope, ::tensorflow::InputList input); |
5264 | ShapeN(const ::tensorflow::Scope& scope, ::tensorflow::InputList input, const |
5265 | ShapeN::Attrs& attrs); |
5266 | ::tensorflow::Output operator[](size_t index) const { return output[index]; } |
5267 | |
5268 | |
5269 | static Attrs OutType(DataType x) { |
5270 | return Attrs().OutType(x); |
5271 | } |
5272 | |
5273 | Operation operation; |
5274 | ::tensorflow::OutputList output; |
5275 | }; |
5276 | |
5277 | /// Returns the size of a tensor. |
5278 | /// |
5279 | /// This operation returns an integer representing the number of elements in |
5280 | /// `input`. |
5281 | /// |
5282 | /// For example: |
5283 | /// |
5284 | /// ``` |
/// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
5286 | /// size(t) ==> 12 |
5287 | /// ``` |
5288 | /// |
5289 | /// Args: |
5290 | /// * scope: A Scope object |
5291 | /// |
5292 | /// Returns: |
5293 | /// * `Output`: The output tensor. |
5294 | class Size { |
5295 | public: |
5296 | /// Optional attribute setters for Size |
5297 | struct Attrs { |
5298 | /// Defaults to DT_INT32 |
5299 | TF_MUST_USE_RESULT Attrs OutType(DataType x) { |
5300 | Attrs ret = *this; |
5301 | ret.out_type_ = x; |
5302 | return ret; |
5303 | } |
5304 | |
5305 | DataType out_type_ = DT_INT32; |
5306 | }; |
5307 | Size(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
5308 | Size(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const |
5309 | Size::Attrs& attrs); |
5310 | operator ::tensorflow::Output() const { return output; } |
5311 | operator ::tensorflow::Input() const { return output; } |
5312 | ::tensorflow::Node* node() const { return output.node(); } |
5313 | |
5314 | static Attrs OutType(DataType x) { |
5315 | return Attrs().OutType(x); |
5316 | } |
5317 | |
5318 | Operation operation; |
5319 | ::tensorflow::Output output; |
5320 | }; |
5321 | |
5322 | /// Return a slice from 'input'. |
5323 | /// |
5324 | /// The output tensor is a tensor with dimensions described by 'size' |
5325 | /// whose values are extracted from 'input' starting at the offsets in |
5326 | /// 'begin'. |
5327 | /// |
5328 | /// *Requirements*: |
5329 | /// 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) |
5330 | /// |
5331 | /// Args: |
5332 | /// * scope: A Scope object |
5333 | /// * begin: begin[i] specifies the offset into the 'i'th dimension of |
5334 | /// 'input' to slice from. |
5335 | /// * size: size[i] specifies the number of elements of the 'i'th dimension |
5336 | /// of 'input' to slice. If size[i] is -1, all remaining elements in dimension |
5337 | /// i are included in the slice (i.e. this is equivalent to setting |
5338 | /// size[i] = input.dim_size(i) - begin[i]). |
5339 | /// |
5340 | /// Returns: |
5341 | /// * `Output`: The output tensor. |
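///
/// A minimal C++ usage sketch (an editorial addition; it assumes
/// "tensorflow/cc/client/client_session.h" and "tensorflow/cc/ops/const_op.h",
/// and the names `root` and `s` are hypothetical):
///
/// ```c++
/// tensorflow::Scope root = tensorflow::Scope::NewRootScope();
/// auto input = tensorflow::ops::Const(root, {{{1, 1, 1}, {2, 2, 2}},
///                                            {{3, 3, 3}, {4, 4, 4}}});
/// // Start at offset [1, 0, 0]; take 1 element in dims 0 and 1, and the rest
/// // of dim 2 (size -1).
/// auto s = tensorflow::ops::Slice(root, input, {1, 0, 0}, {1, 1, -1});
/// tensorflow::ClientSession session(root);
/// std::vector<tensorflow::Tensor> outputs;
/// TF_CHECK_OK(session.Run({s}, &outputs));
/// // outputs[0] ==> [[[3, 3, 3]]]
/// ```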
5342 | class Slice { |
5343 | public: |
5344 | Slice(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
5345 | ::tensorflow::Input begin, ::tensorflow::Input size); |
5346 | operator ::tensorflow::Output() const { return output; } |
5347 | operator ::tensorflow::Input() const { return output; } |
5348 | ::tensorflow::Node* node() const { return output.node(); } |
5349 | |
5350 | Operation operation; |
5351 | ::tensorflow::Output output; |
5352 | }; |
5353 | |
5354 | /// Returns a copy of the input tensor. |
5355 | /// |
5356 | /// Args: |
5357 | /// * scope: A Scope object |
5358 | /// |
5359 | /// Returns: |
5360 | /// * `Output`: The output tensor. |
5361 | class Snapshot { |
5362 | public: |
5363 | Snapshot(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
5364 | operator ::tensorflow::Output() const { return output; } |
5365 | operator ::tensorflow::Input() const { return output; } |
5366 | ::tensorflow::Node* node() const { return output.node(); } |
5367 | |
5368 | Operation operation; |
5369 | ::tensorflow::Output output; |
5370 | }; |
5371 | |
5372 | /// SpaceToBatch for 4-D tensors of type T. |
5373 | /// |
5374 | /// This is a legacy version of the more general SpaceToBatchND. |
5375 | /// |
5376 | /// Zero-pads and then rearranges (permutes) blocks of spatial data into batch. |
5377 | /// More specifically, this op outputs a copy of the input tensor where values from |
5378 | /// the `height` and `width` dimensions are moved to the `batch` dimension. After |
5379 | /// the zero-padding, both `height` and `width` of the input must be divisible by the |
5380 | /// block size. |
5381 | /// |
5382 | /// The attr `block_size` must be greater than one. It indicates the block size. |
5383 | /// |
/// * Non-overlapping blocks of size `block_size x block_size` in the height and
5385 | /// width dimensions are rearranged into the batch dimension at each location. |
5386 | /// * The batch of the output tensor is `batch * block_size * block_size`. |
5387 | /// * Both height_pad and width_pad must be divisible by block_size. |
5388 | /// |
5389 | /// The shape of the output will be: |
5390 | /// |
5391 | /// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, |
5392 | /// depth] |
5393 | /// |
5394 | /// Some examples: |
5395 | /// |
5396 | /// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: |
5397 | /// |
5398 | /// ``` |
5399 | /// x = [[[[1], [2]], [[3], [4]]]] |
5400 | /// ``` |
5401 | /// |
5402 | /// The output tensor has shape `[4, 1, 1, 1]` and value: |
5403 | /// |
5404 | /// ``` |
5405 | /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] |
5406 | /// ``` |
5407 | /// |
5408 | /// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: |
5409 | /// |
5410 | /// ``` |
5411 | /// x = [[[[1, 2, 3], [4, 5, 6]], |
5412 | /// [[7, 8, 9], [10, 11, 12]]]] |
5413 | /// ``` |
5414 | /// |
5415 | /// The output tensor has shape `[4, 1, 1, 3]` and value: |
5416 | /// |
5417 | /// ``` |
5418 | /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] |
5419 | /// ``` |
5420 | /// |
5421 | /// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: |
5422 | /// |
5423 | /// ``` |
5424 | /// x = [[[[1], [2], [3], [4]], |
5425 | /// [[5], [6], [7], [8]], |
5426 | /// [[9], [10], [11], [12]], |
5427 | /// [[13], [14], [15], [16]]]] |
5428 | /// ``` |
5429 | /// |
5430 | /// The output tensor has shape `[4, 2, 2, 1]` and value: |
5431 | /// |
5432 | /// ``` |
5433 | /// x = [[[[1], [3]], [[9], [11]]], |
5434 | /// [[[2], [4]], [[10], [12]]], |
5435 | /// [[[5], [7]], [[13], [15]]], |
5436 | /// [[[6], [8]], [[14], [16]]]] |
5437 | /// ``` |
5438 | /// |
5439 | /// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: |
5440 | /// |
5441 | /// ``` |
5442 | /// x = [[[[1], [2], [3], [4]], |
5443 | /// [[5], [6], [7], [8]]], |
5444 | /// [[[9], [10], [11], [12]], |
5445 | /// [[13], [14], [15], [16]]]] |
5446 | /// ``` |
5447 | /// |
5448 | /// The output tensor has shape `[8, 1, 2, 1]` and value: |
5449 | /// |
5450 | /// ``` |
5451 | /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], |
5452 | /// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] |
5453 | /// ``` |
5454 | /// |
5455 | /// Among others, this operation is useful for reducing atrous convolution into |
5456 | /// regular convolution. |
5457 | /// |
5458 | /// Args: |
5459 | /// * scope: A Scope object |
5460 | /// * input: 4-D with shape `[batch, height, width, depth]`. |
5461 | /// * paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies |
5462 | /// the padding of the input with zeros across the spatial dimensions as follows: |
5463 | /// |
5464 | /// paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] |
5465 | /// |
5466 | /// The effective spatial dimensions of the zero-padded input tensor will be: |
5467 | /// |
5468 | /// height_pad = pad_top + height + pad_bottom |
5469 | /// width_pad = pad_left + width + pad_right |
5470 | /// |
5471 | /// Returns: |
5472 | /// * `Output`: The output tensor. |
5473 | class SpaceToBatch { |
5474 | public: |
5475 | SpaceToBatch(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
5476 | ::tensorflow::Input paddings, int64 block_size); |
5477 | operator ::tensorflow::Output() const { return output; } |
5478 | operator ::tensorflow::Input() const { return output; } |
5479 | ::tensorflow::Node* node() const { return output.node(); } |
5480 | |
5481 | Operation operation; |
5482 | ::tensorflow::Output output; |
5483 | }; |
5484 | |
5485 | /// SpaceToBatch for N-D tensors of type T. |
5486 | /// |
5487 | /// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a |
5488 | /// grid of blocks of shape `block_shape`, and interleaves these blocks with the |
5489 | /// "batch" dimension (0) such that in the output, the spatial dimensions |
5490 | /// `[1, ..., M]` correspond to the position within the grid, and the batch |
5491 | /// dimension combines both the position within a spatial block and the original |
5492 | /// batch position. Prior to division into blocks, the spatial dimensions of the |
5493 | /// input are optionally zero padded according to `paddings`. See below for a |
5494 | /// precise description. |
5495 | /// |
5496 | /// This operation is equivalent to the following steps: |
5497 | /// |
5498 | /// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the |
5499 | /// input according to `paddings` to produce `padded` of shape `padded_shape`. |
5500 | /// |
5501 | /// 2. Reshape `padded` to `reshaped_padded` of shape: |
5502 | /// |
5503 | /// [batch] + |
5504 | /// [padded_shape[1] / block_shape[0], |
5505 | /// block_shape[0], |
5506 | /// ..., |
5507 | /// padded_shape[M] / block_shape[M-1], |
5508 | /// block_shape[M-1]] + |
5509 | /// remaining_shape |
5510 | /// |
5511 | /// 3. Permute dimensions of `reshaped_padded` to produce |
5512 | /// `permuted_reshaped_padded` of shape: |
5513 | /// |
5514 | /// block_shape + |
5515 | /// [batch] + |
5516 | /// [padded_shape[1] / block_shape[0], |
5517 | /// ..., |
5518 | /// padded_shape[M] / block_shape[M-1]] + |
5519 | /// remaining_shape |
5520 | /// |
5521 | /// 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch |
5522 | /// dimension, producing an output tensor of shape: |
5523 | /// |
5524 | /// [batch * prod(block_shape)] + |
5525 | /// [padded_shape[1] / block_shape[0], |
5526 | /// ..., |
5527 | /// padded_shape[M] / block_shape[M-1]] + |
5528 | /// remaining_shape |
5529 | /// |
5530 | /// Some examples: |
5531 | /// |
5532 | /// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and |
5533 | /// `paddings = [[0, 0], [0, 0]]`: |
5534 | /// |
5535 | /// ``` |
5536 | /// x = [[[[1], [2]], [[3], [4]]]] |
5537 | /// ``` |
5538 | /// |
5539 | /// The output tensor has shape `[4, 1, 1, 1]` and value: |
5540 | /// |
5541 | /// ``` |
5542 | /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] |
5543 | /// ``` |
5544 | /// |
5545 | /// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and |
5546 | /// `paddings = [[0, 0], [0, 0]]`: |
5547 | /// |
5548 | /// ``` |
5549 | /// x = [[[[1, 2, 3], [4, 5, 6]], |
5550 | /// [[7, 8, 9], [10, 11, 12]]]] |
5551 | /// ``` |
5552 | /// |
5553 | /// The output tensor has shape `[4, 1, 1, 3]` and value: |
5554 | /// |
5555 | /// ``` |
5556 | /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] |
5557 | /// ``` |
5558 | /// |
5559 | /// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and |
5560 | /// `paddings = [[0, 0], [0, 0]]`: |
5561 | /// |
5562 | /// ``` |
5563 | /// x = [[[[1], [2], [3], [4]], |
5564 | /// [[5], [6], [7], [8]], |
5565 | /// [[9], [10], [11], [12]], |
5566 | /// [[13], [14], [15], [16]]]] |
5567 | /// ``` |
5568 | /// |
5569 | /// The output tensor has shape `[4, 2, 2, 1]` and value: |
5570 | /// |
5571 | /// ``` |
5572 | /// x = [[[[1], [3]], [[9], [11]]], |
5573 | /// [[[2], [4]], [[10], [12]]], |
5574 | /// [[[5], [7]], [[13], [15]]], |
5575 | /// [[[6], [8]], [[14], [16]]]] |
5576 | /// ``` |
5577 | /// |
/// (4) For the following input of shape `[2, 2, 4, 1]`, `block_shape = [2, 2]`, and
/// `paddings = [[0, 0], [2, 0]]`:
5580 | /// |
5581 | /// ``` |
5582 | /// x = [[[[1], [2], [3], [4]], |
5583 | /// [[5], [6], [7], [8]]], |
5584 | /// [[[9], [10], [11], [12]], |
5585 | /// [[13], [14], [15], [16]]]] |
5586 | /// ``` |
5587 | /// |
5588 | /// The output tensor has shape `[8, 1, 3, 1]` and value: |
5589 | /// |
5590 | /// ``` |
5591 | /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], |
5592 | /// [[[0], [2], [4]]], [[[0], [10], [12]]], |
5593 | /// [[[0], [5], [7]]], [[[0], [13], [15]]], |
5594 | /// [[[0], [6], [8]]], [[[0], [14], [16]]]] |
5595 | /// ``` |
5596 | /// |
5597 | /// Among others, this operation is useful for reducing atrous convolution into |
5598 | /// regular convolution. |
5599 | /// |
5600 | /// Args: |
5601 | /// * scope: A Scope object |
5602 | /// * input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, |
5603 | /// where spatial_shape has `M` dimensions. |
5604 | /// * block_shape: 1-D with shape `[M]`, all values must be >= 1. |
5605 | /// * paddings: 2-D with shape `[M, 2]`, all values must be >= 0. |
5606 | /// `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension |
5607 | /// `i + 1`, which corresponds to spatial dimension `i`. It is required that |
5608 | /// `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. |
5609 | /// |
5610 | /// Returns: |
5611 | /// * `Output`: The output tensor. |
5612 | class SpaceToBatchND { |
5613 | public: |
5614 | SpaceToBatchND(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
5615 | ::tensorflow::Input block_shape, ::tensorflow::Input paddings); |
5616 | operator ::tensorflow::Output() const { return output; } |
5617 | operator ::tensorflow::Input() const { return output; } |
5618 | ::tensorflow::Node* node() const { return output.node(); } |
5619 | |
5620 | Operation operation; |
5621 | ::tensorflow::Output output; |
5622 | }; |
5623 | |
5624 | /// SpaceToDepth for tensors of type T. |
5625 | /// |
/// Rearranges blocks of spatial data into depth. More specifically,
5627 | /// this op outputs a copy of the input tensor where values from the `height` |
5628 | /// and `width` dimensions are moved to the `depth` dimension. |
5629 | /// The attr `block_size` indicates the input block size. |
5630 | /// |
/// * Non-overlapping blocks of size `block_size x block_size` are rearranged
5632 | /// into depth at each location. |
5633 | /// * The depth of the output tensor is `block_size * block_size * input_depth`. |
5634 | /// * The Y, X coordinates within each block of the input become the high order |
5635 | /// component of the output channel index. |
5636 | /// * The input tensor's height and width must be divisible by block_size. |
5637 | /// |
5638 | /// The `data_format` attr specifies the layout of the input and output tensors |
5639 | /// with the following options: |
5640 | /// "NHWC": `[ batch, height, width, channels ]` |
5641 | /// "NCHW": `[ batch, channels, height, width ]` |
5642 | /// "NCHW_VECT_C": |
5643 | /// `qint8 [ batch, channels / 4, height, width, 4 ]` |
5644 | /// |
/// It is useful to consider the operation as transforming a 6-D Tensor.
/// For example, for data_format = NHWC,
/// each element in the input tensor can be specified via 6 coordinates,
5648 | /// ordered by decreasing memory layout significance as: |
5649 | /// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates |
5650 | /// within the output image, bX, bY means coordinates |
5651 | /// within the input block, iC means input channels). |
5652 | /// The output would be a transpose to the following layout: |
5653 | /// n,oY,oX,bY,bX,iC |
5654 | /// |
5655 | /// This operation is useful for resizing the activations between convolutions |
5656 | /// (but keeping all data), e.g. instead of pooling. It is also useful for training |
5657 | /// purely convolutional models. |
5658 | /// |
5659 | /// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and |
5660 | /// block_size = 2: |
5661 | /// |
5662 | /// ``` |
5663 | /// x = [[[[1], [2]], |
5664 | /// [[3], [4]]]] |
5665 | /// ``` |
5666 | /// |
5667 | /// This operation will output a tensor of shape `[1, 1, 1, 4]`: |
5668 | /// |
5669 | /// ``` |
5670 | /// [[[[1, 2, 3, 4]]]] |
5671 | /// ``` |
5672 | /// |
/// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`;
/// the corresponding output will have a single element (i.e. width and height are
/// both 1) and will have a depth of 4 channels (1 * block_size * block_size).
/// The output element shape is `[1, 1, 4]`.
5677 | /// |
5678 | /// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. |
5679 | /// |
5680 | /// ``` |
5681 | /// x = [[[[1, 2, 3], [4, 5, 6]], |
5682 | /// [[7, 8, 9], [10, 11, 12]]]] |
5683 | /// ``` |
5684 | /// |
/// This operation, for a block_size of 2, will return the following tensor of shape
/// `[1, 1, 1, 12]`:
5687 | /// |
5688 | /// ``` |
5689 | /// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] |
5690 | /// ``` |
5691 | /// |
/// Similarly, for the following input of shape `[1, 4, 4, 1]`, and a block size of 2:
5693 | /// |
5694 | /// ``` |
5695 | /// x = [[[[1], [2], [5], [6]], |
5696 | /// [[3], [4], [7], [8]], |
5697 | /// [[9], [10], [13], [14]], |
5698 | /// [[11], [12], [15], [16]]]] |
5699 | /// ``` |
5700 | /// |
/// the operator will return the following tensor of shape `[1, 2, 2, 4]`:
5702 | /// |
5703 | /// ``` |
5704 | /// x = [[[[1, 2, 3, 4], |
5705 | /// [5, 6, 7, 8]], |
5706 | /// [[9, 10, 11, 12], |
5707 | /// [13, 14, 15, 16]]]] |
5708 | /// ``` |
5709 | /// |
5710 | /// Args: |
/// * scope: A Scope object
/// * input: The input tensor.
/// * block_size: The size of the spatial block.
5713 | /// |
5714 | /// Returns: |
5715 | /// * `Output`: The output tensor. |
5716 | class SpaceToDepth { |
5717 | public: |
5718 | /// Optional attribute setters for SpaceToDepth |
5719 | struct Attrs { |
5720 | /// Defaults to "NHWC" |
5721 | TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) { |
5722 | Attrs ret = *this; |
5723 | ret.data_format_ = x; |
5724 | return ret; |
5725 | } |
5726 | |
5727 | StringPiece data_format_ = "NHWC" ; |
5728 | }; |
5729 | SpaceToDepth(const ::tensorflow::Scope& scope, ::tensorflow::Input input, int64 |
5730 | block_size); |
5731 | SpaceToDepth(const ::tensorflow::Scope& scope, ::tensorflow::Input input, int64 |
5732 | block_size, const SpaceToDepth::Attrs& attrs); |
5733 | operator ::tensorflow::Output() const { return output; } |
5734 | operator ::tensorflow::Input() const { return output; } |
5735 | ::tensorflow::Node* node() const { return output.node(); } |
5736 | |
5737 | static Attrs DataFormat(StringPiece x) { |
5738 | return Attrs().DataFormat(x); |
5739 | } |
5740 | |
5741 | Operation operation; |
5742 | ::tensorflow::Output output; |
5743 | }; |
5744 | |
5745 | /// Splits a tensor into `num_split` tensors along one dimension. |
5746 | /// |
5747 | /// Args: |
5748 | /// * scope: A Scope object |
5749 | /// * axis: 0-D. The dimension along which to split. Must be in the range |
5750 | /// `[-rank(value), rank(value))`. |
5751 | /// * value: The tensor to split. |
/// * num_split: The number of ways to split. Must evenly divide
/// `value.shape[axis]`.
5754 | /// |
5755 | /// Returns: |
/// * `OutputList`: Identically shaped tensors whose shape matches that of `value`
/// except along `axis`, where their sizes are
/// `value.shape[axis] / num_split`.
5759 | class Split { |
5760 | public: |
5761 | Split(const ::tensorflow::Scope& scope, ::tensorflow::Input axis, |
5762 | ::tensorflow::Input value, int64 num_split); |
5763 | ::tensorflow::Output operator[](size_t index) const { return output[index]; } |
5764 | |
5765 | |
5766 | Operation operation; |
5767 | ::tensorflow::OutputList output; |
5768 | }; |
5769 | |
5770 | /// Splits a tensor into `num_split` tensors along one dimension. |
5771 | /// |
5772 | /// Args: |
5773 | /// * scope: A Scope object |
5774 | /// * value: The tensor to split. |
/// * size_splits: List containing the sizes of each output tensor along the split
/// dimension. Must sum to the dimension of `value` along `axis`.
5777 | /// Can contain one -1 indicating that dimension is to be inferred. |
5778 | /// * axis: 0-D. The dimension along which to split. Must be in the range |
5779 | /// `[-rank(value), rank(value))`. |
5780 | /// |
5781 | /// Returns: |
5782 | /// * `OutputList`: Tensors whose shape matches that of `value` |
5783 | /// except along `axis`, where their sizes are |
5784 | /// `size_splits[i]`. |
5785 | class SplitV { |
5786 | public: |
5787 | SplitV(const ::tensorflow::Scope& scope, ::tensorflow::Input value, |
5788 | ::tensorflow::Input size_splits, ::tensorflow::Input axis, int64 |
5789 | num_split); |
5790 | ::tensorflow::Output operator[](size_t index) const { return output[index]; } |
5791 | |
5792 | |
5793 | Operation operation; |
5794 | ::tensorflow::OutputList output; |
5795 | }; |
5796 | |
5797 | /// Removes dimensions of size 1 from the shape of a tensor. |
5798 | /// |
5799 | /// Given a tensor `input`, this operation returns a tensor of the same type with |
5800 | /// all dimensions of size 1 removed. If you don't want to remove all size 1 |
5801 | /// dimensions, you can remove specific size 1 dimensions by specifying |
5802 | /// `axis`. |
5803 | /// |
5804 | /// For example: |
5805 | /// |
5806 | /// ``` |
5807 | /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] |
5808 | /// shape(squeeze(t)) ==> [2, 3] |
5809 | /// ``` |
5810 | /// |
5811 | /// Or, to remove specific size 1 dimensions: |
5812 | /// |
5813 | /// ``` |
5814 | /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] |
5815 | /// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] |
5816 | /// ``` |
5817 | /// |
5818 | /// Args: |
5819 | /// * scope: A Scope object |
5820 | /// * input: The `input` to squeeze. |
5821 | /// |
5822 | /// Optional attributes (see `Attrs`): |
5823 | /// * axis: If specified, only squeezes the dimensions listed. The dimension |
5824 | /// index starts at 0. It is an error to squeeze a dimension that is not 1. Must |
5825 | /// be in the range `[-rank(input), rank(input))`. |
5826 | /// |
5827 | /// Returns: |
5828 | /// * `Output`: Contains the same data as `input`, but has one or more dimensions of |
5829 | /// size 1 removed. |
5830 | class Squeeze { |
5831 | public: |
5832 | /// Optional attribute setters for Squeeze |
5833 | struct Attrs { |
5834 | /// If specified, only squeezes the dimensions listed. The dimension |
5835 | /// index starts at 0. It is an error to squeeze a dimension that is not 1. Must |
5836 | /// be in the range `[-rank(input), rank(input))`. |
5837 | /// |
5838 | /// Defaults to [] |
5839 | TF_MUST_USE_RESULT Attrs Axis(const gtl::ArraySlice<int>& x) { |
5840 | Attrs ret = *this; |
5841 | ret.axis_ = x; |
5842 | return ret; |
5843 | } |
5844 | |
5845 | gtl::ArraySlice<int> axis_ = {}; |
5846 | }; |
5847 | Squeeze(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
5848 | Squeeze(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const |
5849 | Squeeze::Attrs& attrs); |
5850 | operator ::tensorflow::Output() const { return output; } |
5851 | operator ::tensorflow::Input() const { return output; } |
5852 | ::tensorflow::Node* node() const { return output.node(); } |
5853 | |
5854 | static Attrs Axis(const gtl::ArraySlice<int>& x) { |
5855 | return Attrs().Axis(x); |
5856 | } |
5857 | |
5858 | Operation operation; |
5859 | ::tensorflow::Output output; |
5860 | }; |
5861 | |
5862 | /// Stops gradient computation. |
5863 | /// |
5864 | /// When executed in a graph, this op outputs its input tensor as-is. |
5865 | /// |
/// When building ops to compute gradients, this op prevents the contribution of
/// its inputs from being taken into account. Normally, the gradient generator adds ops
/// to a graph to compute the derivatives of a specified 'loss' by recursively
/// finding out inputs that contributed to its computation. If you insert this op
/// in the graph, its inputs are masked from the gradient generator. They are not
/// taken into account for computing gradients.
5872 | /// |
5873 | /// This is useful any time you want to compute a value with TensorFlow but need |
5874 | /// to pretend that the value was a constant. For example, the softmax function |
5875 | /// for a vector x can be written as |
5876 | /// |
5877 | /// ```python |
5878 | /// |
5879 | /// def softmax(x): |
5880 | /// numerator = tf.exp(x) |
5881 | /// denominator = tf.reduce_sum(numerator) |
5882 | /// return numerator / denominator |
5883 | /// ``` |
5884 | /// |
/// This, however, is susceptible to overflow if the values in x are large. An
/// alternative, more stable way is to subtract the maximum of x from each of the
/// values.
5888 | /// |
5889 | /// ```python |
5890 | /// |
5891 | /// def stable_softmax(x): |
5892 | /// z = x - tf.reduce_max(x) |
5893 | /// numerator = tf.exp(z) |
5894 | /// denominator = tf.reduce_sum(numerator) |
5895 | /// return numerator / denominator |
5896 | /// ``` |
5897 | /// |
/// However, when we backprop through the softmax to x, we don't want to backprop
/// through the `tf.reduce_max(x)` calculation (if the max values are not unique
/// then the gradient could flow to the wrong input); we want to treat it as a
/// constant. Therefore, we should write this out as
5902 | /// |
5903 | /// ```python |
5904 | /// |
5905 | /// def stable_softmax(x): |
5906 | /// z = x - tf.stop_gradient(tf.reduce_max(x)) |
5907 | /// numerator = tf.exp(z) |
5908 | /// denominator = tf.reduce_sum(numerator) |
5909 | /// return numerator / denominator |
5910 | /// ``` |
5911 | /// |
5912 | /// Some other examples include: |
5913 | /// |
5914 | /// * The *EM* algorithm where the *M-step* should not involve backpropagation |
5915 | /// through the output of the *E-step*. |
5916 | /// * Contrastive divergence training of Boltzmann machines where, when |
5917 | /// differentiating the energy function, the training must not backpropagate |
5918 | /// through the graph that generated the samples from the model. |
5919 | /// * Adversarial training, where no backprop should happen through the adversarial |
5920 | /// example generation process. |
5921 | /// |
5922 | /// Args: |
5923 | /// * scope: A Scope object |
5924 | /// |
5925 | /// Returns: |
5926 | /// * `Output`: The output tensor. |
5927 | class StopGradient { |
5928 | public: |
5929 | StopGradient(const ::tensorflow::Scope& scope, ::tensorflow::Input input); |
5930 | operator ::tensorflow::Output() const { return output; } |
5931 | operator ::tensorflow::Input() const { return output; } |
5932 | ::tensorflow::Node* node() const { return output.node(); } |
5933 | |
5934 | Operation operation; |
5935 | ::tensorflow::Output output; |
5936 | }; |
5937 | |
5938 | /// Return a strided slice from `input`. |
5939 | /// |
/// Note: most Python users will want to use the Python `Tensor.__getitem__`
/// or `Variable.__getitem__` rather than this op directly.
5942 | /// |
5943 | /// The goal of this op is to produce a new tensor with a subset of |
5944 | /// the elements from the `n` dimensional `input` tensor. The subset is chosen using |
5945 | /// a sequence of `m` sparse range specifications encoded into the arguments |
/// of this function. Note that in some cases
/// `m` could be equal to `n`, but this need not be the case. Each
/// range specification entry can be one of the following:
5949 | /// |
5950 | /// - An ellipsis (...). Ellipses are used to imply zero or more |
5951 | /// dimensions of full-dimension selection and are produced using |
5952 | /// `ellipsis_mask`. For example, `foo[...]` is the identity slice. |
5953 | /// |
5954 | /// - A new axis. This is used to insert a new shape=1 dimension and is |
5955 | /// produced using `new_axis_mask`. For example, `foo[:, ...]` where |
5956 | /// `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. |
5957 | /// |
5958 | /// |
5959 | /// - A range `begin:end:stride`. This is used to specify how much to choose from |
5960 | /// a given dimension. `stride` can be any integer but 0. `begin` is an integer |
5961 | /// which represents the index of the first value to select while `end` represents |
5962 | /// the index of the last value to select. The number of values selected in each |
5963 | /// dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. |
5964 | /// `begin` and `end` can be negative where `-1` is the last element, `-2` is |
5965 | /// the second to last. `begin_mask` controls whether to replace the explicitly |
5966 | /// given `begin` with an implicit effective value of `0` if `stride > 0` and |
5967 | /// `-1` if `stride < 0`. `end_mask` is analogous but produces the number |
5968 | /// required to create the largest open interval. For example, given a shape |
5969 | /// `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do |
5970 | /// not assume this is equivalent to `foo[0:-1]` which has an effective `begin` |
/// and `end` of `0` and `2`. Another example is `foo[:-3:-1]` which reverses the
/// first dimension of a tensor while keeping only its last two elements (in
/// reversed order). For example `foo = [1,2,3,4]; foo[:-3:-1]` is `[4,3]`.
5974 | /// |
/// - A single index. This is used to keep only elements that have a given
///   index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
///   shape `(6,)` tensor. This is encoded in `begin` and `end` and
5978 | /// `shrink_axis_mask`. |
5979 | /// |
/// Each conceptual range specification is encoded in the op's arguments. This
/// encoding is best understood by considering a non-trivial example. In
5982 | /// particular, |
5983 | /// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as |
5984 | /// |
5985 | /// ``` |
5986 | /// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) |
5987 | /// end = [2, 4, x, x, -3, x] |
5988 | /// strides = [1, 1, x, x, -1, 1] |
5989 | /// begin_mask = 1<<4 | 1<<5 = 48 |
5990 | /// end_mask = 1<<5 = 32 |
5991 | /// ellipsis_mask = 1<<3 = 8 |
5992 | /// new_axis_mask = 1<<2 = 4 |
5993 | /// shrink_axis_mask = 1<<0 = 1 |
5994 | /// ``` |
5995 | /// |
5996 | /// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of |
5997 | /// the slice becomes (2, 1, 5, 5, 2, 5). |
5998 | /// Let us walk step by step through each argument specification. |
5999 | /// |
6000 | /// 1. The first argument in the example slice is turned into `begin = 1` and |
6001 | /// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we |
6002 | /// also set the appropriate bit in `shrink_axis_mask`. |
6003 | /// |
6004 | /// 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have |
6005 | /// zero bits contributed. |
6006 | /// |
/// 3. None is a synonym for `tf.newaxis`. This inserts a dimension of size 1
///    into the final shape. Dummy values are contributed to begin,
6009 | /// end and stride, while the new_axis_mask bit is set. |
6010 | /// |
/// 4. `...` grabs the full ranges from as many dimensions as needed to
6012 | /// fully specify a slice for every dimension of the input shape. |
6013 | /// |
6014 | /// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated |
6015 | /// with a dimension that has shape `s` is converted to a positive index |
6016 | /// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion |
6017 | /// is done internally so begin, end and strides receive x, -3, and -1. |
6018 | /// The appropriate begin_mask bit is set to indicate the start range is the |
6019 | /// full range (ignoring the x). |
6020 | /// |
6021 | /// 6. `:` indicates that the entire contents of the corresponding dimension |
6022 | /// is selected. This is equivalent to `::` or `0::1`. begin, end, and strides |
6023 | /// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and |
6024 | /// `end_mask` are also set. |
6025 | /// |
6026 | /// *Requirements*: |
6027 | /// `0 != strides[i] for i in [0, m)` |
6028 | /// `ellipsis_mask must be a power of two (only one ellipsis)` |
6029 | /// |
6030 | /// Args: |
6031 | /// * scope: A Scope object |
6032 | /// * begin: `begin[k]` specifies the offset into the `k`th range specification. |
6033 | /// The exact dimension this corresponds to will be determined by context. |
/// Out-of-bounds values will be silently clamped. If the `k`th bit of
/// `begin_mask` is set then `begin[k]` is ignored and the full range of the
/// appropriate dimension is used instead. Negative values cause indexing
/// to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
6038 | /// * end: `end[i]` is like `begin` with the exception that `end_mask` is |
6039 | /// used to determine full ranges. |
6040 | /// * strides: `strides[i]` specifies the increment in the `i`th specification |
6041 | /// after extracting a given element. Negative indices will reverse |
/// the original order. Out-of-range values are
/// clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`.
6044 | /// |
6045 | /// Optional attributes (see `Attrs`): |
6046 | /// * begin_mask: a bitmask where a bit i being 1 means to ignore the begin |
6047 | /// value and instead use the largest interval possible. At runtime |
6048 | /// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or |
6049 | /// `[-1, n-1]` if `stride[i] < 0` |
6050 | /// * end_mask: analogous to `begin_mask` |
6051 | /// * ellipsis_mask: a bitmask where bit `i` being 1 means the `i`th |
6052 | /// position is actually an ellipsis. One bit at most can be 1. |
6053 | /// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` |
6054 | /// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis |
6055 | /// implicitly creates as many range specifications as necessary to fully |
6056 | /// specify the sliced range for every dimension. For example for a 4-dimensional |
6057 | /// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. |
6058 | /// * new_axis_mask: a bitmask where bit `i` being 1 means the `i`th |
6059 | /// specification creates a new shape 1 dimension. For example |
6060 | /// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. |
6061 | /// * shrink_axis_mask: a bitmask where bit `i` implies that the `i`th |
6062 | /// specification should shrink the dimensionality. begin and end |
6063 | /// must imply a slice of size 1 in the dimension. For example in |
6064 | /// python one might do `foo[:, 3, :]` which would result in |
6065 | /// `shrink_axis_mask` being 2. |
6066 | /// |
6067 | /// Returns: |
6068 | /// * `Output`: The output tensor. |
6069 | class StridedSlice { |
6070 | public: |
6071 | /// Optional attribute setters for StridedSlice |
6072 | struct Attrs { |
6073 | /// a bitmask where a bit i being 1 means to ignore the begin |
6074 | /// value and instead use the largest interval possible. At runtime |
6075 | /// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or |
6076 | /// `[-1, n-1]` if `stride[i] < 0` |
6077 | /// |
6078 | /// Defaults to 0 |
6079 | TF_MUST_USE_RESULT Attrs BeginMask(int64 x) { |
6080 | Attrs ret = *this; |
6081 | ret.begin_mask_ = x; |
6082 | return ret; |
6083 | } |
6084 | |
6085 | /// analogous to `begin_mask` |
6086 | /// |
6087 | /// Defaults to 0 |
6088 | TF_MUST_USE_RESULT Attrs EndMask(int64 x) { |
6089 | Attrs ret = *this; |
6090 | ret.end_mask_ = x; |
6091 | return ret; |
6092 | } |
6093 | |
6094 | /// a bitmask where bit `i` being 1 means the `i`th |
6095 | /// position is actually an ellipsis. One bit at most can be 1. |
6096 | /// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` |
6097 | /// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis |
6098 | /// implicitly creates as many range specifications as necessary to fully |
6099 | /// specify the sliced range for every dimension. For example for a 4-dimensional |
6100 | /// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. |
6101 | /// |
6102 | /// Defaults to 0 |
6103 | TF_MUST_USE_RESULT Attrs EllipsisMask(int64 x) { |
6104 | Attrs ret = *this; |
6105 | ret.ellipsis_mask_ = x; |
6106 | return ret; |
6107 | } |
6108 | |
6109 | /// a bitmask where bit `i` being 1 means the `i`th |
6110 | /// specification creates a new shape 1 dimension. For example |
6111 | /// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. |
6112 | /// |
6113 | /// Defaults to 0 |
6114 | TF_MUST_USE_RESULT Attrs NewAxisMask(int64 x) { |
6115 | Attrs ret = *this; |
6116 | ret.new_axis_mask_ = x; |
6117 | return ret; |
6118 | } |
6119 | |
6120 | /// a bitmask where bit `i` implies that the `i`th |
6121 | /// specification should shrink the dimensionality. begin and end |
6122 | /// must imply a slice of size 1 in the dimension. For example in |
6123 | /// python one might do `foo[:, 3, :]` which would result in |
6124 | /// `shrink_axis_mask` being 2. |
6125 | /// |
6126 | /// Defaults to 0 |
6127 | TF_MUST_USE_RESULT Attrs ShrinkAxisMask(int64 x) { |
6128 | Attrs ret = *this; |
6129 | ret.shrink_axis_mask_ = x; |
6130 | return ret; |
6131 | } |
6132 | |
6133 | int64 begin_mask_ = 0; |
6134 | int64 end_mask_ = 0; |
6135 | int64 ellipsis_mask_ = 0; |
6136 | int64 new_axis_mask_ = 0; |
6137 | int64 shrink_axis_mask_ = 0; |
6138 | }; |
6139 | StridedSlice(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
6140 | ::tensorflow::Input begin, ::tensorflow::Input end, |
6141 | ::tensorflow::Input strides); |
6142 | StridedSlice(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
6143 | ::tensorflow::Input begin, ::tensorflow::Input end, |
6144 | ::tensorflow::Input strides, const StridedSlice::Attrs& attrs); |
6145 | operator ::tensorflow::Output() const { return output; } |
6146 | operator ::tensorflow::Input() const { return output; } |
6147 | ::tensorflow::Node* node() const { return output.node(); } |
6148 | |
6149 | static Attrs BeginMask(int64 x) { |
6150 | return Attrs().BeginMask(x); |
6151 | } |
6152 | static Attrs EndMask(int64 x) { |
6153 | return Attrs().EndMask(x); |
6154 | } |
6155 | static Attrs EllipsisMask(int64 x) { |
6156 | return Attrs().EllipsisMask(x); |
6157 | } |
6158 | static Attrs NewAxisMask(int64 x) { |
6159 | return Attrs().NewAxisMask(x); |
6160 | } |
6161 | static Attrs ShrinkAxisMask(int64 x) { |
6162 | return Attrs().ShrinkAxisMask(x); |
6163 | } |
6164 | |
6165 | Operation operation; |
6166 | ::tensorflow::Output output; |
6167 | }; |
6168 | |
6169 | /// Assign `value` to the sliced l-value reference of `ref`. |
6170 | /// |
6171 | /// The values of `value` are assigned to the positions in the variable |
6172 | /// `ref` that are selected by the slice parameters. The slice parameters |
6173 | /// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. |
6174 | /// |
6175 | /// NOTE this op currently does not support broadcasting and so `value`'s |
6176 | /// shape must be exactly the shape produced by the slice of `ref`. |
6177 | /// |
6178 | /// Args: |
6179 | /// * scope: A Scope object |
6180 | /// |
6181 | /// Returns: |
6182 | /// * `Output`: The output_ref tensor. |
6183 | class StridedSliceAssign { |
6184 | public: |
6185 | /// Optional attribute setters for StridedSliceAssign |
6186 | struct Attrs { |
6187 | /// Defaults to 0 |
6188 | TF_MUST_USE_RESULT Attrs BeginMask(int64 x) { |
6189 | Attrs ret = *this; |
6190 | ret.begin_mask_ = x; |
6191 | return ret; |
6192 | } |
6193 | |
6194 | /// Defaults to 0 |
6195 | TF_MUST_USE_RESULT Attrs EndMask(int64 x) { |
6196 | Attrs ret = *this; |
6197 | ret.end_mask_ = x; |
6198 | return ret; |
6199 | } |
6200 | |
6201 | /// Defaults to 0 |
6202 | TF_MUST_USE_RESULT Attrs EllipsisMask(int64 x) { |
6203 | Attrs ret = *this; |
6204 | ret.ellipsis_mask_ = x; |
6205 | return ret; |
6206 | } |
6207 | |
6208 | /// Defaults to 0 |
6209 | TF_MUST_USE_RESULT Attrs NewAxisMask(int64 x) { |
6210 | Attrs ret = *this; |
6211 | ret.new_axis_mask_ = x; |
6212 | return ret; |
6213 | } |
6214 | |
6215 | /// Defaults to 0 |
6216 | TF_MUST_USE_RESULT Attrs ShrinkAxisMask(int64 x) { |
6217 | Attrs ret = *this; |
6218 | ret.shrink_axis_mask_ = x; |
6219 | return ret; |
6220 | } |
6221 | |
6222 | int64 begin_mask_ = 0; |
6223 | int64 end_mask_ = 0; |
6224 | int64 ellipsis_mask_ = 0; |
6225 | int64 new_axis_mask_ = 0; |
6226 | int64 shrink_axis_mask_ = 0; |
6227 | }; |
6228 | StridedSliceAssign(const ::tensorflow::Scope& scope, ::tensorflow::Input ref, |
6229 | ::tensorflow::Input begin, ::tensorflow::Input end, |
6230 | ::tensorflow::Input strides, ::tensorflow::Input value); |
6231 | StridedSliceAssign(const ::tensorflow::Scope& scope, ::tensorflow::Input ref, |
6232 | ::tensorflow::Input begin, ::tensorflow::Input end, |
6233 | ::tensorflow::Input strides, ::tensorflow::Input value, |
6234 | const StridedSliceAssign::Attrs& attrs); |
6235 | operator ::tensorflow::Output() const { return output_ref; } |
6236 | operator ::tensorflow::Input() const { return output_ref; } |
6237 | ::tensorflow::Node* node() const { return output_ref.node(); } |
6238 | |
6239 | static Attrs BeginMask(int64 x) { |
6240 | return Attrs().BeginMask(x); |
6241 | } |
6242 | static Attrs EndMask(int64 x) { |
6243 | return Attrs().EndMask(x); |
6244 | } |
6245 | static Attrs EllipsisMask(int64 x) { |
6246 | return Attrs().EllipsisMask(x); |
6247 | } |
6248 | static Attrs NewAxisMask(int64 x) { |
6249 | return Attrs().NewAxisMask(x); |
6250 | } |
6251 | static Attrs ShrinkAxisMask(int64 x) { |
6252 | return Attrs().ShrinkAxisMask(x); |
6253 | } |
6254 | |
6255 | Operation operation; |
6256 | ::tensorflow::Output output_ref; |
6257 | }; |
6258 | |
6259 | /// Returns the gradient of `StridedSlice`. |
6260 | /// |
/// Since `StridedSlice` cuts out pieces of its `input`, which has size
/// `shape`, its gradient will have the same shape (which is passed here
/// as `shape`). The gradient will be zero in any element that the slice
/// does not select.
///
/// Arguments are the same as for `StridedSlice`, with the exception that
/// `dy` is the input gradient to be propagated and `shape` is the
/// shape of `StridedSlice`'s `input`.
6269 | /// |
6270 | /// Args: |
6271 | /// * scope: A Scope object |
6272 | /// |
6273 | /// Returns: |
6274 | /// * `Output`: The output tensor. |
6275 | class StridedSliceGrad { |
6276 | public: |
6277 | /// Optional attribute setters for StridedSliceGrad |
6278 | struct Attrs { |
6279 | /// Defaults to 0 |
6280 | TF_MUST_USE_RESULT Attrs BeginMask(int64 x) { |
6281 | Attrs ret = *this; |
6282 | ret.begin_mask_ = x; |
6283 | return ret; |
6284 | } |
6285 | |
6286 | /// Defaults to 0 |
6287 | TF_MUST_USE_RESULT Attrs EndMask(int64 x) { |
6288 | Attrs ret = *this; |
6289 | ret.end_mask_ = x; |
6290 | return ret; |
6291 | } |
6292 | |
6293 | /// Defaults to 0 |
6294 | TF_MUST_USE_RESULT Attrs EllipsisMask(int64 x) { |
6295 | Attrs ret = *this; |
6296 | ret.ellipsis_mask_ = x; |
6297 | return ret; |
6298 | } |
6299 | |
6300 | /// Defaults to 0 |
6301 | TF_MUST_USE_RESULT Attrs NewAxisMask(int64 x) { |
6302 | Attrs ret = *this; |
6303 | ret.new_axis_mask_ = x; |
6304 | return ret; |
6305 | } |
6306 | |
6307 | /// Defaults to 0 |
6308 | TF_MUST_USE_RESULT Attrs ShrinkAxisMask(int64 x) { |
6309 | Attrs ret = *this; |
6310 | ret.shrink_axis_mask_ = x; |
6311 | return ret; |
6312 | } |
6313 | |
6314 | int64 begin_mask_ = 0; |
6315 | int64 end_mask_ = 0; |
6316 | int64 ellipsis_mask_ = 0; |
6317 | int64 new_axis_mask_ = 0; |
6318 | int64 shrink_axis_mask_ = 0; |
6319 | }; |
6320 | StridedSliceGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input shape, |
6321 | ::tensorflow::Input begin, ::tensorflow::Input end, |
6322 | ::tensorflow::Input strides, ::tensorflow::Input dy); |
6323 | StridedSliceGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input shape, |
6324 | ::tensorflow::Input begin, ::tensorflow::Input end, |
6325 | ::tensorflow::Input strides, ::tensorflow::Input dy, const |
6326 | StridedSliceGrad::Attrs& attrs); |
6327 | operator ::tensorflow::Output() const { return output; } |
6328 | operator ::tensorflow::Input() const { return output; } |
6329 | ::tensorflow::Node* node() const { return output.node(); } |
6330 | |
6331 | static Attrs BeginMask(int64 x) { |
6332 | return Attrs().BeginMask(x); |
6333 | } |
6334 | static Attrs EndMask(int64 x) { |
6335 | return Attrs().EndMask(x); |
6336 | } |
6337 | static Attrs EllipsisMask(int64 x) { |
6338 | return Attrs().EllipsisMask(x); |
6339 | } |
6340 | static Attrs NewAxisMask(int64 x) { |
6341 | return Attrs().NewAxisMask(x); |
6342 | } |
6343 | static Attrs ShrinkAxisMask(int64 x) { |
6344 | return Attrs().ShrinkAxisMask(x); |
6345 | } |
6346 | |
6347 | Operation operation; |
6348 | ::tensorflow::Output output; |
6349 | }; |
6350 | |
6351 | /// Adds sparse `updates` to an existing tensor according to `indices`. |
6352 | /// |
6353 | /// This operation creates a new tensor by adding sparse `updates` to the passed |
6354 | /// in `tensor`. |
6355 | /// This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the |
6356 | /// updates are added onto an existing tensor (as opposed to a variable). If the |
6357 | /// memory for the existing tensor cannot be re-used, a copy is made and updated. |
6358 | /// |
6359 | /// `indices` is an integer tensor containing indices into a new tensor of shape |
6360 | /// `tensor.shape`. The last dimension of `indices` can be at most the rank of |
6361 | /// `tensor.shape`: |
6362 | /// |
6363 | /// ``` |
6364 | /// indices.shape[-1] <= tensor.shape.rank |
6365 | /// ``` |
6366 | /// |
6367 | /// The last dimension of `indices` corresponds to indices into elements |
6368 | /// (if `indices.shape[-1] = tensor.shape.rank`) or slices |
6369 | /// (if `indices.shape[-1] < tensor.shape.rank`) along dimension |
6370 | /// `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape |
6371 | /// |
6372 | /// ``` |
6373 | /// indices.shape[:-1] + tensor.shape[indices.shape[-1]:] |
6374 | /// ``` |
6375 | /// |
/// The simplest form of `tensor_scatter_nd_add` is to add individual elements to a
/// tensor by index. For example, say we want to add 4 elements to a rank-1
/// tensor with 8 elements.
6379 | /// |
6380 | /// In Python, this scatter add operation would look like this: |
6381 | /// |
6382 | /// >>> indices = tf.constant([[4], [3], [1], [7]]) |
6383 | /// >>> updates = tf.constant([9, 10, 11, 12]) |
6384 | /// >>> tensor = tf.ones([8], dtype=tf.int32) |
6385 | /// >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates) |
6386 | /// >>> updated |
6387 | /// <tf.Tensor: shape=(8,), dtype=int32, |
6388 | /// numpy=array([ 1, 12, 1, 11, 10, 1, 1, 13], dtype=int32)> |
6389 | /// |
/// We can also insert entire slices of a higher rank tensor all at once. For
/// example, we can insert two slices in the first dimension of a
/// rank-3 tensor with two matrices of new values.
6393 | /// |
6394 | /// In Python, this scatter add operation would look like this: |
6395 | /// |
6396 | /// >>> indices = tf.constant([[0], [2]]) |
6397 | /// >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], |
6398 | /// ... [7, 7, 7, 7], [8, 8, 8, 8]], |
6399 | /// ... [[5, 5, 5, 5], [6, 6, 6, 6], |
6400 | /// ... [7, 7, 7, 7], [8, 8, 8, 8]]]) |
6401 | /// >>> tensor = tf.ones([4, 4, 4],dtype=tf.int32) |
6402 | /// >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates) |
6403 | /// >>> updated |
6404 | /// <tf.Tensor: shape=(4, 4, 4), dtype=int32, |
6405 | /// numpy=array([[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], |
6406 | /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], |
6407 | /// [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], |
6408 | /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]], dtype=int32)> |
6409 | /// |
6410 | /// Note: on CPU, if an out of bound index is found, an error is returned. |
6411 | /// On GPU, if an out of bound index is found, the index is ignored. |
6412 | /// |
6413 | /// Args: |
6414 | /// * scope: A Scope object |
6415 | /// * tensor: Tensor to copy/update. |
6416 | /// * indices: Index tensor. |
6417 | /// * updates: Updates to scatter into output. |
6418 | /// |
6419 | /// Returns: |
6420 | /// * `Output`: A new tensor copied from tensor and updates added according to the indices. |
6421 | class TensorScatterAdd { |
6422 | public: |
6423 | TensorScatterAdd(const ::tensorflow::Scope& scope, ::tensorflow::Input tensor, |
6424 | ::tensorflow::Input indices, ::tensorflow::Input updates); |
6425 | operator ::tensorflow::Output() const { return output; } |
6426 | operator ::tensorflow::Input() const { return output; } |
6427 | ::tensorflow::Node* node() const { return output.node(); } |
6428 | |
6429 | Operation operation; |
6430 | ::tensorflow::Output output; |
6431 | }; |
6432 | |
6433 | /// Apply a sparse update to a tensor taking the element-wise maximum. |
6434 | /// |
6435 | /// Returns a new tensor copied from `tensor` whose values are element-wise maximum between |
6436 | /// tensor and updates according to the indices. |
6437 | /// |
6438 | /// >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0] |
6439 | /// >>> indices = [[1], [4], [5]] |
6440 | /// >>> updates = [1, -1, 1] |
6441 | /// >>> tf.tensor_scatter_nd_max(tensor, indices, updates).numpy() |
6442 | /// array([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32) |
6443 | /// |
6444 | /// Refer to `tf.tensor_scatter_nd_update` for more details. |
6445 | /// |
6446 | /// Args: |
6447 | /// * scope: A Scope object |
6448 | /// * tensor: Tensor to update. |
6449 | /// * indices: Index tensor. |
6450 | /// * updates: Updates to scatter into output. |
6451 | /// |
6452 | /// Returns: |
6453 | /// * `Output`: A new tensor copied from tensor whose values are element-wise maximum between tensor and updates according to the indices. |
6454 | class TensorScatterMax { |
6455 | public: |
6456 | TensorScatterMax(const ::tensorflow::Scope& scope, ::tensorflow::Input tensor, |
6457 | ::tensorflow::Input indices, ::tensorflow::Input updates); |
6458 | operator ::tensorflow::Output() const { return output; } |
6459 | operator ::tensorflow::Input() const { return output; } |
6460 | ::tensorflow::Node* node() const { return output.node(); } |
6461 | |
6462 | Operation operation; |
6463 | ::tensorflow::Output output; |
6464 | }; |
6465 | |
/// Apply a sparse update to a tensor taking the element-wise minimum.
///
/// Returns a new tensor copied from `tensor` whose values are the element-wise
/// minimum between `tensor` and `updates` according to the indices.
///
/// Refer to `tf.tensor_scatter_nd_update` for more details.
6467 | /// |
6468 | /// Args: |
6469 | /// * scope: A Scope object |
6470 | /// * tensor: Tensor to update. |
6471 | /// * indices: Index tensor. |
6472 | /// * updates: Updates to scatter into output. |
6473 | /// |
6474 | /// Returns: |
6475 | /// * `Output`: A new tensor copied from tensor whose values are element-wise minimum between tensor and updates according to the indices. |
6476 | class TensorScatterMin { |
6477 | public: |
6478 | TensorScatterMin(const ::tensorflow::Scope& scope, ::tensorflow::Input tensor, |
6479 | ::tensorflow::Input indices, ::tensorflow::Input updates); |
6480 | operator ::tensorflow::Output() const { return output; } |
6481 | operator ::tensorflow::Input() const { return output; } |
6482 | ::tensorflow::Node* node() const { return output.node(); } |
6483 | |
6484 | Operation operation; |
6485 | ::tensorflow::Output output; |
6486 | }; |
6487 | |
6488 | /// Subtracts sparse `updates` from an existing tensor according to `indices`. |
6489 | /// |
6490 | /// This operation creates a new tensor by subtracting sparse `updates` from the |
6491 | /// passed in `tensor`. |
6492 | /// This operation is very similar to `tf.scatter_nd_sub`, except that the updates |
6493 | /// are subtracted from an existing tensor (as opposed to a variable). If the memory |
6494 | /// for the existing tensor cannot be re-used, a copy is made and updated. |
6495 | /// |
6496 | /// `indices` is an integer tensor containing indices into a new tensor of shape |
6497 | /// `shape`. The last dimension of `indices` can be at most the rank of `shape`: |
6498 | /// |
6499 | /// indices.shape[-1] <= shape.rank |
6500 | /// |
6501 | /// The last dimension of `indices` corresponds to indices into elements |
6502 | /// (if `indices.shape[-1] = shape.rank`) or slices |
6503 | /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of |
6504 | /// `shape`. `updates` is a tensor with shape |
6505 | /// |
6506 | /// indices.shape[:-1] + shape[indices.shape[-1]:] |
6507 | /// |
/// The simplest form of `tensor_scatter_nd_sub` is to subtract individual elements
/// from a tensor by index. For example, say we want to subtract 4 scattered
/// elements from a rank-1 tensor with 8 elements.
6511 | /// |
6512 | /// In Python, this scatter subtract operation would look like this: |
6513 | /// |
6514 | /// ```python |
6515 | /// indices = tf.constant([[4], [3], [1], [7]]) |
6516 | /// updates = tf.constant([9, 10, 11, 12]) |
6517 | /// tensor = tf.ones([8], dtype=tf.int32) |
6518 | /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) |
6519 | /// print(updated) |
6520 | /// ``` |
6521 | /// |
6522 | /// The resulting tensor would look like this: |
6523 | /// |
6524 | /// [1, -10, 1, -9, -8, 1, 1, -11] |
6525 | /// |
/// We can also insert entire slices of a higher rank tensor all at once. For
/// example, we can insert two slices in the first dimension of a
/// rank-3 tensor with two matrices of new values.
6529 | /// |
/// In Python, this scatter subtract operation would look like this:
6531 | /// |
6532 | /// ```python |
6533 | /// indices = tf.constant([[0], [2]]) |
6534 | /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], |
6535 | /// [7, 7, 7, 7], [8, 8, 8, 8]], |
6536 | /// [[5, 5, 5, 5], [6, 6, 6, 6], |
6537 | /// [7, 7, 7, 7], [8, 8, 8, 8]]]) |
6538 | /// tensor = tf.ones([4, 4, 4],dtype=tf.int32) |
6539 | /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) |
6540 | /// print(updated) |
6541 | /// ``` |
6542 | /// |
6543 | /// The resulting tensor would look like this: |
6544 | /// |
6545 | /// [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], |
6546 | /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], |
6547 | /// [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], |
6548 | /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] |
6549 | /// |
6550 | /// Note that on CPU, if an out of bound index is found, an error is returned. |
6551 | /// On GPU, if an out of bound index is found, the index is ignored. |
6552 | /// |
6553 | /// Args: |
6554 | /// * scope: A Scope object |
6555 | /// * tensor: Tensor to copy/update. |
6556 | /// * indices: Index tensor. |
6557 | /// * updates: Updates to scatter into output. |
6558 | /// |
6559 | /// Returns: |
6560 | /// * `Output`: A new tensor copied from tensor and updates subtracted according to the indices. |
6561 | class TensorScatterSub { |
6562 | public: |
6563 | TensorScatterSub(const ::tensorflow::Scope& scope, ::tensorflow::Input tensor, |
6564 | ::tensorflow::Input indices, ::tensorflow::Input updates); |
6565 | operator ::tensorflow::Output() const { return output; } |
6566 | operator ::tensorflow::Input() const { return output; } |
6567 | ::tensorflow::Node* node() const { return output.node(); } |
6568 | |
6569 | Operation operation; |
6570 | ::tensorflow::Output output; |
6571 | }; |
6572 | |
6573 | /// Scatter `updates` into an existing tensor according to `indices`. |
6574 | /// |
6575 | /// This operation creates a new tensor by applying sparse `updates` to the passed |
6576 | /// in `tensor`. |
6577 | /// This operation is very similar to `tf.scatter_nd`, except that the updates are |
6578 | /// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory |
6579 | /// for the existing tensor cannot be re-used, a copy is made and updated. |
6580 | /// |
6581 | /// If `indices` contains duplicates, then we pick the last update for the index. |
6582 | /// |
6583 | /// If an out of bound index is found on CPU, an error is returned. |
6584 | /// |
6585 | /// **WARNING**: There are some GPU specific semantics for this operation. |
6586 | /// - If an out of bound index is found, the index is ignored. |
6587 | /// - The order in which updates are applied is nondeterministic, so the output |
6588 | /// will be nondeterministic if `indices` contains duplicates. |
6589 | /// |
6590 | /// `indices` is an integer tensor containing indices into a new tensor of shape |
6591 | /// `shape`. |
6592 | /// |
6593 | /// * `indices` must have at least 2 axes: `(num_updates, index_depth)`. |
/// * The last axis of `indices` is how deep to index into `tensor`, so this index
///   depth must be at most the rank of `tensor`: `indices.shape[-1] <= tensor.ndim`
///
/// If `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements.
/// If `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input
/// `tensor`.
6600 | /// |
6601 | /// Each `update` has a rank of `tensor.rank - indices.shape[-1]`. |
6602 | /// The overall shape of `updates` is: |
6603 | /// |
6604 | /// ``` |
6605 | /// indices.shape[:-1] + tensor.shape[indices.shape[-1]:] |
6606 | /// ``` |
6607 | /// |
/// For usage examples see the python [tf.tensor_scatter_nd_update](
/// https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function.
6610 | /// |
6611 | /// |
6612 | /// Args: |
6613 | /// * scope: A Scope object |
6614 | /// * tensor: Tensor to copy/update. |
6615 | /// * indices: Index tensor. |
6616 | /// * updates: Updates to scatter into output. |
6617 | /// |
6618 | /// Returns: |
6619 | /// * `Output`: A new tensor with the given shape and updates applied according |
6620 | /// to the indices. |
6621 | class TensorScatterUpdate { |
6622 | public: |
6623 | TensorScatterUpdate(const ::tensorflow::Scope& scope, ::tensorflow::Input |
6624 | tensor, ::tensorflow::Input indices, ::tensorflow::Input |
6625 | updates); |
6626 | operator ::tensorflow::Output() const { return output; } |
6627 | operator ::tensorflow::Input() const { return output; } |
6628 | ::tensorflow::Node* node() const { return output.node(); } |
6629 | |
6630 | Operation operation; |
6631 | ::tensorflow::Output output; |
6632 | }; |
6633 | |
6634 | /// Assign `value` to the sliced l-value reference of `input`. |
6635 | /// |
6636 | /// The values of `value` are assigned to the positions in the tensor `input` that |
/// are selected by the slice parameters. The slice parameters `begin`, `end`,
/// `strides`, etc. work exactly as in `StridedSlice`.
6639 | /// |
6640 | /// NOTE this op currently does not support broadcasting and so `value`'s shape |
6641 | /// must be exactly the shape produced by the slice of `input`. |
6642 | /// |
6643 | /// Args: |
6644 | /// * scope: A Scope object |
6645 | /// |
6646 | /// Returns: |
6647 | /// * `Output`: The output tensor. |
6648 | class TensorStridedSliceUpdate { |
6649 | public: |
6650 | /// Optional attribute setters for TensorStridedSliceUpdate |
6651 | struct Attrs { |
6652 | /// Defaults to 0 |
6653 | TF_MUST_USE_RESULT Attrs BeginMask(int64 x) { |
6654 | Attrs ret = *this; |
6655 | ret.begin_mask_ = x; |
6656 | return ret; |
6657 | } |
6658 | |
6659 | /// Defaults to 0 |
6660 | TF_MUST_USE_RESULT Attrs EndMask(int64 x) { |
6661 | Attrs ret = *this; |
6662 | ret.end_mask_ = x; |
6663 | return ret; |
6664 | } |
6665 | |
6666 | /// Defaults to 0 |
6667 | TF_MUST_USE_RESULT Attrs EllipsisMask(int64 x) { |
6668 | Attrs ret = *this; |
6669 | ret.ellipsis_mask_ = x; |
6670 | return ret; |
6671 | } |
6672 | |
6673 | /// Defaults to 0 |
6674 | TF_MUST_USE_RESULT Attrs NewAxisMask(int64 x) { |
6675 | Attrs ret = *this; |
6676 | ret.new_axis_mask_ = x; |
6677 | return ret; |
6678 | } |
6679 | |
6680 | /// Defaults to 0 |
6681 | TF_MUST_USE_RESULT Attrs ShrinkAxisMask(int64 x) { |
6682 | Attrs ret = *this; |
6683 | ret.shrink_axis_mask_ = x; |
6684 | return ret; |
6685 | } |
6686 | |
6687 | int64 begin_mask_ = 0; |
6688 | int64 end_mask_ = 0; |
6689 | int64 ellipsis_mask_ = 0; |
6690 | int64 new_axis_mask_ = 0; |
6691 | int64 shrink_axis_mask_ = 0; |
6692 | }; |
6693 | TensorStridedSliceUpdate(const ::tensorflow::Scope& scope, ::tensorflow::Input |
6694 | input, ::tensorflow::Input begin, ::tensorflow::Input |
6695 | end, ::tensorflow::Input strides, ::tensorflow::Input |
6696 | value); |
6697 | TensorStridedSliceUpdate(const ::tensorflow::Scope& scope, ::tensorflow::Input |
6698 | input, ::tensorflow::Input begin, ::tensorflow::Input |
6699 | end, ::tensorflow::Input strides, ::tensorflow::Input |
6700 | value, const TensorStridedSliceUpdate::Attrs& attrs); |
6701 | operator ::tensorflow::Output() const { return output; } |
6702 | operator ::tensorflow::Input() const { return output; } |
6703 | ::tensorflow::Node* node() const { return output.node(); } |
6704 | |
6705 | static Attrs BeginMask(int64 x) { |
6706 | return Attrs().BeginMask(x); |
6707 | } |
6708 | static Attrs EndMask(int64 x) { |
6709 | return Attrs().EndMask(x); |
6710 | } |
6711 | static Attrs EllipsisMask(int64 x) { |
6712 | return Attrs().EllipsisMask(x); |
6713 | } |
6714 | static Attrs NewAxisMask(int64 x) { |
6715 | return Attrs().NewAxisMask(x); |
6716 | } |
6717 | static Attrs ShrinkAxisMask(int64 x) { |
6718 | return Attrs().ShrinkAxisMask(x); |
6719 | } |
6720 | |
6721 | Operation operation; |
6722 | ::tensorflow::Output output; |
6723 | }; |
6724 | |
6725 | /// Constructs a tensor by tiling a given tensor. |
6726 | /// |
6727 | /// This operation creates a new tensor by replicating `input` `multiples` times. |
6728 | /// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, |
6729 | /// and the values of `input` are replicated `multiples[i]` times along the 'i'th |
6730 | /// dimension. For example, tiling `[a b c d]` by `[2]` produces |
6731 | /// `[a b c d a b c d]`. |
6732 | /// |
6733 | /// >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) |
6734 | /// >>> b = tf.constant([1,2], tf.int32) |
6735 | /// >>> tf.tile(a, b) |
6736 | /// <tf.Tensor: shape=(2, 6), dtype=int32, numpy= |
6737 | /// array([[1, 2, 3, 1, 2, 3], |
6738 | /// [4, 5, 6, 4, 5, 6]], dtype=int32)> |
6739 | /// >>> c = tf.constant([2,1], tf.int32) |
6740 | /// >>> tf.tile(a, c) |
6741 | /// <tf.Tensor: shape=(4, 3), dtype=int32, numpy= |
6742 | /// array([[1, 2, 3], |
6743 | /// [4, 5, 6], |
6744 | /// [1, 2, 3], |
6745 | /// [4, 5, 6]], dtype=int32)> |
6746 | /// >>> d = tf.constant([2,2], tf.int32) |
6747 | /// >>> tf.tile(a, d) |
6748 | /// <tf.Tensor: shape=(4, 6), dtype=int32, numpy= |
6749 | /// array([[1, 2, 3, 1, 2, 3], |
6750 | /// [4, 5, 6, 4, 5, 6], |
6751 | /// [1, 2, 3, 1, 2, 3], |
6752 | /// [4, 5, 6, 4, 5, 6]], dtype=int32)> |
6753 | /// |
6754 | /// Args: |
6755 | /// * scope: A Scope object |
6756 | /// * input: 1-D or higher. |
/// * multiples: 1-D. Length must be the same as the number of dimensions in `input`.
6758 | /// |
6759 | /// Returns: |
6760 | /// * `Output`: The output tensor. |
6761 | class Tile { |
6762 | public: |
6763 | Tile(const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
6764 | ::tensorflow::Input multiples); |
6765 | operator ::tensorflow::Output() const { return output; } |
6766 | operator ::tensorflow::Input() const { return output; } |
6767 | ::tensorflow::Node* node() const { return output.node(); } |
6768 | |
6769 | Operation operation; |
6770 | ::tensorflow::Output output; |
6771 | }; |
6772 | |
6773 | /// Shuffle dimensions of x according to a permutation. |
6774 | /// |
6775 | /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: |
6776 | /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` |
6777 | /// |
6778 | /// Args: |
6779 | /// * scope: A Scope object |
6780 | /// |
6781 | /// Returns: |
6782 | /// * `Output`: The y tensor. |
6783 | class Transpose { |
6784 | public: |
6785 | Transpose(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
6786 | ::tensorflow::Input perm); |
6787 | operator ::tensorflow::Output() const { return y; } |
6788 | operator ::tensorflow::Input() const { return y; } |
6789 | ::tensorflow::Node* node() const { return y.node(); } |
6790 | |
6791 | Operation operation; |
6792 | ::tensorflow::Output y; |
6793 | }; |
6794 | |
6795 | /// Finds unique elements in a 1-D tensor. |
6796 | /// |
6797 | /// This operation returns a tensor `y` containing all of the unique elements of `x` |
6798 | /// sorted in the same order that they occur in `x`; `x` does not need to be sorted. |
6799 | /// This operation also returns a tensor `idx` the same size as `x` that contains |
6800 | /// the index of each value of `x` in the unique output `y`. In other words: |
6801 | /// |
6802 | /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` |
6803 | /// |
6804 | /// Examples: |
6805 | /// |
6806 | /// ``` |
6807 | /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] |
6808 | /// y, idx = unique(x) |
6809 | /// y ==> [1, 2, 4, 7, 8] |
6810 | /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] |
6811 | /// ``` |
6812 | /// |
6813 | /// ``` |
6814 | /// # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5] |
6815 | /// y, idx = unique(x) |
6816 | /// y ==> [4, 5, 1, 2, 3] |
6817 | /// idx ==> [0, 1, 2, 3, 4, 4, 0, 1] |
6818 | /// ``` |
6819 | /// |
6820 | /// Args: |
6821 | /// * scope: A Scope object |
6822 | /// * x: 1-D. |
6823 | /// |
6824 | /// Returns: |
6825 | /// * `Output` y: 1-D. |
6826 | /// * `Output` idx: 1-D. |
6827 | class Unique { |
6828 | public: |
6829 | /// Optional attribute setters for Unique |
6830 | struct Attrs { |
6831 | /// Defaults to DT_INT32 |
6832 | TF_MUST_USE_RESULT Attrs OutIdx(DataType x) { |
6833 | Attrs ret = *this; |
6834 | ret.out_idx_ = x; |
6835 | return ret; |
6836 | } |
6837 | |
6838 | DataType out_idx_ = DT_INT32; |
6839 | }; |
6840 | Unique(const ::tensorflow::Scope& scope, ::tensorflow::Input x); |
6841 | Unique(const ::tensorflow::Scope& scope, ::tensorflow::Input x, const |
6842 | Unique::Attrs& attrs); |
6843 | |
6844 | static Attrs OutIdx(DataType x) { |
6845 | return Attrs().OutIdx(x); |
6846 | } |
6847 | |
6848 | Operation operation; |
6849 | ::tensorflow::Output y; |
6850 | ::tensorflow::Output idx; |
6851 | }; |
6852 | |
6853 | /// Finds unique elements along an axis of a tensor. |
6854 | /// |
/// This operation returns a tensor `y` containing the unique elements
/// along the `axis` of a tensor. The returned unique elements are sorted
/// in the same order as they occur along `axis` in `x`.
/// This operation also returns a tensor `idx` that is the same size as
/// the number of elements in `x` along the `axis` dimension. It
/// contains the index in the unique output `y`.
/// In other words, for a `1-D` tensor `x` with `axis = None`:
6862 | /// |
6863 | /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` |
6864 | /// |
6865 | /// For example: |
6866 | /// |
6867 | /// ``` |
6868 | /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] |
6869 | /// y, idx = unique(x) |
6870 | /// y ==> [1, 2, 4, 7, 8] |
6871 | /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] |
6872 | /// ``` |
6873 | /// |
/// For a `2-D` tensor `x` with `axis = 0`:
6875 | /// |
6876 | /// ``` |
6877 | /// # tensor 'x' is [[1, 0, 0], |
6878 | /// # [1, 0, 0], |
6879 | /// # [2, 0, 0]] |
6880 | /// y, idx = unique(x, axis=0) |
6881 | /// y ==> [[1, 0, 0], |
6882 | /// [2, 0, 0]] |
6883 | /// idx ==> [0, 0, 1] |
6884 | /// ``` |
6885 | /// |
/// For a `2-D` tensor `x` with `axis = 1`:
6887 | /// |
6888 | /// ``` |
6889 | /// # tensor 'x' is [[1, 0, 0], |
6890 | /// # [1, 0, 0], |
6891 | /// # [2, 0, 0]] |
6892 | /// y, idx = unique(x, axis=1) |
6893 | /// y ==> [[1, 0], |
6894 | /// [1, 0], |
6895 | /// [2, 0]] |
6896 | /// idx ==> [0, 1, 1] |
6897 | /// ``` |
6898 | /// |
6899 | /// Args: |
6900 | /// * scope: A Scope object |
6901 | /// * x: A `Tensor`. |
/// * axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor along
/// which to find the unique elements.
6904 | /// |
6905 | /// Returns: |
6906 | /// * `Output` y: A `Tensor`. Unique elements along the `axis` of `Tensor` x. |
/// * `Output` idx: A 1-D Tensor that contains the index of each value of x in the
/// output y.
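///
/// A sketch of deduplicating rows (unique along axis 0), reusing the
/// `Scope`/`ClientSession` setup shown for `Unique` above; the 1-D
/// `axis` constant mirrors the `axis=[0]` form used in the examples:
///
/// ```
/// auto x = tensorflow::ops::Const(root, {{1, 0, 0}, {1, 0, 0}, {2, 0, 0}});
/// auto axis = tensorflow::ops::Const(root, {0});  // unique along axis 0
/// auto u = tensorflow::ops::UniqueV2(root, x, axis);
/// // u.y evaluates to [[1, 0, 0], [2, 0, 0]] and u.idx to [0, 0, 1].
/// ```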
6909 | class UniqueV2 { |
6910 | public: |
6911 | /// Optional attribute setters for UniqueV2 |
6912 | struct Attrs { |
6913 | /// Defaults to DT_INT32 |
6914 | TF_MUST_USE_RESULT Attrs OutIdx(DataType x) { |
6915 | Attrs ret = *this; |
6916 | ret.out_idx_ = x; |
6917 | return ret; |
6918 | } |
6919 | |
6920 | DataType out_idx_ = DT_INT32; |
6921 | }; |
6922 | UniqueV2(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
6923 | ::tensorflow::Input axis); |
6924 | UniqueV2(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
6925 | ::tensorflow::Input axis, const UniqueV2::Attrs& attrs); |
6926 | |
6927 | static Attrs OutIdx(DataType x) { |
6928 | return Attrs().OutIdx(x); |
6929 | } |
6930 | |
6931 | Operation operation; |
6932 | ::tensorflow::Output y; |
6933 | ::tensorflow::Output idx; |
6934 | }; |
6935 | |
6936 | /// Finds unique elements in a 1-D tensor. |
6937 | /// |
/// This operation returns a tensor `y` containing all of the unique elements of `x`,
/// in the same order that they occur in `x`. This operation also returns a
6940 | /// tensor `idx` the same size as `x` that contains the index of each value of `x` |
6941 | /// in the unique output `y`. Finally, it returns a third tensor `count` that |
6942 | /// contains the count of each element of `y` in `x`. In other words: |
6943 | /// |
/// `y[idx[i]] = x[i] for i in [0, 1, ..., size(x) - 1]`
6945 | /// |
6946 | /// For example: |
6947 | /// |
6948 | /// ``` |
6949 | /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] |
6950 | /// y, idx, count = unique_with_counts(x) |
6951 | /// y ==> [1, 2, 4, 7, 8] |
6952 | /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] |
6953 | /// count ==> [2, 1, 3, 1, 2] |
6954 | /// ``` |
6955 | /// |
6956 | /// Args: |
6957 | /// * scope: A Scope object |
6958 | /// * x: 1-D. |
6959 | /// |
6960 | /// Returns: |
6961 | /// * `Output` y: 1-D. |
6962 | /// * `Output` idx: 1-D. |
6963 | /// * `Output` count: 1-D. |
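///
/// A short sketch under the same `Scope`/`ClientSession` setup as the
/// `Unique` example above; the third output exposes the per-element counts:
///
/// ```
/// auto x = tensorflow::ops::Const(root, {1, 1, 2, 4, 4, 4, 7, 8, 8});
/// auto u = tensorflow::ops::UniqueWithCounts(root, x);
/// TF_CHECK_OK(session.Run({u.y, u.idx, u.count}, &outputs));
/// // outputs[2] holds the counts [2, 1, 3, 1, 2].
/// ```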
6964 | class UniqueWithCounts { |
6965 | public: |
6966 | /// Optional attribute setters for UniqueWithCounts |
6967 | struct Attrs { |
6968 | /// Defaults to DT_INT32 |
6969 | TF_MUST_USE_RESULT Attrs OutIdx(DataType x) { |
6970 | Attrs ret = *this; |
6971 | ret.out_idx_ = x; |
6972 | return ret; |
6973 | } |
6974 | |
6975 | DataType out_idx_ = DT_INT32; |
6976 | }; |
6977 | UniqueWithCounts(const ::tensorflow::Scope& scope, ::tensorflow::Input x); |
6978 | UniqueWithCounts(const ::tensorflow::Scope& scope, ::tensorflow::Input x, const |
6979 | UniqueWithCounts::Attrs& attrs); |
6980 | |
6981 | static Attrs OutIdx(DataType x) { |
6982 | return Attrs().OutIdx(x); |
6983 | } |
6984 | |
6985 | Operation operation; |
6986 | ::tensorflow::Output y; |
6987 | ::tensorflow::Output idx; |
6988 | ::tensorflow::Output count; |
6989 | }; |
6990 | |
6991 | /// Finds unique elements along an axis of a tensor. |
6992 | /// |
/// This operation returns a tensor `y` containing the unique elements
/// along the `axis` of a tensor. The returned unique elements are sorted
/// in the same order as they occur along `axis` in `x`.
/// This operation also returns a tensor `idx` and a tensor `count`
/// that are the same size as the number of elements in `x` along the
/// `axis` dimension. The `idx` contains the index in the unique output `y`
/// and the `count` contains the count of each element of `y` in `x`.
/// In other words, for a `1-D` tensor `x` with `axis = None`:
7001 | /// |
/// `y[idx[i]] = x[i] for i in [0, 1, ..., size(x) - 1]`
7003 | /// |
7004 | /// For example: |
7005 | /// |
7006 | /// ``` |
7007 | /// x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) |
/// y, idx, count = UniqueWithCountsV2(x, axis=[0])
7009 | /// y ==> [1, 2, 4, 7, 8] |
7010 | /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] |
7011 | /// count ==> [2, 1, 3, 1, 2] |
7012 | /// ``` |
7013 | /// |
7014 | /// For a `2-D` tensor `x` with `axis = 0`: |
7015 | /// |
7016 | /// ``` |
7017 | /// x = tf.constant([[1, 0, 0], |
7018 | /// [1, 0, 0], |
7019 | /// [2, 0, 0]]) |
7020 | /// y, idx, count = UniqueWithCountsV2(x, axis=[0]) |
7021 | /// y ==> [[1, 0, 0], |
7022 | /// [2, 0, 0]] |
7023 | /// idx ==> [0, 0, 1] |
7024 | /// count ==> [2, 1] |
7025 | /// ``` |
7026 | /// |
7027 | /// For a `2-D` tensor `x` with `axis = 1`: |
7028 | /// |
7029 | /// ``` |
7030 | /// x = tf.constant([[1, 0, 0], |
7031 | /// [1, 0, 0], |
7032 | /// [2, 0, 0]]) |
7033 | /// y, idx, count = UniqueWithCountsV2(x, axis=[1]) |
7034 | /// y ==> [[1, 0], |
7035 | /// [1, 0], |
7036 | /// [2, 0]] |
7037 | /// idx ==> [0, 1, 1] |
7038 | /// count ==> [1, 2] |
7039 | /// ``` |
7040 | /// |
7041 | /// Args: |
7042 | /// * scope: A Scope object |
7043 | /// * x: A `Tensor`. |
/// * axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor along
/// which to find the unique elements.
7046 | /// |
7047 | /// Returns: |
7048 | /// * `Output` y: A `Tensor`. Unique elements along the `axis` of `Tensor` x. |
/// * `Output` idx: A 1-D Tensor that contains the index of each value of x in the
/// output y.
/// * `Output` count: A 1-D Tensor. The count of each value of y in x.
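///
/// A sketch of counting duplicate rows (axis 0), under the same
/// `Scope`/`ClientSession` setup as the earlier examples:
///
/// ```
/// auto x = tensorflow::ops::Const(root, {{1, 0, 0}, {1, 0, 0}, {2, 0, 0}});
/// auto axis = tensorflow::ops::Const(root, {0});
/// auto u = tensorflow::ops::UniqueWithCountsV2(root, x, axis);
/// TF_CHECK_OK(session.Run({u.y, u.idx, u.count}, &outputs));
/// // outputs[2] holds [2, 1]: the first row appears twice, the second once.
/// ```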
7052 | class UniqueWithCountsV2 { |
7053 | public: |
7054 | /// Optional attribute setters for UniqueWithCountsV2 |
7055 | struct Attrs { |
7056 | /// Defaults to DT_INT32 |
7057 | TF_MUST_USE_RESULT Attrs OutIdx(DataType x) { |
7058 | Attrs ret = *this; |
7059 | ret.out_idx_ = x; |
7060 | return ret; |
7061 | } |
7062 | |
7063 | DataType out_idx_ = DT_INT32; |
7064 | }; |
7065 | UniqueWithCountsV2(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
7066 | ::tensorflow::Input axis); |
7067 | UniqueWithCountsV2(const ::tensorflow::Scope& scope, ::tensorflow::Input x, |
7068 | ::tensorflow::Input axis, const UniqueWithCountsV2::Attrs& |
7069 | attrs); |
7070 | |
7071 | static Attrs OutIdx(DataType x) { |
7072 | return Attrs().OutIdx(x); |
7073 | } |
7074 | |
7075 | Operation operation; |
7076 | ::tensorflow::Output y; |
7077 | ::tensorflow::Output idx; |
7078 | ::tensorflow::Output count; |
7079 | }; |
7080 | |
7081 | /// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. |
7082 | /// |
7083 | /// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. |
/// For example, given a tensor of shape `(A, B, C, D)`:
7085 | /// |
7086 | /// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` |
/// and each tensor in `output` will have shape `(B, C, D)`. (Note that the
/// unpacked dimension is gone, unlike with `split`.)
7089 | /// |
7090 | /// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` |
7091 | /// and each tensor in `output` will have shape `(A, C, D)`. |
7092 | /// Etc. |
7093 | /// |
7094 | /// This is the opposite of `pack`. |
7095 | /// |
7096 | /// Args: |
7097 | /// * scope: A Scope object |
7098 | /// * value: 1-D or higher, with `axis` dimension size equal to `num`. |
7099 | /// |
7100 | /// Optional attributes (see `Attrs`): |
7101 | /// * axis: Dimension along which to unpack. Negative values wrap around, so the |
7102 | /// valid range is `[-R, R)`. |
7103 | /// |
7104 | /// Returns: |
7105 | /// * `OutputList`: The list of tensors unpacked from `value`. |
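///
/// A sketch of unpacking a 2x3 constant into two rank-1 tensors, under the
/// same setup as the earlier examples; `num` must equal the size of the
/// unpacked axis, and the results are accessed through `operator[]`:
///
/// ```
/// auto value = tensorflow::ops::Const(root, {{1, 2, 3}, {4, 5, 6}});
/// auto parts = tensorflow::ops::Unstack(root, value, /*num=*/2);  // axis 0 by default
/// TF_CHECK_OK(session.Run({parts[0], parts[1]}, &outputs));
/// // outputs[0] holds [1, 2, 3]; outputs[1] holds [4, 5, 6].
/// ```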
7106 | class Unstack { |
7107 | public: |
7108 | /// Optional attribute setters for Unstack |
7109 | struct Attrs { |
7110 | /// Dimension along which to unpack. Negative values wrap around, so the |
7111 | /// valid range is `[-R, R)`. |
7112 | /// |
7113 | /// Defaults to 0 |
7114 | TF_MUST_USE_RESULT Attrs Axis(int64 x) { |
7115 | Attrs ret = *this; |
7116 | ret.axis_ = x; |
7117 | return ret; |
7118 | } |
7119 | |
7120 | int64 axis_ = 0; |
7121 | }; |
7122 | Unstack(const ::tensorflow::Scope& scope, ::tensorflow::Input value, int64 num); |
7123 | Unstack(const ::tensorflow::Scope& scope, ::tensorflow::Input value, int64 num, |
7124 | const Unstack::Attrs& attrs); |
7125 | ::tensorflow::Output operator[](size_t index) const { return output[index]; } |
7126 | |
7127 | |
7128 | static Attrs Axis(int64 x) { |
7129 | return Attrs().Axis(x); |
7130 | } |
7131 | |
7132 | Operation operation; |
7133 | ::tensorflow::OutputList output; |
7134 | }; |
7135 | |
7136 | /// Converts an array of flat indices into a tuple of coordinate arrays. |
7137 | /// |
7138 | /// |
7139 | /// Example: |
7140 | /// |
7141 | /// ``` |
7142 | /// y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) |
7143 | /// # 'dims' represent a hypothetical (3, 3) tensor of indices: |
7144 | /// # [[0, 1, *2*], |
7145 | /// # [3, 4, *5*], |
7146 | /// # [6, *7*, 8]] |
7147 | /// # For each entry from 'indices', this operation returns |
/// # its coordinates (marked with '*'):
7149 | /// # 2 ==> (0, 2) |
7150 | /// # 5 ==> (1, 2) |
7151 | /// # 7 ==> (2, 1) |
7152 | /// y ==> [[0, 1, 2], [2, 2, 1]] |
7153 | /// ``` |
7154 | /// |
7155 | /// @compatibility(numpy) |
7156 | /// Equivalent to np.unravel_index |
7157 | /// @end_compatibility |
7158 | /// |
7159 | /// Args: |
7160 | /// * scope: A Scope object |
/// * indices: A 0-D or 1-D `int` Tensor whose elements are indices into the
7162 | /// flattened version of an array of dimensions dims. |
/// * dims: A 1-D `int` Tensor. The shape of the array to use for unraveling
7164 | /// indices. |
7165 | /// |
7166 | /// Returns: |
/// * `Output`: A 2-D (or 1-D if indices is 0-D) tensor where each row has the
7168 | /// same shape as the indices array. |
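///
/// The example above, expressed with this class under the same session
/// setup as the earlier sketches:
///
/// ```
/// auto indices = tensorflow::ops::Const(root, {2, 5, 7});
/// auto dims = tensorflow::ops::Const(root, {3, 3});
/// auto coords = tensorflow::ops::UnravelIndex(root, indices, dims);
/// TF_CHECK_OK(session.Run({coords.output}, &outputs));
/// // outputs[0] holds [[0, 1, 2], [2, 2, 1]].
/// ```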
7169 | class UnravelIndex { |
7170 | public: |
7171 | UnravelIndex(const ::tensorflow::Scope& scope, ::tensorflow::Input indices, |
7172 | ::tensorflow::Input dims); |
7173 | operator ::tensorflow::Output() const { return output; } |
7174 | operator ::tensorflow::Input() const { return output; } |
7175 | ::tensorflow::Node* node() const { return output.node(); } |
7176 | |
7177 | Operation operation; |
7178 | ::tensorflow::Output output; |
7179 | }; |
7180 | |
7181 | /// Returns locations of nonzero / true values in a tensor. |
7182 | /// |
7183 | /// This operation returns the coordinates of true elements in `condition`. The |
7184 | /// coordinates are returned in a 2-D tensor where the first dimension (rows) |
7185 | /// represents the number of true elements, and the second dimension (columns) |
/// represents the coordinates of the true elements. Keep in mind that the shape of
7187 | /// the output tensor can vary depending on how many true values there are in |
7188 | /// `condition`. Indices are output in row-major order. |
7189 | /// |
7190 | /// For example: |
7191 | /// |
7192 | /// ``` |
/// # 'condition' tensor is [[True, False]
/// #                        [True, False]]
/// # 'condition' has two true values, so output has two coordinates.
/// # 'condition' has rank of 2, so coordinates have two indices.
/// where(condition) ==> [[0, 0],
///                       [1, 0]]
///
/// # 'condition' tensor is [[[True, False]
/// #                         [True, False]]
/// #                        [[False, True]
/// #                         [False, True]]
/// #                        [[False, False]
/// #                         [False, True]]]
/// # 'condition' has 5 true values, so output has 5 coordinates.
/// # 'condition' has rank of 3, so coordinates have three indices.
/// where(condition) ==> [[0, 0, 0],
///                       [0, 1, 0],
///                       [1, 0, 1],
///                       [1, 1, 1],
///                       [2, 1, 1]]
///
/// # 'condition' tensor is [[[1.5, 0.0]
/// #                         [-0.5, 0.0]]
/// #                        [[0.0, 0.25]
/// #                         [0.0, 0.75]]
/// #                        [[0.0, 0.0]
/// #                         [0.0, 0.01]]]
/// # 'condition' has 5 nonzero values, so output has 5 coordinates.
/// # 'condition' has rank of 3, so coordinates have three indices.
/// where(condition) ==> [[0, 0, 0],
///                       [0, 1, 0],
///                       [1, 0, 1],
///                       [1, 1, 1],
///                       [2, 1, 1]]
///
/// # 'condition' tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]
/// #                         [0.0 + 0.5j, 0.0 + 0.0j]]
/// #                        [[0.0 + 0.0j, 0.25 + 1.5j]
/// #                         [0.0 + 0.0j, 0.75 + 0.0j]]
/// #                        [[0.0 + 0.0j, 0.0 + 0.0j]
/// #                         [0.0 + 0.0j, 0.01 + 0.0j]]]
/// # 'condition' has 5 nonzero magnitude values, so output has 5 coordinates.
/// # 'condition' has rank of 3, so coordinates have three indices.
/// where(condition) ==> [[0, 0, 0],
///                       [0, 1, 0],
///                       [1, 0, 1],
///                       [1, 1, 1],
///                       [2, 1, 1]]
7241 | /// ``` |
7242 | /// |
7243 | /// Args: |
7244 | /// * scope: A Scope object |
7245 | /// |
7246 | /// Returns: |
7247 | /// * `Output`: The index tensor. |
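///
/// A sketch mirroring the first example above (boolean input; the output
/// coordinates are int64), under the same session setup:
///
/// ```
/// auto condition = tensorflow::ops::Const(root, {{true, false}, {true, false}});
/// auto loc = tensorflow::ops::Where(root, condition);
/// TF_CHECK_OK(session.Run({loc.index}, &outputs));
/// // outputs[0] holds [[0, 0], [1, 0]].
/// ```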
7248 | class Where { |
7249 | public: |
7250 | Where(const ::tensorflow::Scope& scope, ::tensorflow::Input condition); |
7251 | operator ::tensorflow::Output() const { return index; } |
7252 | operator ::tensorflow::Input() const { return index; } |
7253 | ::tensorflow::Node* node() const { return index.node(); } |
7254 | |
7255 | Operation operation; |
7256 | ::tensorflow::Output index; |
7257 | }; |
7258 | |
7259 | /// Returns a tensor of zeros with the same shape and type as x. |
7260 | /// |
7261 | /// Args: |
7262 | /// * scope: A Scope object |
7263 | /// * x: a tensor of type T. |
7264 | /// |
7265 | /// Returns: |
7266 | /// * `Output`: a tensor of the same shape and type as x but filled with zeros. |
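///
/// A one-line sketch under the same setup:
///
/// ```
/// auto x = tensorflow::ops::Const(root, {{1.f, 2.f}, {3.f, 4.f}});
/// auto z = tensorflow::ops::ZerosLike(root, x);
/// // z evaluates to a 2x2 float tensor of zeros.
/// ```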
7267 | class ZerosLike { |
7268 | public: |
7269 | ZerosLike(const ::tensorflow::Scope& scope, ::tensorflow::Input x); |
7270 | operator ::tensorflow::Output() const { return y; } |
7271 | operator ::tensorflow::Input() const { return y; } |
7272 | ::tensorflow::Node* node() const { return y.node(); } |
7273 | |
7274 | Operation operation; |
7275 | ::tensorflow::Output y; |
7276 | }; |
7277 | |
7278 | /// @} |
7279 | |
7280 | } // namespace ops |
7281 | } // namespace tensorflow |
7282 | |
7283 | #endif // TENSORFLOW_CC_OPS_ARRAY_OPS_H_ |
7284 | |