1// This file is MACHINE GENERATED! Do not edit.
2
3#ifndef TENSORFLOW_CC_OPS_NN_OPS_H_
4#define TENSORFLOW_CC_OPS_NN_OPS_H_
5
6// This file is MACHINE GENERATED! Do not edit.
7
8#include "tensorflow/cc/framework/ops.h"
9#include "tensorflow/cc/framework/scope.h"
10#include "tensorflow/core/framework/tensor.h"
11#include "tensorflow/core/framework/tensor_shape.h"
12#include "tensorflow/core/framework/types.h"
13#include "tensorflow/core/lib/gtl/array_slice.h"
14
15namespace tensorflow {
16namespace ops {
17
18/// @defgroup nn_ops Nn Ops
19/// @{
20
21/// Returns min/max k values and their indices of the input operand in an approximate manner.
22///
23/// See https://arxiv.org/abs/2206.14286 for the algorithm details.
24/// This op is only optimized on TPU currently.
25///
26/// Args:
27/// * scope: A Scope object
28/// * input: Array to search. Must be at least 1-D of the floating type
29/// * k: Specifies the number of min/max-k.
30///
31/// Optional attributes (see `Attrs`):
32/// * reduction_dimension: Integer dimension along which to search. Default: -1.
33/// * recall_target: Recall target for the approximation. Range in (0,1]
34/// * is_max_k: When true, computes max-k; otherwise computes min-k.
35/// * reduction_input_size_override: When set to a positive value, it overrides the size determined by
36/// `input[reduction_dim]` for evaluating the recall. This option is useful when
37/// the given `input` is only a subset of the overall computation in SPMD or
38/// distributed pipelines, where the true input size cannot be deferred by the
39/// `input` shape.
40/// * aggregate_to_topk: When true, aggregates approximate results to top-k. When false, returns the
41/// approximate results. The number of the approximate results is implementation
42/// defined and is greater equals to the specified `k`.
43///
44/// Returns:
45/// * `Output` values: The min/max k values along the `reduction_dimension` of the `input` operand.
46/// The dimension are the same as the `input` operand except for the
47/// `reduction_dimension`: when `aggregate_to_topk` is true, the reduction
48/// dimension is `k`; otherwise, it is greater equals to `k` where the size is
49/// implementation-defined.
50/// * `Output` indices: The indices of `values` along the `reduction_dimension` of the `input` operand.
51class ApproxTopK {
52 public:
53 /// Optional attribute setters for ApproxTopK
54 struct Attrs {
55 /// Integer dimension along which to search. Default: -1.
56 ///
57 /// Defaults to -1
58 TF_MUST_USE_RESULT Attrs ReductionDimension(int64 x) {
59 Attrs ret = *this;
60 ret.reduction_dimension_ = x;
61 return ret;
62 }
63
64 /// Recall target for the approximation. Range in (0,1]
65 ///
66 /// Defaults to 0.95
67 TF_MUST_USE_RESULT Attrs RecallTarget(float x) {
68 Attrs ret = *this;
69 ret.recall_target_ = x;
70 return ret;
71 }
72
73 /// When true, computes max-k; otherwise computes min-k.
74 ///
75 /// Defaults to true
76 TF_MUST_USE_RESULT Attrs IsMaxK(bool x) {
77 Attrs ret = *this;
78 ret.is_max_k_ = x;
79 return ret;
80 }
81
82 /// When set to a positive value, it overrides the size determined by
83 /// `input[reduction_dim]` for evaluating the recall. This option is useful when
84 /// the given `input` is only a subset of the overall computation in SPMD or
85 /// distributed pipelines, where the true input size cannot be deferred by the
86 /// `input` shape.
87 ///
88 /// Defaults to -1
89 TF_MUST_USE_RESULT Attrs ReductionInputSizeOverride(int64 x) {
90 Attrs ret = *this;
91 ret.reduction_input_size_override_ = x;
92 return ret;
93 }
94
95 /// When true, aggregates approximate results to top-k. When false, returns the
96 /// approximate results. The number of the approximate results is implementation
97 /// defined and is greater equals to the specified `k`.
98 ///
99 /// Defaults to true
100 TF_MUST_USE_RESULT Attrs AggregateToTopk(bool x) {
101 Attrs ret = *this;
102 ret.aggregate_to_topk_ = x;
103 return ret;
104 }
105
106 int64 reduction_dimension_ = -1;
107 float recall_target_ = 0.95f;
108 bool is_max_k_ = true;
109 int64 reduction_input_size_override_ = -1;
110 bool aggregate_to_topk_ = true;
111 };
112 ApproxTopK(const ::tensorflow::Scope& scope, ::tensorflow::Input input, int64
113 k);
114 ApproxTopK(const ::tensorflow::Scope& scope, ::tensorflow::Input input, int64
115 k, const ApproxTopK::Attrs& attrs);
116
117 static Attrs ReductionDimension(int64 x) {
118 return Attrs().ReductionDimension(x);
119 }
120 static Attrs RecallTarget(float x) {
121 return Attrs().RecallTarget(x);
122 }
123 static Attrs IsMaxK(bool x) {
124 return Attrs().IsMaxK(x);
125 }
126 static Attrs ReductionInputSizeOverride(int64 x) {
127 return Attrs().ReductionInputSizeOverride(x);
128 }
129 static Attrs AggregateToTopk(bool x) {
130 return Attrs().AggregateToTopk(x);
131 }
132
133 Operation operation;
134 ::tensorflow::Output values;
135 ::tensorflow::Output indices;
136};
137
138/// Performs average pooling on the input.
139///
140/// Each entry in `output` is the mean of the corresponding size `ksize`
141/// window in `value`.
142///
143/// Args:
144/// * scope: A Scope object
145/// * value: 4-D with shape `[batch, height, width, channels]`.
146/// * ksize: The size of the sliding window for each dimension of `value`.
147/// * strides: The stride of the sliding window for each dimension of `value`.
148/// * padding: The type of padding algorithm to use.
149///
150/// Optional attributes (see `Attrs`):
151/// * data_format: Specify the data format of the input and output data. With the
152/// default format "NHWC", the data is stored in the order of:
153/// [batch, in_height, in_width, in_channels].
154/// Alternatively, the format could be "NCHW", the data storage order of:
155/// [batch, in_channels, in_height, in_width].
156///
157/// Returns:
158/// * `Output`: The average pooled output tensor.
159class AvgPool {
160 public:
161 /// Optional attribute setters for AvgPool
162 struct Attrs {
163 /// Specify the data format of the input and output data. With the
164 /// default format "NHWC", the data is stored in the order of:
165 /// [batch, in_height, in_width, in_channels].
166 /// Alternatively, the format could be "NCHW", the data storage order of:
167 /// [batch, in_channels, in_height, in_width].
168 ///
169 /// Defaults to "NHWC"
170 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
171 Attrs ret = *this;
172 ret.data_format_ = x;
173 return ret;
174 }
175
176 StringPiece data_format_ = "NHWC";
177 };
178 AvgPool(const ::tensorflow::Scope& scope, ::tensorflow::Input value, const
179 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
180 StringPiece padding);
181 AvgPool(const ::tensorflow::Scope& scope, ::tensorflow::Input value, const
182 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
183 StringPiece padding, const AvgPool::Attrs& attrs);
184 operator ::tensorflow::Output() const { return output; }
185 operator ::tensorflow::Input() const { return output; }
186 ::tensorflow::Node* node() const { return output.node(); }
187
188 static Attrs DataFormat(StringPiece x) {
189 return Attrs().DataFormat(x);
190 }
191
192 Operation operation;
193 ::tensorflow::Output output;
194};
195
196/// Performs 3D average pooling on the input.
197///
198/// Each entry in `output` is the mean of the corresponding size `ksize` window in
199/// `value`.
200///
201/// Args:
202/// * scope: A Scope object
203/// * input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
204/// * ksize: 1-D tensor of length 5. The size of the window for each dimension of
205/// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
206/// * strides: 1-D tensor of length 5. The stride of the sliding window for each
207/// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
208/// * padding: The type of padding algorithm to use.
209///
210/// Optional attributes (see `Attrs`):
211/// * data_format: The data format of the input and output data. With the
212/// default format "NDHWC", the data is stored in the order of:
213/// [batch, in_depth, in_height, in_width, in_channels].
214/// Alternatively, the format could be "NCDHW", the data storage order is:
215/// [batch, in_channels, in_depth, in_height, in_width].
216///
217/// Returns:
218/// * `Output`: The average pooled output tensor.
219class AvgPool3D {
220 public:
221 /// Optional attribute setters for AvgPool3D
222 struct Attrs {
223 /// The data format of the input and output data. With the
224 /// default format "NDHWC", the data is stored in the order of:
225 /// [batch, in_depth, in_height, in_width, in_channels].
226 /// Alternatively, the format could be "NCDHW", the data storage order is:
227 /// [batch, in_channels, in_depth, in_height, in_width].
228 ///
229 /// Defaults to "NDHWC"
230 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
231 Attrs ret = *this;
232 ret.data_format_ = x;
233 return ret;
234 }
235
236 StringPiece data_format_ = "NDHWC";
237 };
238 AvgPool3D(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
239 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
240 StringPiece padding);
241 AvgPool3D(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
242 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
243 StringPiece padding, const AvgPool3D::Attrs& attrs);
244 operator ::tensorflow::Output() const { return output; }
245 operator ::tensorflow::Input() const { return output; }
246 ::tensorflow::Node* node() const { return output.node(); }
247
248 static Attrs DataFormat(StringPiece x) {
249 return Attrs().DataFormat(x);
250 }
251
252 Operation operation;
253 ::tensorflow::Output output;
254};
255
256/// Computes gradients of average pooling function.
257///
258/// Args:
259/// * scope: A Scope object
260/// * orig_input_shape: The original input dimensions.
261/// * grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
262/// * ksize: 1-D tensor of length 5. The size of the window for each dimension of
263/// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
264/// * strides: 1-D tensor of length 5. The stride of the sliding window for each
265/// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
266/// * padding: The type of padding algorithm to use.
267///
268/// Optional attributes (see `Attrs`):
269/// * data_format: The data format of the input and output data. With the
270/// default format "NDHWC", the data is stored in the order of:
271/// [batch, in_depth, in_height, in_width, in_channels].
272/// Alternatively, the format could be "NCDHW", the data storage order is:
273/// [batch, in_channels, in_depth, in_height, in_width].
274///
275/// Returns:
276/// * `Output`: The backprop for input.
277class AvgPool3DGrad {
278 public:
279 /// Optional attribute setters for AvgPool3DGrad
280 struct Attrs {
281 /// The data format of the input and output data. With the
282 /// default format "NDHWC", the data is stored in the order of:
283 /// [batch, in_depth, in_height, in_width, in_channels].
284 /// Alternatively, the format could be "NCDHW", the data storage order is:
285 /// [batch, in_channels, in_depth, in_height, in_width].
286 ///
287 /// Defaults to "NDHWC"
288 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
289 Attrs ret = *this;
290 ret.data_format_ = x;
291 return ret;
292 }
293
294 StringPiece data_format_ = "NDHWC";
295 };
296 AvgPool3DGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
297 orig_input_shape, ::tensorflow::Input grad, const
298 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
299 StringPiece padding);
300 AvgPool3DGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
301 orig_input_shape, ::tensorflow::Input grad, const
302 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
303 StringPiece padding, const AvgPool3DGrad::Attrs& attrs);
304 operator ::tensorflow::Output() const { return output; }
305 operator ::tensorflow::Input() const { return output; }
306 ::tensorflow::Node* node() const { return output.node(); }
307
308 static Attrs DataFormat(StringPiece x) {
309 return Attrs().DataFormat(x);
310 }
311
312 Operation operation;
313 ::tensorflow::Output output;
314};
315
316/// Adds `bias` to `value`.
317///
318/// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
319/// Broadcasting is supported, so `value` may have any number of dimensions.
320///
321/// Args:
322/// * scope: A Scope object
323/// * value: Any number of dimensions.
324/// * bias: 1-D with size the last dimension of `value`.
325///
326/// Optional attributes (see `Attrs`):
327/// * data_format: Specify the data format of the input and output data. With the
328/// default format "NHWC", the bias tensor will be added to the last dimension
329/// of the value tensor.
330/// Alternatively, the format could be "NCHW", the data storage order of:
331/// [batch, in_channels, in_height, in_width].
332/// The tensor will be added to "in_channels", the third-to-the-last
333/// dimension.
334///
335/// Returns:
336/// * `Output`: Broadcasted sum of `value` and `bias`.
337class BiasAdd {
338 public:
339 /// Optional attribute setters for BiasAdd
340 struct Attrs {
341 /// Specify the data format of the input and output data. With the
342 /// default format "NHWC", the bias tensor will be added to the last dimension
343 /// of the value tensor.
344 /// Alternatively, the format could be "NCHW", the data storage order of:
345 /// [batch, in_channels, in_height, in_width].
346 /// The tensor will be added to "in_channels", the third-to-the-last
347 /// dimension.
348 ///
349 /// Defaults to "NHWC"
350 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
351 Attrs ret = *this;
352 ret.data_format_ = x;
353 return ret;
354 }
355
356 StringPiece data_format_ = "NHWC";
357 };
358 BiasAdd(const ::tensorflow::Scope& scope, ::tensorflow::Input value,
359 ::tensorflow::Input bias);
360 BiasAdd(const ::tensorflow::Scope& scope, ::tensorflow::Input value,
361 ::tensorflow::Input bias, const BiasAdd::Attrs& attrs);
362 operator ::tensorflow::Output() const { return output; }
363 operator ::tensorflow::Input() const { return output; }
364 ::tensorflow::Node* node() const { return output.node(); }
365
366 static Attrs DataFormat(StringPiece x) {
367 return Attrs().DataFormat(x);
368 }
369
370 Operation operation;
371 ::tensorflow::Output output;
372};
373
374/// The backward operation for "BiasAdd" on the "bias" tensor.
375///
376/// It accumulates all the values from out_backprop into the feature dimension.
377/// For NHWC data format, the feature dimension is the last. For NCHW data format,
378/// the feature dimension is the third-to-last.
379///
380/// Args:
381/// * scope: A Scope object
382/// * out_backprop: Any number of dimensions.
383///
384/// Optional attributes (see `Attrs`):
385/// * data_format: Specify the data format of the input and output data. With the
386/// default format "NHWC", the bias tensor will be added to the last dimension
387/// of the value tensor.
388/// Alternatively, the format could be "NCHW", the data storage order of:
389/// [batch, in_channels, in_height, in_width].
390/// The tensor will be added to "in_channels", the third-to-the-last
391/// dimension.
392///
393/// Returns:
394/// * `Output`: 1-D with size the feature dimension of `out_backprop`.
395class BiasAddGrad {
396 public:
397 /// Optional attribute setters for BiasAddGrad
398 struct Attrs {
399 /// Specify the data format of the input and output data. With the
400 /// default format "NHWC", the bias tensor will be added to the last dimension
401 /// of the value tensor.
402 /// Alternatively, the format could be "NCHW", the data storage order of:
403 /// [batch, in_channels, in_height, in_width].
404 /// The tensor will be added to "in_channels", the third-to-the-last
405 /// dimension.
406 ///
407 /// Defaults to "NHWC"
408 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
409 Attrs ret = *this;
410 ret.data_format_ = x;
411 return ret;
412 }
413
414 StringPiece data_format_ = "NHWC";
415 };
416 BiasAddGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input out_backprop);
417 BiasAddGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input out_backprop,
418 const BiasAddGrad::Attrs& attrs);
419 operator ::tensorflow::Output() const { return output; }
420 operator ::tensorflow::Input() const { return output; }
421 ::tensorflow::Node* node() const { return output.node(); }
422
423 static Attrs DataFormat(StringPiece x) {
424 return Attrs().DataFormat(x);
425 }
426
427 Operation operation;
428 ::tensorflow::Output output;
429};
430
431/// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
432///
433/// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
434/// and a filter / kernel tensor of shape
435/// `[filter_height, filter_width, in_channels, out_channels]`, this op
436/// performs the following:
437///
438/// 1. Flattens the filter to a 2-D matrix with shape
439/// `[filter_height * filter_width * in_channels, output_channels]`.
440/// 2. Extracts image patches from the input tensor to form a *virtual*
441/// tensor of shape `[batch, out_height, out_width,
442/// filter_height * filter_width * in_channels]`.
443/// 3. For each patch, right-multiplies the filter matrix and the image patch
444/// vector.
445///
446/// In detail, with the default NHWC format,
447///
448/// output[b, i, j, k] =
449/// sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
450/// filter[di, dj, q, k]
451///
452/// Must have `strides[0] = strides[3] = 1`. For the most common case of the same
453/// horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
454///
455/// Args:
456/// * scope: A Scope object
457/// * input: A 4-D tensor. The dimension order is interpreted according to the value
458/// of `data_format`, see below for details.
459/// * filter: A 4-D tensor of shape
460/// `[filter_height, filter_width, in_channels, out_channels]`
461/// * strides: 1-D tensor of length 4. The stride of the sliding window for each
462/// dimension of `input`. The dimension order is determined by the value of
463/// `data_format`, see below for details.
464/// * padding: The type of padding algorithm to use.
465///
466/// Optional attributes (see `Attrs`):
467/// * explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
468/// dimension, the amount of padding inserted before and after the dimension is
469/// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
470/// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
471/// * data_format: Specify the data format of the input and output data. With the
472/// default format "NHWC", the data is stored in the order of:
473/// [batch, height, width, channels].
474/// Alternatively, the format could be "NCHW", the data storage order of:
475/// [batch, channels, height, width].
476/// * dilations: 1-D tensor of length 4. The dilation factor for each dimension of
477/// `input`. If set to k > 1, there will be k-1 skipped cells between each
478/// filter element on that dimension. The dimension order is determined by the
479/// value of `data_format`, see above for details. Dilations in the batch and
480/// depth dimensions must be 1.
481///
482/// Returns:
483/// * `Output`: A 4-D tensor. The dimension order is determined by the value of
484/// `data_format`, see below for details.
485class Conv2D {
486 public:
487 /// Optional attribute setters for Conv2D
488 struct Attrs {
489 /// Defaults to true
490 TF_MUST_USE_RESULT Attrs UseCudnnOnGpu(bool x) {
491 Attrs ret = *this;
492 ret.use_cudnn_on_gpu_ = x;
493 return ret;
494 }
495
496 /// If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
497 /// dimension, the amount of padding inserted before and after the dimension is
498 /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
499 /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
500 ///
501 /// Defaults to []
502 TF_MUST_USE_RESULT Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
503 Attrs ret = *this;
504 ret.explicit_paddings_ = x;
505 return ret;
506 }
507
508 /// Specify the data format of the input and output data. With the
509 /// default format "NHWC", the data is stored in the order of:
510 /// [batch, height, width, channels].
511 /// Alternatively, the format could be "NCHW", the data storage order of:
512 /// [batch, channels, height, width].
513 ///
514 /// Defaults to "NHWC"
515 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
516 Attrs ret = *this;
517 ret.data_format_ = x;
518 return ret;
519 }
520
521 /// 1-D tensor of length 4. The dilation factor for each dimension of
522 /// `input`. If set to k > 1, there will be k-1 skipped cells between each
523 /// filter element on that dimension. The dimension order is determined by the
524 /// value of `data_format`, see above for details. Dilations in the batch and
525 /// depth dimensions must be 1.
526 ///
527 /// Defaults to [1, 1, 1, 1]
528 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
529 Attrs ret = *this;
530 ret.dilations_ = x;
531 return ret;
532 }
533
534 bool use_cudnn_on_gpu_ = true;
535 gtl::ArraySlice<int> explicit_paddings_ = {};
536 StringPiece data_format_ = "NHWC";
537 gtl::ArraySlice<int> dilations_ = Default_dilations();
538 private:
539 static gtl::ArraySlice<int> Default_dilations() {
540 static const int kStorage[] = {1, 1, 1, 1};
541 return gtl::ArraySlice<int>(kStorage);
542 }
543 };
544 Conv2D(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
545 ::tensorflow::Input filter, const gtl::ArraySlice<int>& strides,
546 StringPiece padding);
547 Conv2D(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
548 ::tensorflow::Input filter, const gtl::ArraySlice<int>& strides,
549 StringPiece padding, const Conv2D::Attrs& attrs);
550 operator ::tensorflow::Output() const { return output; }
551 operator ::tensorflow::Input() const { return output; }
552 ::tensorflow::Node* node() const { return output.node(); }
553
554 static Attrs UseCudnnOnGpu(bool x) {
555 return Attrs().UseCudnnOnGpu(x);
556 }
557 static Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
558 return Attrs().ExplicitPaddings(x);
559 }
560 static Attrs DataFormat(StringPiece x) {
561 return Attrs().DataFormat(x);
562 }
563 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
564 return Attrs().Dilations(x);
565 }
566
567 Operation operation;
568 ::tensorflow::Output output;
569};
570
571/// Computes the gradients of convolution with respect to the filter.
572///
573/// Args:
574/// * scope: A Scope object
575/// * input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
576/// * filter_sizes: An integer vector representing the tensor shape of `filter`,
577/// where `filter` is a 4-D
578/// `[filter_height, filter_width, in_channels, out_channels]` tensor.
579/// * out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
580/// Gradients w.r.t. the output of the convolution.
581/// * strides: The stride of the sliding window for each dimension of the input
582/// of the convolution. Must be in the same order as the dimension specified with
583/// format.
584/// * padding: The type of padding algorithm to use.
585///
586/// Optional attributes (see `Attrs`):
587/// * explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
588/// dimension, the amount of padding inserted before and after the dimension is
589/// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
590/// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
591/// * data_format: Specify the data format of the input and output data. With the
592/// default format "NHWC", the data is stored in the order of:
593/// [batch, in_height, in_width, in_channels].
594/// Alternatively, the format could be "NCHW", the data storage order of:
595/// [batch, in_channels, in_height, in_width].
596/// * dilations: 1-D tensor of length 4. The dilation factor for each dimension of
597/// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
598/// element on that dimension. The dimension order is determined by the value of
599/// `data_format`, see above for details. Dilations in the batch and depth
600/// dimensions must be 1.
601///
602/// Returns:
603/// * `Output`: 4-D with shape
604/// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
605/// the `filter` input of the convolution.
606class Conv2DBackpropFilter {
607 public:
608 /// Optional attribute setters for Conv2DBackpropFilter
609 struct Attrs {
610 /// Defaults to true
611 TF_MUST_USE_RESULT Attrs UseCudnnOnGpu(bool x) {
612 Attrs ret = *this;
613 ret.use_cudnn_on_gpu_ = x;
614 return ret;
615 }
616
617 /// If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
618 /// dimension, the amount of padding inserted before and after the dimension is
619 /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
620 /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
621 ///
622 /// Defaults to []
623 TF_MUST_USE_RESULT Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
624 Attrs ret = *this;
625 ret.explicit_paddings_ = x;
626 return ret;
627 }
628
629 /// Specify the data format of the input and output data. With the
630 /// default format "NHWC", the data is stored in the order of:
631 /// [batch, in_height, in_width, in_channels].
632 /// Alternatively, the format could be "NCHW", the data storage order of:
633 /// [batch, in_channels, in_height, in_width].
634 ///
635 /// Defaults to "NHWC"
636 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
637 Attrs ret = *this;
638 ret.data_format_ = x;
639 return ret;
640 }
641
642 /// 1-D tensor of length 4. The dilation factor for each dimension of
643 /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
644 /// element on that dimension. The dimension order is determined by the value of
645 /// `data_format`, see above for details. Dilations in the batch and depth
646 /// dimensions must be 1.
647 ///
648 /// Defaults to [1, 1, 1, 1]
649 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
650 Attrs ret = *this;
651 ret.dilations_ = x;
652 return ret;
653 }
654
655 bool use_cudnn_on_gpu_ = true;
656 gtl::ArraySlice<int> explicit_paddings_ = {};
657 StringPiece data_format_ = "NHWC";
658 gtl::ArraySlice<int> dilations_ = Default_dilations();
659 private:
660 static gtl::ArraySlice<int> Default_dilations() {
661 static const int kStorage[] = {1, 1, 1, 1};
662 return gtl::ArraySlice<int>(kStorage);
663 }
664 };
665 Conv2DBackpropFilter(const ::tensorflow::Scope& scope, ::tensorflow::Input
666 input, ::tensorflow::Input filter_sizes,
667 ::tensorflow::Input out_backprop, const
668 gtl::ArraySlice<int>& strides, StringPiece padding);
669 Conv2DBackpropFilter(const ::tensorflow::Scope& scope, ::tensorflow::Input
670 input, ::tensorflow::Input filter_sizes,
671 ::tensorflow::Input out_backprop, const
672 gtl::ArraySlice<int>& strides, StringPiece padding, const
673 Conv2DBackpropFilter::Attrs& attrs);
674 operator ::tensorflow::Output() const { return output; }
675 operator ::tensorflow::Input() const { return output; }
676 ::tensorflow::Node* node() const { return output.node(); }
677
678 static Attrs UseCudnnOnGpu(bool x) {
679 return Attrs().UseCudnnOnGpu(x);
680 }
681 static Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
682 return Attrs().ExplicitPaddings(x);
683 }
684 static Attrs DataFormat(StringPiece x) {
685 return Attrs().DataFormat(x);
686 }
687 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
688 return Attrs().Dilations(x);
689 }
690
691 Operation operation;
692 ::tensorflow::Output output;
693};
694
695/// Computes the gradients of convolution with respect to the input.
696///
697/// Args:
698/// * scope: A Scope object
699/// * input_sizes: An integer vector representing the shape of `input`,
700/// where `input` is a 4-D `[batch, height, width, channels]` tensor.
701/// * filter: 4-D with shape
702/// `[filter_height, filter_width, in_channels, out_channels]`.
703/// * out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
704/// Gradients w.r.t. the output of the convolution.
705/// * strides: The stride of the sliding window for each dimension of the input
706/// of the convolution. Must be in the same order as the dimension specified with
707/// format.
708/// * padding: The type of padding algorithm to use.
709///
710/// Optional attributes (see `Attrs`):
711/// * explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
712/// dimension, the amount of padding inserted before and after the dimension is
713/// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
714/// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
715/// * data_format: Specify the data format of the input and output data. With the
716/// default format "NHWC", the data is stored in the order of:
717/// [batch, in_height, in_width, in_channels].
718/// Alternatively, the format could be "NCHW", the data storage order of:
719/// [batch, in_channels, in_height, in_width].
720/// * dilations: 1-D tensor of length 4. The dilation factor for each dimension of
721/// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
722/// element on that dimension. The dimension order is determined by the value of
723/// `data_format`, see above for details. Dilations in the batch and depth
724/// dimensions must be 1.
725///
726/// Returns:
727/// * `Output`: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient
728/// w.r.t. the input of the convolution.
729class Conv2DBackpropInput {
730 public:
731 /// Optional attribute setters for Conv2DBackpropInput
732 struct Attrs {
733 /// Defaults to true
734 TF_MUST_USE_RESULT Attrs UseCudnnOnGpu(bool x) {
735 Attrs ret = *this;
736 ret.use_cudnn_on_gpu_ = x;
737 return ret;
738 }
739
740 /// If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
741 /// dimension, the amount of padding inserted before and after the dimension is
742 /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
743 /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
744 ///
745 /// Defaults to []
746 TF_MUST_USE_RESULT Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
747 Attrs ret = *this;
748 ret.explicit_paddings_ = x;
749 return ret;
750 }
751
752 /// Specify the data format of the input and output data. With the
753 /// default format "NHWC", the data is stored in the order of:
754 /// [batch, in_height, in_width, in_channels].
755 /// Alternatively, the format could be "NCHW", the data storage order of:
756 /// [batch, in_channels, in_height, in_width].
757 ///
758 /// Defaults to "NHWC"
759 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
760 Attrs ret = *this;
761 ret.data_format_ = x;
762 return ret;
763 }
764
765 /// 1-D tensor of length 4. The dilation factor for each dimension of
766 /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
767 /// element on that dimension. The dimension order is determined by the value of
768 /// `data_format`, see above for details. Dilations in the batch and depth
769 /// dimensions must be 1.
770 ///
771 /// Defaults to [1, 1, 1, 1]
772 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
773 Attrs ret = *this;
774 ret.dilations_ = x;
775 return ret;
776 }
777
778 bool use_cudnn_on_gpu_ = true;
779 gtl::ArraySlice<int> explicit_paddings_ = {};
780 StringPiece data_format_ = "NHWC";
781 gtl::ArraySlice<int> dilations_ = Default_dilations();
782 private:
783 static gtl::ArraySlice<int> Default_dilations() {
784 static const int kStorage[] = {1, 1, 1, 1};
785 return gtl::ArraySlice<int>(kStorage);
786 }
787 };
788 Conv2DBackpropInput(const ::tensorflow::Scope& scope, ::tensorflow::Input
789 input_sizes, ::tensorflow::Input filter,
790 ::tensorflow::Input out_backprop, const
791 gtl::ArraySlice<int>& strides, StringPiece padding);
792 Conv2DBackpropInput(const ::tensorflow::Scope& scope, ::tensorflow::Input
793 input_sizes, ::tensorflow::Input filter,
794 ::tensorflow::Input out_backprop, const
795 gtl::ArraySlice<int>& strides, StringPiece padding, const
796 Conv2DBackpropInput::Attrs& attrs);
797 operator ::tensorflow::Output() const { return output; }
798 operator ::tensorflow::Input() const { return output; }
799 ::tensorflow::Node* node() const { return output.node(); }
800
801 static Attrs UseCudnnOnGpu(bool x) {
802 return Attrs().UseCudnnOnGpu(x);
803 }
804 static Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
805 return Attrs().ExplicitPaddings(x);
806 }
807 static Attrs DataFormat(StringPiece x) {
808 return Attrs().DataFormat(x);
809 }
810 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
811 return Attrs().Dilations(x);
812 }
813
814 Operation operation;
815 ::tensorflow::Output output;
816};
817
818/// Computes a 3-D convolution given 5-D `input` and `filter` tensors.
819///
820/// In signal processing, cross-correlation is a measure of similarity of
821/// two waveforms as a function of a time-lag applied to one of them. This
822/// is also known as a sliding dot product or sliding inner-product.
823///
824/// Our Conv3D implements a form of cross-correlation.
825///
826/// Args:
827/// * scope: A Scope object
828/// * input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
829/// * filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
830/// out_channels]`. `in_channels` must match between `input` and `filter`.
831/// * strides: 1-D tensor of length 5. The stride of the sliding window for each
832/// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
833/// * padding: The type of padding algorithm to use.
834///
835/// Optional attributes (see `Attrs`):
836/// * data_format: The data format of the input and output data. With the
837/// default format "NDHWC", the data is stored in the order of:
838/// [batch, in_depth, in_height, in_width, in_channels].
839/// Alternatively, the format could be "NCDHW", the data storage order is:
840/// [batch, in_channels, in_depth, in_height, in_width].
841/// * dilations: 1-D tensor of length 5. The dilation factor for each dimension of
842/// `input`. If set to k > 1, there will be k-1 skipped cells between each
843/// filter element on that dimension. The dimension order is determined by the
844/// value of `data_format`, see above for details. Dilations in the batch and
845/// depth dimensions must be 1.
846///
847/// Returns:
848/// * `Output`: The output tensor.
849class Conv3D {
850 public:
851 /// Optional attribute setters for Conv3D
852 struct Attrs {
853 /// The data format of the input and output data. With the
854 /// default format "NDHWC", the data is stored in the order of:
855 /// [batch, in_depth, in_height, in_width, in_channels].
856 /// Alternatively, the format could be "NCDHW", the data storage order is:
857 /// [batch, in_channels, in_depth, in_height, in_width].
858 ///
859 /// Defaults to "NDHWC"
860 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
861 Attrs ret = *this;
862 ret.data_format_ = x;
863 return ret;
864 }
865
866 /// 1-D tensor of length 5. The dilation factor for each dimension of
867 /// `input`. If set to k > 1, there will be k-1 skipped cells between each
868 /// filter element on that dimension. The dimension order is determined by the
869 /// value of `data_format`, see above for details. Dilations in the batch and
870 /// depth dimensions must be 1.
871 ///
872 /// Defaults to [1, 1, 1, 1, 1]
873 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
874 Attrs ret = *this;
875 ret.dilations_ = x;
876 return ret;
877 }
878
879 StringPiece data_format_ = "NDHWC";
880 gtl::ArraySlice<int> dilations_ = Default_dilations();
881 private:
882 static gtl::ArraySlice<int> Default_dilations() {
883 static const int kStorage[] = {1, 1, 1, 1, 1};
884 return gtl::ArraySlice<int>(kStorage);
885 }
886 };
887 Conv3D(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
888 ::tensorflow::Input filter, const gtl::ArraySlice<int>& strides,
889 StringPiece padding);
890 Conv3D(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
891 ::tensorflow::Input filter, const gtl::ArraySlice<int>& strides,
892 StringPiece padding, const Conv3D::Attrs& attrs);
893 operator ::tensorflow::Output() const { return output; }
894 operator ::tensorflow::Input() const { return output; }
895 ::tensorflow::Node* node() const { return output.node(); }
896
897 static Attrs DataFormat(StringPiece x) {
898 return Attrs().DataFormat(x);
899 }
900 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
901 return Attrs().Dilations(x);
902 }
903
904 Operation operation;
905 ::tensorflow::Output output;
906};
907
908/// Computes the gradients of 3-D convolution with respect to the filter.
909///
910/// Args:
911/// * scope: A Scope object
912/// * input: Shape `[batch, depth, rows, cols, in_channels]`.
913/// * filter_sizes: An integer vector representing the tensor shape of `filter`,
914/// where `filter` is a 5-D
915/// `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
916/// tensor.
917/// * out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
918/// out_channels]`.
919/// * strides: 1-D tensor of length 5. The stride of the sliding window for each
920/// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
921/// * padding: The type of padding algorithm to use.
922///
923/// Optional attributes (see `Attrs`):
924/// * data_format: The data format of the input and output data. With the
925/// default format "NDHWC", the data is stored in the order of:
926/// [batch, in_depth, in_height, in_width, in_channels].
927/// Alternatively, the format could be "NCDHW", the data storage order is:
928/// [batch, in_channels, in_depth, in_height, in_width].
929/// * dilations: 1-D tensor of length 5. The dilation factor for each dimension of
930/// `input`. If set to k > 1, there will be k-1 skipped cells between each
931/// filter element on that dimension. The dimension order is determined by the
932/// value of `data_format`, see above for details. Dilations in the batch and
933/// depth dimensions must be 1.
934///
935/// Returns:
936/// * `Output`: The output tensor.
937class Conv3DBackpropFilterV2 {
938 public:
939 /// Optional attribute setters for Conv3DBackpropFilterV2
940 struct Attrs {
941 /// The data format of the input and output data. With the
942 /// default format "NDHWC", the data is stored in the order of:
943 /// [batch, in_depth, in_height, in_width, in_channels].
944 /// Alternatively, the format could be "NCDHW", the data storage order is:
945 /// [batch, in_channels, in_depth, in_height, in_width].
946 ///
947 /// Defaults to "NDHWC"
948 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
949 Attrs ret = *this;
950 ret.data_format_ = x;
951 return ret;
952 }
953
954 /// 1-D tensor of length 5. The dilation factor for each dimension of
955 /// `input`. If set to k > 1, there will be k-1 skipped cells between each
956 /// filter element on that dimension. The dimension order is determined by the
957 /// value of `data_format`, see above for details. Dilations in the batch and
958 /// depth dimensions must be 1.
959 ///
960 /// Defaults to [1, 1, 1, 1, 1]
961 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
962 Attrs ret = *this;
963 ret.dilations_ = x;
964 return ret;
965 }
966
967 StringPiece data_format_ = "NDHWC";
968 gtl::ArraySlice<int> dilations_ = Default_dilations();
969 private:
970 static gtl::ArraySlice<int> Default_dilations() {
971 static const int kStorage[] = {1, 1, 1, 1, 1};
972 return gtl::ArraySlice<int>(kStorage);
973 }
974 };
975 Conv3DBackpropFilterV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
976 input, ::tensorflow::Input filter_sizes,
977 ::tensorflow::Input out_backprop, const
978 gtl::ArraySlice<int>& strides, StringPiece padding);
979 Conv3DBackpropFilterV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
980 input, ::tensorflow::Input filter_sizes,
981 ::tensorflow::Input out_backprop, const
982 gtl::ArraySlice<int>& strides, StringPiece padding,
983 const Conv3DBackpropFilterV2::Attrs& attrs);
984 operator ::tensorflow::Output() const { return output; }
985 operator ::tensorflow::Input() const { return output; }
986 ::tensorflow::Node* node() const { return output.node(); }
987
988 static Attrs DataFormat(StringPiece x) {
989 return Attrs().DataFormat(x);
990 }
991 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
992 return Attrs().Dilations(x);
993 }
994
995 Operation operation;
996 ::tensorflow::Output output;
997};
998
999/// Computes the gradients of 3-D convolution with respect to the input.
1000///
1001/// Args:
1002/// * scope: A Scope object
1003/// * input_sizes: An integer vector representing the tensor shape of `input`,
1004/// where `input` is a 5-D
1005/// `[batch, depth, rows, cols, in_channels]` tensor.
1006/// * filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
1007/// `in_channels` must match between `input` and `filter`.
1008/// * out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
1009/// out_channels]`.
1010/// * strides: 1-D tensor of length 5. The stride of the sliding window for each
1011/// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
1012/// * padding: The type of padding algorithm to use.
1013///
1014/// Optional attributes (see `Attrs`):
1015/// * data_format: The data format of the input and output data. With the
1016/// default format "NDHWC", the data is stored in the order of:
1017/// [batch, in_depth, in_height, in_width, in_channels].
1018/// Alternatively, the format could be "NCDHW", the data storage order is:
1019/// [batch, in_channels, in_depth, in_height, in_width].
1020/// * dilations: 1-D tensor of length 5. The dilation factor for each dimension of
1021/// `input`. If set to k > 1, there will be k-1 skipped cells between each
1022/// filter element on that dimension. The dimension order is determined by the
1023/// value of `data_format`, see above for details. Dilations in the batch and
1024/// depth dimensions must be 1.
1025///
1026/// Returns:
1027/// * `Output`: The output tensor.
1028class Conv3DBackpropInputV2 {
1029 public:
1030 /// Optional attribute setters for Conv3DBackpropInputV2
1031 struct Attrs {
1032 /// The data format of the input and output data. With the
1033 /// default format "NDHWC", the data is stored in the order of:
1034 /// [batch, in_depth, in_height, in_width, in_channels].
1035 /// Alternatively, the format could be "NCDHW", the data storage order is:
1036 /// [batch, in_channels, in_depth, in_height, in_width].
1037 ///
1038 /// Defaults to "NDHWC"
1039 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
1040 Attrs ret = *this;
1041 ret.data_format_ = x;
1042 return ret;
1043 }
1044
1045 /// 1-D tensor of length 5. The dilation factor for each dimension of
1046 /// `input`. If set to k > 1, there will be k-1 skipped cells between each
1047 /// filter element on that dimension. The dimension order is determined by the
1048 /// value of `data_format`, see above for details. Dilations in the batch and
1049 /// depth dimensions must be 1.
1050 ///
1051 /// Defaults to [1, 1, 1, 1, 1]
1052 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
1053 Attrs ret = *this;
1054 ret.dilations_ = x;
1055 return ret;
1056 }
1057
1058 StringPiece data_format_ = "NDHWC";
1059 gtl::ArraySlice<int> dilations_ = Default_dilations();
1060 private:
1061 static gtl::ArraySlice<int> Default_dilations() {
1062 static const int kStorage[] = {1, 1, 1, 1, 1};
1063 return gtl::ArraySlice<int>(kStorage);
1064 }
1065 };
1066 Conv3DBackpropInputV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
1067 input_sizes, ::tensorflow::Input filter,
1068 ::tensorflow::Input out_backprop, const
1069 gtl::ArraySlice<int>& strides, StringPiece padding);
1070 Conv3DBackpropInputV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
1071 input_sizes, ::tensorflow::Input filter,
1072 ::tensorflow::Input out_backprop, const
1073 gtl::ArraySlice<int>& strides, StringPiece padding, const
1074 Conv3DBackpropInputV2::Attrs& attrs);
1075 operator ::tensorflow::Output() const { return output; }
1076 operator ::tensorflow::Input() const { return output; }
1077 ::tensorflow::Node* node() const { return output.node(); }
1078
1079 static Attrs DataFormat(StringPiece x) {
1080 return Attrs().DataFormat(x);
1081 }
1082 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
1083 return Attrs().Dilations(x);
1084 }
1085
1086 Operation operation;
1087 ::tensorflow::Output output;
1088};
1089
1090/// Returns the dimension index in the destination data format given the one in
1091///
1092/// the source data format.
1093///
1094/// Args:
1095/// * scope: A Scope object
1096/// * x: A Tensor with each element as a dimension index in source data format.
1097/// Must be in the range [-4, 4).
1098///
1099/// Optional attributes (see `Attrs`):
1100/// * src_format: source data format.
1101/// * dst_format: destination data format.
1102///
1103/// Returns:
1104/// * `Output`: A Tensor with each element as a dimension index in destination data format.
1105class DataFormatDimMap {
1106 public:
1107 /// Optional attribute setters for DataFormatDimMap
1108 struct Attrs {
1109 /// source data format.
1110 ///
1111 /// Defaults to "NHWC"
1112 TF_MUST_USE_RESULT Attrs SrcFormat(StringPiece x) {
1113 Attrs ret = *this;
1114 ret.src_format_ = x;
1115 return ret;
1116 }
1117
1118 /// destination data format.
1119 ///
1120 /// Defaults to "NCHW"
1121 TF_MUST_USE_RESULT Attrs DstFormat(StringPiece x) {
1122 Attrs ret = *this;
1123 ret.dst_format_ = x;
1124 return ret;
1125 }
1126
1127 StringPiece src_format_ = "NHWC";
1128 StringPiece dst_format_ = "NCHW";
1129 };
1130 DataFormatDimMap(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
1131 DataFormatDimMap(const ::tensorflow::Scope& scope, ::tensorflow::Input x, const
1132 DataFormatDimMap::Attrs& attrs);
1133 operator ::tensorflow::Output() const { return y; }
1134 operator ::tensorflow::Input() const { return y; }
1135 ::tensorflow::Node* node() const { return y.node(); }
1136
1137 static Attrs SrcFormat(StringPiece x) {
1138 return Attrs().SrcFormat(x);
1139 }
1140 static Attrs DstFormat(StringPiece x) {
1141 return Attrs().DstFormat(x);
1142 }
1143
1144 Operation operation;
1145 ::tensorflow::Output y;
1146};
1147
1148/// Permute input tensor from `src_format` to `dst_format`.
1149///
1150/// Given source and destination format strings of length n=4 or 5, the input
1151/// tensor must be a vector of size n or n-2, or a 2D tensor of shape
1152/// (n, 2) or (n-2, 2).
1153///
1154/// If the first dimension of the input tensor is n-2, it is assumed that
1155/// non-spatial dimensions are omitted (i.e `N`, `C`).
1156///
1157/// For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and input:
1158/// ```
1159/// [1, 2, 3, 4]
1160/// ```
1161/// , the output will be:
1162/// ```
1163/// [1, 4, 2, 3]
1164/// ```
1165/// With `src_format` of `NDHWC`, `dst_format` of `NCDHW`, and input:
1166/// ```
1167/// [[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]]
1168/// ```
1169/// , the output will be:
1170/// ```
1171/// [[1, 6], [5, 10], [2, 7], [3, 8], [4, 9]]
1172/// ```
1173/// With `src_format` of `NHWC`, `dst_format` of `NCHW`, and input:
1174/// ```
1175/// [1, 2]
1176/// ```
1177/// , the output will be:
1178/// ```
1179/// [1, 2]
1180/// ```
1181///
1182/// Args:
1183/// * scope: A Scope object
1184/// * x: Tensor of rank 1 or 2 in source data format.
1185///
1186/// Optional attributes (see `Attrs`):
1187/// * src_format: source data format.
1188/// * dst_format: destination data format.
1189///
1190/// Returns:
1191/// * `Output`: Tensor of rank 1 or 2 in destination data format.
1192class DataFormatVecPermute {
1193 public:
1194 /// Optional attribute setters for DataFormatVecPermute
1195 struct Attrs {
1196 /// source data format.
1197 ///
1198 /// Defaults to "NHWC"
1199 TF_MUST_USE_RESULT Attrs SrcFormat(StringPiece x) {
1200 Attrs ret = *this;
1201 ret.src_format_ = x;
1202 return ret;
1203 }
1204
1205 /// destination data format.
1206 ///
1207 /// Defaults to "NCHW"
1208 TF_MUST_USE_RESULT Attrs DstFormat(StringPiece x) {
1209 Attrs ret = *this;
1210 ret.dst_format_ = x;
1211 return ret;
1212 }
1213
1214 StringPiece src_format_ = "NHWC";
1215 StringPiece dst_format_ = "NCHW";
1216 };
1217 DataFormatVecPermute(const ::tensorflow::Scope& scope, ::tensorflow::Input x);
1218 DataFormatVecPermute(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
1219 const DataFormatVecPermute::Attrs& attrs);
1220 operator ::tensorflow::Output() const { return y; }
1221 operator ::tensorflow::Input() const { return y; }
1222 ::tensorflow::Node* node() const { return y.node(); }
1223
1224 static Attrs SrcFormat(StringPiece x) {
1225 return Attrs().SrcFormat(x);
1226 }
1227 static Attrs DstFormat(StringPiece x) {
1228 return Attrs().DstFormat(x);
1229 }
1230
1231 Operation operation;
1232 ::tensorflow::Output y;
1233};
1234
1235/// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
1236///
1237/// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
1238/// and a filter / kernel tensor of shape
1239/// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
1240/// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
1241/// a different filter to each input channel (expanding from 1 channel to
1242/// `channel_multiplier` channels for each), then concatenates the results
1243/// together. Thus, the output has `in_channels * channel_multiplier` channels.
1244///
1245/// ```
1246/// for k in 0..in_channels-1
1247/// for q in 0..channel_multiplier-1
1248/// output[b, i, j, k * channel_multiplier + q] =
1249/// sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
1250/// filter[di, dj, k, q]
1251/// ```
1252///
1253/// Must have `strides[0] = strides[3] = 1`. For the most common case of the same
1254/// horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
1255///
1256/// Args:
1257/// * scope: A Scope object
1258/// * strides: 1-D of length 4. The stride of the sliding window for each dimension
1259/// of `input`.
1260/// * padding: The type of padding algorithm to use.
1261///
1262/// Optional attributes (see `Attrs`):
1263/// * data_format: Specify the data format of the input and output data. With the
1264/// default format "NHWC", the data is stored in the order of:
1265/// [batch, height, width, channels].
1266/// Alternatively, the format could be "NCHW", the data storage order of:
1267/// [batch, channels, height, width].
1268/// * dilations: 1-D tensor of length 4. The dilation factor for each dimension of
1269/// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
1270/// element on that dimension. The dimension order is determined by the value of
1271/// `data_format`, see above for details. Dilations in the batch and depth
1272/// dimensions must be 1.
1273///
1274/// Returns:
1275/// * `Output`: The output tensor.
1276class DepthwiseConv2dNative {
1277 public:
1278 /// Optional attribute setters for DepthwiseConv2dNative
1279 struct Attrs {
1280 /// Defaults to []
1281 TF_MUST_USE_RESULT Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
1282 Attrs ret = *this;
1283 ret.explicit_paddings_ = x;
1284 return ret;
1285 }
1286
1287 /// Specify the data format of the input and output data. With the
1288 /// default format "NHWC", the data is stored in the order of:
1289 /// [batch, height, width, channels].
1290 /// Alternatively, the format could be "NCHW", the data storage order of:
1291 /// [batch, channels, height, width].
1292 ///
1293 /// Defaults to "NHWC"
1294 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
1295 Attrs ret = *this;
1296 ret.data_format_ = x;
1297 return ret;
1298 }
1299
1300 /// 1-D tensor of length 4. The dilation factor for each dimension of
1301 /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
1302 /// element on that dimension. The dimension order is determined by the value of
1303 /// `data_format`, see above for details. Dilations in the batch and depth
1304 /// dimensions must be 1.
1305 ///
1306 /// Defaults to [1, 1, 1, 1]
1307 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
1308 Attrs ret = *this;
1309 ret.dilations_ = x;
1310 return ret;
1311 }
1312
1313 gtl::ArraySlice<int> explicit_paddings_ = {};
1314 StringPiece data_format_ = "NHWC";
1315 gtl::ArraySlice<int> dilations_ = Default_dilations();
1316 private:
1317 static gtl::ArraySlice<int> Default_dilations() {
1318 static const int kStorage[] = {1, 1, 1, 1};
1319 return gtl::ArraySlice<int>(kStorage);
1320 }
1321 };
1322 DepthwiseConv2dNative(const ::tensorflow::Scope& scope, ::tensorflow::Input
1323 input, ::tensorflow::Input filter, const
1324 gtl::ArraySlice<int>& strides, StringPiece padding);
1325 DepthwiseConv2dNative(const ::tensorflow::Scope& scope, ::tensorflow::Input
1326 input, ::tensorflow::Input filter, const
1327 gtl::ArraySlice<int>& strides, StringPiece padding, const
1328 DepthwiseConv2dNative::Attrs& attrs);
1329 operator ::tensorflow::Output() const { return output; }
1330 operator ::tensorflow::Input() const { return output; }
1331 ::tensorflow::Node* node() const { return output.node(); }
1332
1333 static Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
1334 return Attrs().ExplicitPaddings(x);
1335 }
1336 static Attrs DataFormat(StringPiece x) {
1337 return Attrs().DataFormat(x);
1338 }
1339 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
1340 return Attrs().Dilations(x);
1341 }
1342
1343 Operation operation;
1344 ::tensorflow::Output output;
1345};
1346
1347/// Computes the gradients of depthwise convolution with respect to the filter.
1348///
1349/// Args:
1350/// * scope: A Scope object
1351/// * input: 4-D with shape based on `data_format`. For example, if
1352/// `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
1353/// in_width, in_channels]` tensor.
1354/// * filter_sizes: An integer vector representing the tensor shape of `filter`,
1355/// where `filter` is a 4-D
1356/// `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
1357/// * out_backprop: 4-D with shape based on `data_format`.
1358/// For example, if `data_format` is 'NHWC' then
1359/// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
1360/// Gradients w.r.t. the output of the convolution.
1361/// * strides: The stride of the sliding window for each dimension of the input
1362/// of the convolution.
1363/// * padding: The type of padding algorithm to use.
1364///
1365/// Optional attributes (see `Attrs`):
1366/// * data_format: Specify the data format of the input and output data. With the
1367/// default format "NHWC", the data is stored in the order of:
1368/// [batch, height, width, channels].
1369/// Alternatively, the format could be "NCHW", the data storage order of:
1370/// [batch, channels, height, width].
1371/// * dilations: 1-D tensor of length 4. The dilation factor for each dimension of
1372/// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
1373/// element on that dimension. The dimension order is determined by the value of
1374/// `data_format`, see above for details. Dilations in the batch and depth
1375/// dimensions must be 1.
1376///
1377/// Returns:
1378/// * `Output`: 4-D with shape
1379/// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
1380/// the `filter` input of the convolution.
1381class DepthwiseConv2dNativeBackpropFilter {
1382 public:
1383 /// Optional attribute setters for DepthwiseConv2dNativeBackpropFilter
1384 struct Attrs {
1385 /// Defaults to []
1386 TF_MUST_USE_RESULT Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
1387 Attrs ret = *this;
1388 ret.explicit_paddings_ = x;
1389 return ret;
1390 }
1391
1392 /// Specify the data format of the input and output data. With the
1393 /// default format "NHWC", the data is stored in the order of:
1394 /// [batch, height, width, channels].
1395 /// Alternatively, the format could be "NCHW", the data storage order of:
1396 /// [batch, channels, height, width].
1397 ///
1398 /// Defaults to "NHWC"
1399 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
1400 Attrs ret = *this;
1401 ret.data_format_ = x;
1402 return ret;
1403 }
1404
1405 /// 1-D tensor of length 4. The dilation factor for each dimension of
1406 /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
1407 /// element on that dimension. The dimension order is determined by the value of
1408 /// `data_format`, see above for details. Dilations in the batch and depth
1409 /// dimensions must be 1.
1410 ///
1411 /// Defaults to [1, 1, 1, 1]
1412 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
1413 Attrs ret = *this;
1414 ret.dilations_ = x;
1415 return ret;
1416 }
1417
1418 gtl::ArraySlice<int> explicit_paddings_ = {};
1419 StringPiece data_format_ = "NHWC";
1420 gtl::ArraySlice<int> dilations_ = Default_dilations();
1421 private:
1422 static gtl::ArraySlice<int> Default_dilations() {
1423 static const int kStorage[] = {1, 1, 1, 1};
1424 return gtl::ArraySlice<int>(kStorage);
1425 }
1426 };
1427 DepthwiseConv2dNativeBackpropFilter(const ::tensorflow::Scope& scope,
1428 ::tensorflow::Input input,
1429 ::tensorflow::Input filter_sizes,
1430 ::tensorflow::Input out_backprop, const
1431 gtl::ArraySlice<int>& strides, StringPiece
1432 padding);
1433 DepthwiseConv2dNativeBackpropFilter(const ::tensorflow::Scope& scope,
1434 ::tensorflow::Input input,
1435 ::tensorflow::Input filter_sizes,
1436 ::tensorflow::Input out_backprop, const
1437 gtl::ArraySlice<int>& strides, StringPiece
1438 padding, const
1439 DepthwiseConv2dNativeBackpropFilter::Attrs&
1440 attrs);
1441 operator ::tensorflow::Output() const { return output; }
1442 operator ::tensorflow::Input() const { return output; }
1443 ::tensorflow::Node* node() const { return output.node(); }
1444
1445 static Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
1446 return Attrs().ExplicitPaddings(x);
1447 }
1448 static Attrs DataFormat(StringPiece x) {
1449 return Attrs().DataFormat(x);
1450 }
1451 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
1452 return Attrs().Dilations(x);
1453 }
1454
1455 Operation operation;
1456 ::tensorflow::Output output;
1457};
1458
1459/// Computes the gradients of depthwise convolution with respect to the input.
1460///
1461/// Args:
1462/// * scope: A Scope object
1463/// * input_sizes: An integer vector representing the shape of `input`, based
1464/// on `data_format`. For example, if `data_format` is 'NHWC' then
1465/// `input` is a 4-D `[batch, height, width, channels]` tensor.
1466/// * filter: 4-D with shape
1467/// `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
1468/// * out_backprop: 4-D with shape based on `data_format`.
1469/// For example, if `data_format` is 'NHWC' then
1470/// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
1471/// Gradients w.r.t. the output of the convolution.
1472/// * strides: The stride of the sliding window for each dimension of the input
1473/// of the convolution.
1474/// * padding: The type of padding algorithm to use.
1475///
1476/// Optional attributes (see `Attrs`):
1477/// * data_format: Specify the data format of the input and output data. With the
1478/// default format "NHWC", the data is stored in the order of:
1479/// [batch, height, width, channels].
1480/// Alternatively, the format could be "NCHW", the data storage order of:
1481/// [batch, channels, height, width].
1482/// * dilations: 1-D tensor of length 4. The dilation factor for each dimension of
1483/// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
1484/// element on that dimension. The dimension order is determined by the value of
1485/// `data_format`, see above for details. Dilations in the batch and depth
1486/// dimensions must be 1.
1487///
1488/// Returns:
1489/// * `Output`: 4-D with shape according to `data_format`. For example, if
1490/// `data_format` is 'NHWC', output shape is `[batch, in_height,
1491/// in_width, in_channels]`. Gradient w.r.t. the input of the
1492/// convolution.
1493class DepthwiseConv2dNativeBackpropInput {
1494 public:
1495 /// Optional attribute setters for DepthwiseConv2dNativeBackpropInput
1496 struct Attrs {
1497 /// Defaults to []
1498 TF_MUST_USE_RESULT Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
1499 Attrs ret = *this;
1500 ret.explicit_paddings_ = x;
1501 return ret;
1502 }
1503
1504 /// Specify the data format of the input and output data. With the
1505 /// default format "NHWC", the data is stored in the order of:
1506 /// [batch, height, width, channels].
1507 /// Alternatively, the format could be "NCHW", the data storage order of:
1508 /// [batch, channels, height, width].
1509 ///
1510 /// Defaults to "NHWC"
1511 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
1512 Attrs ret = *this;
1513 ret.data_format_ = x;
1514 return ret;
1515 }
1516
1517 /// 1-D tensor of length 4. The dilation factor for each dimension of
1518 /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
1519 /// element on that dimension. The dimension order is determined by the value of
1520 /// `data_format`, see above for details. Dilations in the batch and depth
1521 /// dimensions must be 1.
1522 ///
1523 /// Defaults to [1, 1, 1, 1]
1524 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
1525 Attrs ret = *this;
1526 ret.dilations_ = x;
1527 return ret;
1528 }
1529
1530 gtl::ArraySlice<int> explicit_paddings_ = {};
1531 StringPiece data_format_ = "NHWC";
1532 gtl::ArraySlice<int> dilations_ = Default_dilations();
1533 private:
1534 static gtl::ArraySlice<int> Default_dilations() {
1535 static const int kStorage[] = {1, 1, 1, 1};
1536 return gtl::ArraySlice<int>(kStorage);
1537 }
1538 };
1539 DepthwiseConv2dNativeBackpropInput(const ::tensorflow::Scope& scope,
1540 ::tensorflow::Input input_sizes,
1541 ::tensorflow::Input filter,
1542 ::tensorflow::Input out_backprop, const
1543 gtl::ArraySlice<int>& strides, StringPiece
1544 padding);
1545 DepthwiseConv2dNativeBackpropInput(const ::tensorflow::Scope& scope,
1546 ::tensorflow::Input input_sizes,
1547 ::tensorflow::Input filter,
1548 ::tensorflow::Input out_backprop, const
1549 gtl::ArraySlice<int>& strides, StringPiece
1550 padding, const
1551 DepthwiseConv2dNativeBackpropInput::Attrs&
1552 attrs);
1553 operator ::tensorflow::Output() const { return output; }
1554 operator ::tensorflow::Input() const { return output; }
1555 ::tensorflow::Node* node() const { return output.node(); }
1556
1557 static Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
1558 return Attrs().ExplicitPaddings(x);
1559 }
1560 static Attrs DataFormat(StringPiece x) {
1561 return Attrs().DataFormat(x);
1562 }
1563 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
1564 return Attrs().Dilations(x);
1565 }
1566
1567 Operation operation;
1568 ::tensorflow::Output output;
1569};
1570
1571/// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
1572///
1573/// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
1574/// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
1575/// input channel is processed independently of the others with its own structuring
1576/// function. The `output` tensor has shape
1577/// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
1578/// tensor depend on the `padding` algorithm. We currently only support the default
1579/// "NHWC" `data_format`.
1580///
1581/// In detail, the grayscale morphological 2-D dilation is the max-sum correlation
1582/// (for consistency with `conv2d`, we use unmirrored filters):
1583///
1584/// output[b, y, x, c] =
1585/// max_{dy, dx} input[b,
1586/// strides[1] * y + rates[1] * dy,
1587/// strides[2] * x + rates[2] * dx,
1588/// c] +
1589/// filter[dy, dx, c]
1590///
1591/// Max-pooling is a special case when the filter has size equal to the pooling
1592/// kernel size and contains all zeros.
1593///
1594/// Note on duality: The dilation of `input` by the `filter` is equal to the
1595/// negation of the erosion of `-input` by the reflected `filter`.
1596///
1597/// Args:
1598/// * scope: A Scope object
1599/// * input: 4-D with shape `[batch, in_height, in_width, depth]`.
1600/// * filter: 3-D with shape `[filter_height, filter_width, depth]`.
1601/// * strides: The stride of the sliding window for each dimension of the input
1602/// tensor. Must be: `[1, stride_height, stride_width, 1]`.
1603/// * rates: The input stride for atrous morphological dilation. Must be:
1604/// `[1, rate_height, rate_width, 1]`.
1605/// * padding: The type of padding algorithm to use.
1606///
1607/// Returns:
1608/// * `Output`: 4-D with shape `[batch, out_height, out_width, depth]`.
1609class Dilation2D {
1610 public:
1611 Dilation2D(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
1612 ::tensorflow::Input filter, const gtl::ArraySlice<int>& strides,
1613 const gtl::ArraySlice<int>& rates, StringPiece padding);
1614 operator ::tensorflow::Output() const { return output; }
1615 operator ::tensorflow::Input() const { return output; }
1616 ::tensorflow::Node* node() const { return output.node(); }
1617
1618 Operation operation;
1619 ::tensorflow::Output output;
1620};
1621
1622/// Computes the gradient of morphological 2-D dilation with respect to the filter.
1623///
1624/// Args:
1625/// * scope: A Scope object
1626/// * input: 4-D with shape `[batch, in_height, in_width, depth]`.
1627/// * filter: 3-D with shape `[filter_height, filter_width, depth]`.
1628/// * out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
1629/// * strides: 1-D of length 4. The stride of the sliding window for each dimension of
1630/// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
1631/// * rates: 1-D of length 4. The input stride for atrous morphological dilation.
1632/// Must be: `[1, rate_height, rate_width, 1]`.
1633/// * padding: The type of padding algorithm to use.
1634///
1635/// Returns:
1636/// * `Output`: 3-D with shape `[filter_height, filter_width, depth]`.
1637class Dilation2DBackpropFilter {
1638 public:
1639 Dilation2DBackpropFilter(const ::tensorflow::Scope& scope, ::tensorflow::Input
1640 input, ::tensorflow::Input filter, ::tensorflow::Input
1641 out_backprop, const gtl::ArraySlice<int>& strides,
1642 const gtl::ArraySlice<int>& rates, StringPiece
1643 padding);
1644 operator ::tensorflow::Output() const { return filter_backprop; }
1645 operator ::tensorflow::Input() const { return filter_backprop; }
1646 ::tensorflow::Node* node() const { return filter_backprop.node(); }
1647
1648 Operation operation;
1649 ::tensorflow::Output filter_backprop;
1650};
1651
1652/// Computes the gradient of morphological 2-D dilation with respect to the input.
1653///
1654/// Args:
1655/// * scope: A Scope object
1656/// * input: 4-D with shape `[batch, in_height, in_width, depth]`.
1657/// * filter: 3-D with shape `[filter_height, filter_width, depth]`.
1658/// * out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
1659/// * strides: 1-D of length 4. The stride of the sliding window for each dimension of
1660/// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
1661/// * rates: 1-D of length 4. The input stride for atrous morphological dilation.
1662/// Must be: `[1, rate_height, rate_width, 1]`.
1663/// * padding: The type of padding algorithm to use.
1664///
1665/// Returns:
1666/// * `Output`: 4-D with shape `[batch, in_height, in_width, depth]`.
1667class Dilation2DBackpropInput {
1668 public:
1669 Dilation2DBackpropInput(const ::tensorflow::Scope& scope, ::tensorflow::Input
1670 input, ::tensorflow::Input filter, ::tensorflow::Input
1671 out_backprop, const gtl::ArraySlice<int>& strides,
1672 const gtl::ArraySlice<int>& rates, StringPiece padding);
1673 operator ::tensorflow::Output() const { return in_backprop; }
1674 operator ::tensorflow::Input() const { return in_backprop; }
1675 ::tensorflow::Node* node() const { return in_backprop.node(); }
1676
1677 Operation operation;
1678 ::tensorflow::Output in_backprop;
1679};
1680
1681/// Computes the exponential linear function.
1682///
1683/// The ELU function is defined as:
1684///
1685/// * $ e ^ x - 1 $ if $ x < 0 $
1686/// * $ x $ if $ x >= 0 $
1687///
1688/// Examples:
1689///
1690/// >>> tf.nn.elu(1.0)
1691/// <tf.Tensor: shape=(), dtype=float32, numpy=1.0>
1692/// >>> tf.nn.elu(0.0)
1693/// <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
1694/// >>> tf.nn.elu(-1000.0)
1695/// <tf.Tensor: shape=(), dtype=float32, numpy=-1.0>
1696///
1697/// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
1698/// ](http://arxiv.org/abs/1511.07289)
1699///
1700/// Args:
1701/// * scope: A Scope object
1702///
1703/// Returns:
1704/// * `Output`: The activations tensor.
1705class Elu {
1706 public:
1707 Elu(const ::tensorflow::Scope& scope, ::tensorflow::Input features);
1708 operator ::tensorflow::Output() const { return activations; }
1709 operator ::tensorflow::Input() const { return activations; }
1710 ::tensorflow::Node* node() const { return activations.node(); }
1711
1712 Operation operation;
1713 ::tensorflow::Output activations;
1714};
1715
1716/// Performs fractional average pooling on the input.
1717///
1718/// Fractional average pooling is similar to Fractional max pooling in the pooling
1719/// region generation step. The only difference is that after pooling regions are
1720/// generated, a mean operation is performed instead of a max operation in each
1721/// pooling region.
1722///
1723/// Args:
1724/// * scope: A Scope object
1725/// * value: 4-D with shape `[batch, height, width, channels]`.
1726/// * pooling_ratio: Pooling ratio for each dimension of `value`, currently only
1727/// supports row and col dimension and should be >= 1.0. For example, a valid
1728/// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
1729/// must be 1.0 because we don't allow pooling on batch and channels
1730/// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
1731/// respectively.
1732///
1733/// Optional attributes (see `Attrs`):
1734/// * pseudo_random: When set to True, generates the pooling sequence in a
1735/// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
1736/// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
1737/// difference between pseudorandom and random.
1738/// * overlapping: When set to True, it means when pooling, the values at the boundary
1739/// of adjacent pooling cells are used by both cells. For example:
1740///
1741/// `index 0 1 2 3 4`
1742///
1743/// `value 20 5 16 3 7`
1744///
1745/// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
1746/// The result would be [41/3, 26/3] for fractional avg pooling.
1747/// * deterministic: When set to True, a fixed pooling region will be used when
1748/// iterating over a FractionalAvgPool node in the computation graph. Mainly used
1749/// in unit test to make FractionalAvgPool deterministic.
1750/// * seed: If either seed or seed2 are set to be non-zero, the random number
1751/// generator is seeded by the given seed. Otherwise, it is seeded by a
1752/// random seed.
1753/// * seed2: An second seed to avoid seed collision.
1754///
1755/// Returns:
1756/// * `Output` output: output tensor after fractional avg pooling.
1757/// * `Output` row_pooling_sequence: row pooling sequence, needed to calculate gradient.
1758/// * `Output` col_pooling_sequence: column pooling sequence, needed to calculate gradient.
1759class FractionalAvgPool {
1760 public:
1761 /// Optional attribute setters for FractionalAvgPool
1762 struct Attrs {
1763 /// When set to True, generates the pooling sequence in a
1764 /// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
1765 /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
1766 /// difference between pseudorandom and random.
1767 ///
1768 /// Defaults to false
1769 TF_MUST_USE_RESULT Attrs PseudoRandom(bool x) {
1770 Attrs ret = *this;
1771 ret.pseudo_random_ = x;
1772 return ret;
1773 }
1774
1775 /// When set to True, it means when pooling, the values at the boundary
1776 /// of adjacent pooling cells are used by both cells. For example:
1777 ///
1778 /// `index 0 1 2 3 4`
1779 ///
1780 /// `value 20 5 16 3 7`
1781 ///
1782 /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
1783 /// The result would be [41/3, 26/3] for fractional avg pooling.
1784 ///
1785 /// Defaults to false
1786 TF_MUST_USE_RESULT Attrs Overlapping(bool x) {
1787 Attrs ret = *this;
1788 ret.overlapping_ = x;
1789 return ret;
1790 }
1791
1792 /// When set to True, a fixed pooling region will be used when
1793 /// iterating over a FractionalAvgPool node in the computation graph. Mainly used
1794 /// in unit test to make FractionalAvgPool deterministic.
1795 ///
1796 /// Defaults to false
1797 TF_MUST_USE_RESULT Attrs Deterministic(bool x) {
1798 Attrs ret = *this;
1799 ret.deterministic_ = x;
1800 return ret;
1801 }
1802
1803 /// If either seed or seed2 are set to be non-zero, the random number
1804 /// generator is seeded by the given seed. Otherwise, it is seeded by a
1805 /// random seed.
1806 ///
1807 /// Defaults to 0
1808 TF_MUST_USE_RESULT Attrs Seed(int64 x) {
1809 Attrs ret = *this;
1810 ret.seed_ = x;
1811 return ret;
1812 }
1813
1814 /// An second seed to avoid seed collision.
1815 ///
1816 /// Defaults to 0
1817 TF_MUST_USE_RESULT Attrs Seed2(int64 x) {
1818 Attrs ret = *this;
1819 ret.seed2_ = x;
1820 return ret;
1821 }
1822
1823 bool pseudo_random_ = false;
1824 bool overlapping_ = false;
1825 bool deterministic_ = false;
1826 int64 seed_ = 0;
1827 int64 seed2_ = 0;
1828 };
1829 FractionalAvgPool(const ::tensorflow::Scope& scope, ::tensorflow::Input value,
1830 const gtl::ArraySlice<float>& pooling_ratio);
1831 FractionalAvgPool(const ::tensorflow::Scope& scope, ::tensorflow::Input value,
1832 const gtl::ArraySlice<float>& pooling_ratio, const
1833 FractionalAvgPool::Attrs& attrs);
1834
1835 static Attrs PseudoRandom(bool x) {
1836 return Attrs().PseudoRandom(x);
1837 }
1838 static Attrs Overlapping(bool x) {
1839 return Attrs().Overlapping(x);
1840 }
1841 static Attrs Deterministic(bool x) {
1842 return Attrs().Deterministic(x);
1843 }
1844 static Attrs Seed(int64 x) {
1845 return Attrs().Seed(x);
1846 }
1847 static Attrs Seed2(int64 x) {
1848 return Attrs().Seed2(x);
1849 }
1850
1851 Operation operation;
1852 ::tensorflow::Output output;
1853 ::tensorflow::Output row_pooling_sequence;
1854 ::tensorflow::Output col_pooling_sequence;
1855};
1856
1857/// Performs fractional max pooling on the input.
1858///
1859/// Fractional max pooling is slightly different than regular max pooling. In
1860/// regular max pooling, you downsize an input set by taking the maximum value of
1861/// smaller N x N subsections of the set (often 2x2), and try to reduce the set by
1862/// a factor of N, where N is an integer. Fractional max pooling, as you might
1863/// expect from the word "fractional", means that the overall reduction ratio N
1864/// does not have to be an integer.
1865///
1866/// The sizes of the pooling regions are generated randomly but are fairly uniform.
1867/// For example, let's look at the height dimension, and the constraints on the
1868/// list of rows that will be pool boundaries.
1869///
1870/// First we define the following:
1871///
1872/// 1. input_row_length : the number of rows from the input set
1873/// 2. output_row_length : which will be smaller than the input
1874/// 3. alpha = input_row_length / output_row_length : our reduction ratio
1875/// 4. K = floor(alpha)
1876/// 5. row_pooling_sequence : this is the result list of pool boundary rows
1877///
1878/// Then, row_pooling_sequence should satisfy:
1879///
1880/// 1. a[0] = 0 : the first value of the sequence is 0
1881/// 2. a[end] = input_row_length : the last value of the sequence is the size
1882/// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
1883/// 4. length(row_pooling_sequence) = output_row_length+1
1884///
1885/// For more details on fractional max pooling, see this paper:
1886/// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
1887///
1888/// Args:
1889/// * scope: A Scope object
1890/// * value: 4-D with shape `[batch, height, width, channels]`.
1891/// * pooling_ratio: Pooling ratio for each dimension of `value`, currently only
1892/// supports row and col dimension and should be >= 1.0. For example, a valid
1893/// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
1894/// must be 1.0 because we don't allow pooling on batch and channels
1895/// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
1896/// respectively.
1897///
1898/// Optional attributes (see `Attrs`):
1899/// * pseudo_random: When set to True, generates the pooling sequence in a
1900/// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
1901/// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
1902/// difference between pseudorandom and random.
1903/// * overlapping: When set to True, it means when pooling, the values at the boundary
1904/// of adjacent pooling cells are used by both cells. For example:
1905///
1906/// `index 0 1 2 3 4`
1907///
1908/// `value 20 5 16 3 7`
1909///
1910/// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
1911/// The result would be [20, 16] for fractional max pooling.
1912/// * deterministic: When set to True, a fixed pooling region will be used when
1913/// iterating over a FractionalMaxPool node in the computation graph. Mainly used
1914/// in unit test to make FractionalMaxPool deterministic.
1915/// * seed: If either seed or seed2 are set to be non-zero, the random number
1916/// generator is seeded by the given seed. Otherwise, it is seeded by a
1917/// random seed.
1918/// * seed2: An second seed to avoid seed collision.
1919///
1920/// Returns:
1921/// * `Output` output: output tensor after fractional max pooling.
1922/// * `Output` row_pooling_sequence: row pooling sequence, needed to calculate gradient.
1923/// * `Output` col_pooling_sequence: column pooling sequence, needed to calculate gradient.
1924class FractionalMaxPool {
1925 public:
1926 /// Optional attribute setters for FractionalMaxPool
1927 struct Attrs {
1928 /// When set to True, generates the pooling sequence in a
1929 /// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
1930 /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
1931 /// difference between pseudorandom and random.
1932 ///
1933 /// Defaults to false
1934 TF_MUST_USE_RESULT Attrs PseudoRandom(bool x) {
1935 Attrs ret = *this;
1936 ret.pseudo_random_ = x;
1937 return ret;
1938 }
1939
1940 /// When set to True, it means when pooling, the values at the boundary
1941 /// of adjacent pooling cells are used by both cells. For example:
1942 ///
1943 /// `index 0 1 2 3 4`
1944 ///
1945 /// `value 20 5 16 3 7`
1946 ///
1947 /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
1948 /// The result would be [20, 16] for fractional max pooling.
1949 ///
1950 /// Defaults to false
1951 TF_MUST_USE_RESULT Attrs Overlapping(bool x) {
1952 Attrs ret = *this;
1953 ret.overlapping_ = x;
1954 return ret;
1955 }
1956
1957 /// When set to True, a fixed pooling region will be used when
1958 /// iterating over a FractionalMaxPool node in the computation graph. Mainly used
1959 /// in unit test to make FractionalMaxPool deterministic.
1960 ///
1961 /// Defaults to false
1962 TF_MUST_USE_RESULT Attrs Deterministic(bool x) {
1963 Attrs ret = *this;
1964 ret.deterministic_ = x;
1965 return ret;
1966 }
1967
1968 /// If either seed or seed2 are set to be non-zero, the random number
1969 /// generator is seeded by the given seed. Otherwise, it is seeded by a
1970 /// random seed.
1971 ///
1972 /// Defaults to 0
1973 TF_MUST_USE_RESULT Attrs Seed(int64 x) {
1974 Attrs ret = *this;
1975 ret.seed_ = x;
1976 return ret;
1977 }
1978
1979 /// An second seed to avoid seed collision.
1980 ///
1981 /// Defaults to 0
1982 TF_MUST_USE_RESULT Attrs Seed2(int64 x) {
1983 Attrs ret = *this;
1984 ret.seed2_ = x;
1985 return ret;
1986 }
1987
1988 bool pseudo_random_ = false;
1989 bool overlapping_ = false;
1990 bool deterministic_ = false;
1991 int64 seed_ = 0;
1992 int64 seed2_ = 0;
1993 };
1994 FractionalMaxPool(const ::tensorflow::Scope& scope, ::tensorflow::Input value,
1995 const gtl::ArraySlice<float>& pooling_ratio);
1996 FractionalMaxPool(const ::tensorflow::Scope& scope, ::tensorflow::Input value,
1997 const gtl::ArraySlice<float>& pooling_ratio, const
1998 FractionalMaxPool::Attrs& attrs);
1999
2000 static Attrs PseudoRandom(bool x) {
2001 return Attrs().PseudoRandom(x);
2002 }
2003 static Attrs Overlapping(bool x) {
2004 return Attrs().Overlapping(x);
2005 }
2006 static Attrs Deterministic(bool x) {
2007 return Attrs().Deterministic(x);
2008 }
2009 static Attrs Seed(int64 x) {
2010 return Attrs().Seed(x);
2011 }
2012 static Attrs Seed2(int64 x) {
2013 return Attrs().Seed2(x);
2014 }
2015
2016 Operation operation;
2017 ::tensorflow::Output output;
2018 ::tensorflow::Output row_pooling_sequence;
2019 ::tensorflow::Output col_pooling_sequence;
2020};
2021
2022/// Batch normalization.
2023///
2024/// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
2025/// The size of 1D Tensors matches the dimension C of the 4D Tensors.
2026///
2027/// Args:
2028/// * scope: A Scope object
2029/// * x: A 4D Tensor for input data.
2030/// * scale: A 1D Tensor for scaling factor, to scale the normalized x.
2031/// * offset: A 1D Tensor for offset, to shift to the normalized x.
2032/// * mean: A 1D Tensor for population mean. Used for inference only;
2033/// must be empty for training.
2034/// * variance: A 1D Tensor for population variance. Used for inference only;
2035/// must be empty for training.
2036///
2037/// Optional attributes (see `Attrs`):
2038/// * epsilon: A small float number added to the variance of x.
2039/// * data_format: The data format for x and y. Either "NHWC" (default) or "NCHW".
2040/// * is_training: A bool value to indicate the operation is for training (default)
2041/// or inference.
2042///
2043/// Returns:
2044/// * `Output` y: A 4D Tensor for output data.
2045/// * `Output` batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
2046/// to compute the running mean.
2047/// * `Output` batch_variance: A 1D Tensor for the computed batch variance, to be used by
2048/// TensorFlow to compute the running variance.
2049/// * `Output` reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
2050/// in the gradient computation.
2051/// * `Output` reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
2052/// in the cuDNN case), to be reused in the gradient computation.
2053class FusedBatchNorm {
2054 public:
2055 /// Optional attribute setters for FusedBatchNorm
2056 struct Attrs {
2057 /// A small float number added to the variance of x.
2058 ///
2059 /// Defaults to 0.0001
2060 TF_MUST_USE_RESULT Attrs Epsilon(float x) {
2061 Attrs ret = *this;
2062 ret.epsilon_ = x;
2063 return ret;
2064 }
2065
2066 /// Defaults to 1
2067 TF_MUST_USE_RESULT Attrs ExponentialAvgFactor(float x) {
2068 Attrs ret = *this;
2069 ret.exponential_avg_factor_ = x;
2070 return ret;
2071 }
2072
2073 /// The data format for x and y. Either "NHWC" (default) or "NCHW".
2074 ///
2075 /// Defaults to "NHWC"
2076 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
2077 Attrs ret = *this;
2078 ret.data_format_ = x;
2079 return ret;
2080 }
2081
2082 /// A bool value to indicate the operation is for training (default)
2083 /// or inference.
2084 ///
2085 /// Defaults to true
2086 TF_MUST_USE_RESULT Attrs IsTraining(bool x) {
2087 Attrs ret = *this;
2088 ret.is_training_ = x;
2089 return ret;
2090 }
2091
2092 float epsilon_ = 0.0001f;
2093 float exponential_avg_factor_ = 1.0f;
2094 StringPiece data_format_ = "NHWC";
2095 bool is_training_ = true;
2096 };
2097 FusedBatchNorm(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
2098 ::tensorflow::Input scale, ::tensorflow::Input offset,
2099 ::tensorflow::Input mean, ::tensorflow::Input variance);
2100 FusedBatchNorm(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
2101 ::tensorflow::Input scale, ::tensorflow::Input offset,
2102 ::tensorflow::Input mean, ::tensorflow::Input variance, const
2103 FusedBatchNorm::Attrs& attrs);
2104
2105 static Attrs Epsilon(float x) {
2106 return Attrs().Epsilon(x);
2107 }
2108 static Attrs ExponentialAvgFactor(float x) {
2109 return Attrs().ExponentialAvgFactor(x);
2110 }
2111 static Attrs DataFormat(StringPiece x) {
2112 return Attrs().DataFormat(x);
2113 }
2114 static Attrs IsTraining(bool x) {
2115 return Attrs().IsTraining(x);
2116 }
2117
2118 Operation operation;
2119 ::tensorflow::Output y;
2120 ::tensorflow::Output batch_mean;
2121 ::tensorflow::Output batch_variance;
2122 ::tensorflow::Output reserve_space_1;
2123 ::tensorflow::Output reserve_space_2;
2124};
2125
2126/// Gradient for batch normalization.
2127///
2128/// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
2129/// The size of 1D Tensors matches the dimension C of the 4D Tensors.
2130///
2131/// Args:
2132/// * scope: A Scope object
2133/// * y_backprop: A 4D Tensor for the gradient with respect to y.
2134/// * x: A 4D Tensor for input data.
2135/// * scale: A 1D Tensor for scaling factor, to scale the normalized x.
2136/// * reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
2137/// mean to be reused in gradient computation. When is_training is
2138/// False, a 1D Tensor for the population mean to be reused in both
2139/// 1st and 2nd order gradient computation.
2140/// * reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
2141/// variance (inverted variance in the cuDNN case) to be reused in
2142/// gradient computation. When is_training is False, a 1D Tensor
2143/// for the population variance to be reused in both 1st and 2nd
2144/// order gradient computation.
2145///
2146/// Optional attributes (see `Attrs`):
2147/// * epsilon: A small float number added to the variance of x.
2148/// * data_format: The data format for y_backprop, x, x_backprop.
2149/// Either "NHWC" (default) or "NCHW".
2150/// * is_training: A bool value to indicate the operation is for training (default)
2151/// or inference.
2152///
2153/// Returns:
2154/// * `Output` x_backprop: A 4D Tensor for the gradient with respect to x.
2155/// * `Output` scale_backprop: A 1D Tensor for the gradient with respect to scale.
2156/// * `Output` offset_backprop: A 1D Tensor for the gradient with respect to offset.
2157/// * `Output` reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
2158/// * `Output` reserve_space_4: Unused placeholder to match the variance input
2159/// in FusedBatchNorm.
2160class FusedBatchNormGrad {
2161 public:
2162 /// Optional attribute setters for FusedBatchNormGrad
2163 struct Attrs {
2164 /// A small float number added to the variance of x.
2165 ///
2166 /// Defaults to 0.0001
2167 TF_MUST_USE_RESULT Attrs Epsilon(float x) {
2168 Attrs ret = *this;
2169 ret.epsilon_ = x;
2170 return ret;
2171 }
2172
2173 /// The data format for y_backprop, x, x_backprop.
2174 /// Either "NHWC" (default) or "NCHW".
2175 ///
2176 /// Defaults to "NHWC"
2177 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
2178 Attrs ret = *this;
2179 ret.data_format_ = x;
2180 return ret;
2181 }
2182
2183 /// A bool value to indicate the operation is for training (default)
2184 /// or inference.
2185 ///
2186 /// Defaults to true
2187 TF_MUST_USE_RESULT Attrs IsTraining(bool x) {
2188 Attrs ret = *this;
2189 ret.is_training_ = x;
2190 return ret;
2191 }
2192
2193 float epsilon_ = 0.0001f;
2194 StringPiece data_format_ = "NHWC";
2195 bool is_training_ = true;
2196 };
2197 FusedBatchNormGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
2198 y_backprop, ::tensorflow::Input x, ::tensorflow::Input
2199 scale, ::tensorflow::Input reserve_space_1,
2200 ::tensorflow::Input reserve_space_2);
2201 FusedBatchNormGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
2202 y_backprop, ::tensorflow::Input x, ::tensorflow::Input
2203 scale, ::tensorflow::Input reserve_space_1,
2204 ::tensorflow::Input reserve_space_2, const
2205 FusedBatchNormGrad::Attrs& attrs);
2206
2207 static Attrs Epsilon(float x) {
2208 return Attrs().Epsilon(x);
2209 }
2210 static Attrs DataFormat(StringPiece x) {
2211 return Attrs().DataFormat(x);
2212 }
2213 static Attrs IsTraining(bool x) {
2214 return Attrs().IsTraining(x);
2215 }
2216
2217 Operation operation;
2218 ::tensorflow::Output x_backprop;
2219 ::tensorflow::Output scale_backprop;
2220 ::tensorflow::Output offset_backprop;
2221 ::tensorflow::Output reserve_space_3;
2222 ::tensorflow::Output reserve_space_4;
2223};
2224
2225/// Gradient for batch normalization.
2226///
2227/// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
2228/// The size of 1D Tensors matches the dimension C of the 4D Tensors.
2229///
2230/// Args:
2231/// * scope: A Scope object
2232/// * y_backprop: A 4D Tensor for the gradient with respect to y.
2233/// * x: A 4D Tensor for input data.
2234/// * scale: A 1D Tensor for scaling factor, to scale the normalized x.
2235/// * reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
2236/// mean to be reused in gradient computation. When is_training is
2237/// False, a 1D Tensor for the population mean to be reused in both
2238/// 1st and 2nd order gradient computation.
2239/// * reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
2240/// variance (inverted variance in the cuDNN case) to be reused in
2241/// gradient computation. When is_training is False, a 1D Tensor
2242/// for the population variance to be reused in both 1st and 2nd
2243/// order gradient computation.
2244///
2245/// Optional attributes (see `Attrs`):
2246/// * epsilon: A small float number added to the variance of x.
2247/// * data_format: The data format for y_backprop, x, x_backprop.
2248/// Either "NHWC" (default) or "NCHW".
2249/// * is_training: A bool value to indicate the operation is for training (default)
2250/// or inference.
2251///
2252/// Returns:
2253/// * `Output` x_backprop: A 4D Tensor for the gradient with respect to x.
2254/// * `Output` scale_backprop: A 1D Tensor for the gradient with respect to scale.
2255/// * `Output` offset_backprop: A 1D Tensor for the gradient with respect to offset.
2256/// * `Output` reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
2257/// * `Output` reserve_space_4: Unused placeholder to match the variance input
2258/// in FusedBatchNorm.
2259class FusedBatchNormGradV2 {
2260 public:
2261 /// Optional attribute setters for FusedBatchNormGradV2
2262 struct Attrs {
2263 /// A small float number added to the variance of x.
2264 ///
2265 /// Defaults to 0.0001
2266 TF_MUST_USE_RESULT Attrs Epsilon(float x) {
2267 Attrs ret = *this;
2268 ret.epsilon_ = x;
2269 return ret;
2270 }
2271
2272 /// The data format for y_backprop, x, x_backprop.
2273 /// Either "NHWC" (default) or "NCHW".
2274 ///
2275 /// Defaults to "NHWC"
2276 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
2277 Attrs ret = *this;
2278 ret.data_format_ = x;
2279 return ret;
2280 }
2281
2282 /// A bool value to indicate the operation is for training (default)
2283 /// or inference.
2284 ///
2285 /// Defaults to true
2286 TF_MUST_USE_RESULT Attrs IsTraining(bool x) {
2287 Attrs ret = *this;
2288 ret.is_training_ = x;
2289 return ret;
2290 }
2291
2292 float epsilon_ = 0.0001f;
2293 StringPiece data_format_ = "NHWC";
2294 bool is_training_ = true;
2295 };
2296 FusedBatchNormGradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
2297 y_backprop, ::tensorflow::Input x, ::tensorflow::Input
2298 scale, ::tensorflow::Input reserve_space_1,
2299 ::tensorflow::Input reserve_space_2);
2300 FusedBatchNormGradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
2301 y_backprop, ::tensorflow::Input x, ::tensorflow::Input
2302 scale, ::tensorflow::Input reserve_space_1,
2303 ::tensorflow::Input reserve_space_2, const
2304 FusedBatchNormGradV2::Attrs& attrs);
2305
2306 static Attrs Epsilon(float x) {
2307 return Attrs().Epsilon(x);
2308 }
2309 static Attrs DataFormat(StringPiece x) {
2310 return Attrs().DataFormat(x);
2311 }
2312 static Attrs IsTraining(bool x) {
2313 return Attrs().IsTraining(x);
2314 }
2315
2316 Operation operation;
2317 ::tensorflow::Output x_backprop;
2318 ::tensorflow::Output scale_backprop;
2319 ::tensorflow::Output offset_backprop;
2320 ::tensorflow::Output reserve_space_3;
2321 ::tensorflow::Output reserve_space_4;
2322};
2323
2324/// Gradient for batch normalization.
2325///
2326/// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
2327/// The size of 1D Tensors matches the dimension C of the 4D Tensors.
2328///
2329/// Args:
2330/// * scope: A Scope object
2331/// * y_backprop: A 4D Tensor for the gradient with respect to y.
2332/// * x: A 4D Tensor for input data.
2333/// * scale: A 1D Tensor for scaling factor, to scale the normalized x.
2334/// * reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
2335/// mean to be reused in gradient computation. When is_training is
2336/// False, a 1D Tensor for the population mean to be reused in both
2337/// 1st and 2nd order gradient computation.
2338/// * reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
2339/// variance (inverted variance in the cuDNN case) to be reused in
2340/// gradient computation. When is_training is False, a 1D Tensor
2341/// for the population variance to be reused in both 1st and 2nd
2342/// order gradient computation.
2343/// * reserve_space_3: When is_training is True, a 1D Tensor for some intermediate results to be reused
2344/// in gradient computation. When is_training is False, a dummy empty Tensor will be
2345/// created.
2346///
2347/// Optional attributes (see `Attrs`):
2348/// * epsilon: A small float number added to the variance of x.
2349/// * data_format: The data format for y_backprop, x, x_backprop.
2350/// Either "NHWC" (default) or "NCHW".
2351/// * is_training: A bool value to indicate the operation is for training (default)
2352/// or inference.
2353///
2354/// Returns:
2355/// * `Output` x_backprop: A 4D Tensor for the gradient with respect to x.
2356/// * `Output` scale_backprop: A 1D Tensor for the gradient with respect to scale.
2357/// * `Output` offset_backprop: A 1D Tensor for the gradient with respect to offset.
2358/// * `Output` reserve_space_4: Unused placeholder to match the mean input in FusedBatchNorm.
2359/// * `Output` reserve_space_5: Unused placeholder to match the variance input
2360/// in FusedBatchNorm.
2361class FusedBatchNormGradV3 {
2362 public:
2363 /// Optional attribute setters for FusedBatchNormGradV3
2364 struct Attrs {
2365 /// A small float number added to the variance of x.
2366 ///
2367 /// Defaults to 0.0001
2368 TF_MUST_USE_RESULT Attrs Epsilon(float x) {
2369 Attrs ret = *this;
2370 ret.epsilon_ = x;
2371 return ret;
2372 }
2373
2374 /// The data format for y_backprop, x, x_backprop.
2375 /// Either "NHWC" (default) or "NCHW".
2376 ///
2377 /// Defaults to "NHWC"
2378 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
2379 Attrs ret = *this;
2380 ret.data_format_ = x;
2381 return ret;
2382 }
2383
2384 /// A bool value to indicate the operation is for training (default)
2385 /// or inference.
2386 ///
2387 /// Defaults to true
2388 TF_MUST_USE_RESULT Attrs IsTraining(bool x) {
2389 Attrs ret = *this;
2390 ret.is_training_ = x;
2391 return ret;
2392 }
2393
2394 float epsilon_ = 0.0001f;
2395 StringPiece data_format_ = "NHWC";
2396 bool is_training_ = true;
2397 };
2398 FusedBatchNormGradV3(const ::tensorflow::Scope& scope, ::tensorflow::Input
2399 y_backprop, ::tensorflow::Input x, ::tensorflow::Input
2400 scale, ::tensorflow::Input reserve_space_1,
2401 ::tensorflow::Input reserve_space_2, ::tensorflow::Input
2402 reserve_space_3);
2403 FusedBatchNormGradV3(const ::tensorflow::Scope& scope, ::tensorflow::Input
2404 y_backprop, ::tensorflow::Input x, ::tensorflow::Input
2405 scale, ::tensorflow::Input reserve_space_1,
2406 ::tensorflow::Input reserve_space_2, ::tensorflow::Input
2407 reserve_space_3, const FusedBatchNormGradV3::Attrs& attrs);
2408
2409 static Attrs Epsilon(float x) {
2410 return Attrs().Epsilon(x);
2411 }
2412 static Attrs DataFormat(StringPiece x) {
2413 return Attrs().DataFormat(x);
2414 }
2415 static Attrs IsTraining(bool x) {
2416 return Attrs().IsTraining(x);
2417 }
2418
2419 Operation operation;
2420 ::tensorflow::Output x_backprop;
2421 ::tensorflow::Output scale_backprop;
2422 ::tensorflow::Output offset_backprop;
2423 ::tensorflow::Output reserve_space_4;
2424 ::tensorflow::Output reserve_space_5;
2425};
2426
2427/// Batch normalization.
2428///
2429/// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
2430/// The size of 1D Tensors matches the dimension C of the 4D Tensors.
2431///
2432/// Args:
2433/// * scope: A Scope object
2434/// * x: A 4D Tensor for input data.
2435/// * scale: A 1D Tensor for scaling factor, to scale the normalized x.
2436/// * offset: A 1D Tensor for offset, to shift to the normalized x.
2437/// * mean: A 1D Tensor for population mean. Used for inference only;
2438/// must be empty for training.
2439/// * variance: A 1D Tensor for population variance. Used for inference only;
2440/// must be empty for training.
2441///
2442/// Optional attributes (see `Attrs`):
2443/// * epsilon: A small float number added to the variance of x.
2444/// * data_format: The data format for x and y. Either "NHWC" (default) or "NCHW".
2445/// * is_training: A bool value to indicate the operation is for training (default)
2446/// or inference.
2447///
2448/// Returns:
2449/// * `Output` y: A 4D Tensor for output data.
2450/// * `Output` batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
2451/// to compute the running mean.
2452/// * `Output` batch_variance: A 1D Tensor for the computed batch variance, to be used by
2453/// TensorFlow to compute the running variance.
2454/// * `Output` reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
2455/// in the gradient computation.
2456/// * `Output` reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
2457/// in the cuDNN case), to be reused in the gradient computation.
2458class FusedBatchNormV2 {
2459 public:
2460 /// Optional attribute setters for FusedBatchNormV2
2461 struct Attrs {
2462 /// A small float number added to the variance of x.
2463 ///
2464 /// Defaults to 0.0001
2465 TF_MUST_USE_RESULT Attrs Epsilon(float x) {
2466 Attrs ret = *this;
2467 ret.epsilon_ = x;
2468 return ret;
2469 }
2470
2471 /// Defaults to 1
2472 TF_MUST_USE_RESULT Attrs ExponentialAvgFactor(float x) {
2473 Attrs ret = *this;
2474 ret.exponential_avg_factor_ = x;
2475 return ret;
2476 }
2477
2478 /// The data format for x and y. Either "NHWC" (default) or "NCHW".
2479 ///
2480 /// Defaults to "NHWC"
2481 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
2482 Attrs ret = *this;
2483 ret.data_format_ = x;
2484 return ret;
2485 }
2486
2487 /// A bool value to indicate the operation is for training (default)
2488 /// or inference.
2489 ///
2490 /// Defaults to true
2491 TF_MUST_USE_RESULT Attrs IsTraining(bool x) {
2492 Attrs ret = *this;
2493 ret.is_training_ = x;
2494 return ret;
2495 }
2496
2497 float epsilon_ = 0.0001f;
2498 float exponential_avg_factor_ = 1.0f;
2499 StringPiece data_format_ = "NHWC";
2500 bool is_training_ = true;
2501 };
2502 FusedBatchNormV2(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
2503 ::tensorflow::Input scale, ::tensorflow::Input offset,
2504 ::tensorflow::Input mean, ::tensorflow::Input variance);
2505 FusedBatchNormV2(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
2506 ::tensorflow::Input scale, ::tensorflow::Input offset,
2507 ::tensorflow::Input mean, ::tensorflow::Input variance, const
2508 FusedBatchNormV2::Attrs& attrs);
2509
2510 static Attrs Epsilon(float x) {
2511 return Attrs().Epsilon(x);
2512 }
2513 static Attrs ExponentialAvgFactor(float x) {
2514 return Attrs().ExponentialAvgFactor(x);
2515 }
2516 static Attrs DataFormat(StringPiece x) {
2517 return Attrs().DataFormat(x);
2518 }
2519 static Attrs IsTraining(bool x) {
2520 return Attrs().IsTraining(x);
2521 }
2522
2523 Operation operation;
2524 ::tensorflow::Output y;
2525 ::tensorflow::Output batch_mean;
2526 ::tensorflow::Output batch_variance;
2527 ::tensorflow::Output reserve_space_1;
2528 ::tensorflow::Output reserve_space_2;
2529};
2530
2531/// Batch normalization.
2532///
2533/// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
2534/// The size of 1D Tensors matches the dimension C of the 4D Tensors.
2535///
2536/// Args:
2537/// * scope: A Scope object
2538/// * x: A 4D Tensor for input data.
2539/// * scale: A 1D Tensor for scaling factor, to scale the normalized x.
2540/// * offset: A 1D Tensor for offset, to shift to the normalized x.
2541/// * mean: A 1D Tensor for population mean. Used for inference only;
2542/// must be empty for training.
2543/// * variance: A 1D Tensor for population variance. Used for inference only;
2544/// must be empty for training.
2545///
2546/// Optional attributes (see `Attrs`):
2547/// * epsilon: A small float number added to the variance of x.
2548/// * data_format: The data format for x and y. Either "NHWC" (default) or "NCHW".
2549/// * is_training: A bool value to indicate the operation is for training (default)
2550/// or inference.
2551///
2552/// Returns:
2553/// * `Output` y: A 4D Tensor for output data.
2554/// * `Output` batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
2555/// to compute the running mean.
2556/// * `Output` batch_variance: A 1D Tensor for the computed batch variance, to be used by
2557/// TensorFlow to compute the running variance.
2558/// * `Output` reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
2559/// in the gradient computation.
2560/// * `Output` reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
2561/// in the cuDNN case), to be reused in the gradient computation.
2562/// * `Output` reserve_space_3: A 1D Tensor for some intermediate results, to be reused in the gradient
2563/// computation for better efficiency.
2564class FusedBatchNormV3 {
2565 public:
2566 /// Optional attribute setters for FusedBatchNormV3
2567 struct Attrs {
2568 /// A small float number added to the variance of x.
2569 ///
2570 /// Defaults to 0.0001
2571 TF_MUST_USE_RESULT Attrs Epsilon(float x) {
2572 Attrs ret = *this;
2573 ret.epsilon_ = x;
2574 return ret;
2575 }
2576
2577 /// Defaults to 1
2578 TF_MUST_USE_RESULT Attrs ExponentialAvgFactor(float x) {
2579 Attrs ret = *this;
2580 ret.exponential_avg_factor_ = x;
2581 return ret;
2582 }
2583
2584 /// The data format for x and y. Either "NHWC" (default) or "NCHW".
2585 ///
2586 /// Defaults to "NHWC"
2587 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
2588 Attrs ret = *this;
2589 ret.data_format_ = x;
2590 return ret;
2591 }
2592
2593 /// A bool value to indicate the operation is for training (default)
2594 /// or inference.
2595 ///
2596 /// Defaults to true
2597 TF_MUST_USE_RESULT Attrs IsTraining(bool x) {
2598 Attrs ret = *this;
2599 ret.is_training_ = x;
2600 return ret;
2601 }
2602
2603 float epsilon_ = 0.0001f;
2604 float exponential_avg_factor_ = 1.0f;
2605 StringPiece data_format_ = "NHWC";
2606 bool is_training_ = true;
2607 };
2608 FusedBatchNormV3(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
2609 ::tensorflow::Input scale, ::tensorflow::Input offset,
2610 ::tensorflow::Input mean, ::tensorflow::Input variance);
2611 FusedBatchNormV3(const ::tensorflow::Scope& scope, ::tensorflow::Input x,
2612 ::tensorflow::Input scale, ::tensorflow::Input offset,
2613 ::tensorflow::Input mean, ::tensorflow::Input variance, const
2614 FusedBatchNormV3::Attrs& attrs);
2615
2616 static Attrs Epsilon(float x) {
2617 return Attrs().Epsilon(x);
2618 }
2619 static Attrs ExponentialAvgFactor(float x) {
2620 return Attrs().ExponentialAvgFactor(x);
2621 }
2622 static Attrs DataFormat(StringPiece x) {
2623 return Attrs().DataFormat(x);
2624 }
2625 static Attrs IsTraining(bool x) {
2626 return Attrs().IsTraining(x);
2627 }
2628
2629 Operation operation;
2630 ::tensorflow::Output y;
2631 ::tensorflow::Output batch_mean;
2632 ::tensorflow::Output batch_variance;
2633 ::tensorflow::Output reserve_space_1;
2634 ::tensorflow::Output reserve_space_2;
2635 ::tensorflow::Output reserve_space_3;
2636};
2637
2638/// Performs a padding as a preprocess during a convolution.
2639///
2640/// Similar to FusedResizeAndPadConv2d, this op allows for an optimized
2641/// implementation where the spatial padding transformation stage is fused with the
2642/// im2col lookup, but in this case without the bilinear filtering required for
2643/// resizing. Fusing the padding prevents the need to write out the intermediate
2644/// results as whole tensors, reducing memory pressure, and we can get some latency
2645/// gains by merging the transformation calculations.
2646/// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
2647/// order is used instead.
2648/// Internally this op uses a single per-graph scratch buffer, which means that it
2649/// will block if multiple versions are being run in parallel. This is because this
2650/// operator is primarily an optimization to minimize memory usage.
2651///
2652/// Args:
2653/// * scope: A Scope object
2654/// * input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
2655/// * paddings: A two-column matrix specifying the padding sizes. The number of
2656/// rows must be the same as the rank of `input`.
2657/// * filter: 4-D with shape
2658/// `[filter_height, filter_width, in_channels, out_channels]`.
2659/// * strides: 1-D of length 4. The stride of the sliding window for each dimension
2660/// of `input`. Must be in the same order as the dimension specified with format.
2661/// * padding: The type of padding algorithm to use.
2662///
2663/// Returns:
2664/// * `Output`: The output tensor.
2665class FusedPadConv2D {
2666 public:
2667 FusedPadConv2D(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
2668 ::tensorflow::Input paddings, ::tensorflow::Input filter,
2669 StringPiece mode, const gtl::ArraySlice<int>& strides,
2670 StringPiece padding);
2671 operator ::tensorflow::Output() const { return output; }
2672 operator ::tensorflow::Input() const { return output; }
2673 ::tensorflow::Node* node() const { return output.node(); }
2674
2675 Operation operation;
2676 ::tensorflow::Output output;
2677};
2678
2679/// Performs a resize and padding as a preprocess during a convolution.
2680///
2681/// It's often possible to do spatial transformations more efficiently as part of
2682/// the packing stage of a convolution, so this op allows for an optimized
2683/// implementation where these stages are fused together. This prevents the need to
2684/// write out the intermediate results as whole tensors, reducing memory pressure,
2685/// and we can get some latency gains by merging the transformation calculations.
2686/// The data_format attribute for Conv2D isn't supported by this op, and defaults to
2687/// 'NHWC' order.
2688/// Internally this op uses a single per-graph scratch buffer, which means that it
2689/// will block if multiple versions are being run in parallel. This is because this
2690/// operator is primarily an optimization to minimize memory usage.
2691///
2692/// Args:
2693/// * scope: A Scope object
2694/// * input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
2695/// * size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
2696/// new size for the images.
2697/// * paddings: A two-column matrix specifying the padding sizes. The number of
2698/// rows must be the same as the rank of `input`.
2699/// * filter: 4-D with shape
2700/// `[filter_height, filter_width, in_channels, out_channels]`.
2701/// * strides: 1-D of length 4. The stride of the sliding window for each dimension
2702/// of `input`. Must be in the same order as the dimension specified with format.
2703/// * padding: The type of padding algorithm to use.
2704///
2705/// Optional attributes (see `Attrs`):
2706/// * resize_align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are
2707/// aligned, preserving the values at the corner pixels. Defaults to false.
2708///
2709/// Returns:
2710/// * `Output`: The output tensor.
2711class FusedResizeAndPadConv2D {
2712 public:
2713 /// Optional attribute setters for FusedResizeAndPadConv2D
2714 struct Attrs {
2715 /// If true, the centers of the 4 corner pixels of the input and output tensors are
2716 /// aligned, preserving the values at the corner pixels. Defaults to false.
2717 ///
2718 /// Defaults to false
2719 TF_MUST_USE_RESULT Attrs ResizeAlignCorners(bool x) {
2720 Attrs ret = *this;
2721 ret.resize_align_corners_ = x;
2722 return ret;
2723 }
2724
2725 bool resize_align_corners_ = false;
2726 };
2727 FusedResizeAndPadConv2D(const ::tensorflow::Scope& scope, ::tensorflow::Input
2728 input, ::tensorflow::Input size, ::tensorflow::Input
2729 paddings, ::tensorflow::Input filter, StringPiece mode,
2730 const gtl::ArraySlice<int>& strides, StringPiece
2731 padding);
2732 FusedResizeAndPadConv2D(const ::tensorflow::Scope& scope, ::tensorflow::Input
2733 input, ::tensorflow::Input size, ::tensorflow::Input
2734 paddings, ::tensorflow::Input filter, StringPiece mode,
2735 const gtl::ArraySlice<int>& strides, StringPiece
2736 padding, const FusedResizeAndPadConv2D::Attrs& attrs);
2737 operator ::tensorflow::Output() const { return output; }
2738 operator ::tensorflow::Input() const { return output; }
2739 ::tensorflow::Node* node() const { return output.node(); }
2740
2741 static Attrs ResizeAlignCorners(bool x) {
2742 return Attrs().ResizeAlignCorners(x);
2743 }
2744
2745 Operation operation;
2746 ::tensorflow::Output output;
2747};
2748
2749/// Says whether the targets are in the top `K` predictions.
2750///
2751/// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
2752/// prediction for the target class is among the top `k` predictions among
2753/// all predictions for example `i`. Note that the behavior of `InTopK` differs
2754/// from the `TopK` op in its handling of ties; if multiple classes have the
2755/// same prediction value and straddle the top-`k` boundary, all of those
2756/// classes are considered to be in the top `k`.
2757///
2758/// More formally, let
2759///
2760/// \\(predictions_i\\) be the predictions for all classes for example `i`,
2761/// \\(targets_i\\) be the target class for example `i`,
2762/// \\(out_i\\) be the output for example `i`,
2763///
2764/// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
2765///
2766/// Args:
2767/// * scope: A Scope object
2768/// * predictions: A `batch_size` x `classes` tensor.
2769/// * targets: A `batch_size` vector of class ids.
2770/// * k: Number of top elements to look at for computing precision.
2771///
2772/// Returns:
2773/// * `Output`: Computed Precision at `k` as a `bool Tensor`.
2774class InTopK {
2775 public:
2776 InTopK(const ::tensorflow::Scope& scope, ::tensorflow::Input predictions,
2777 ::tensorflow::Input targets, int64 k);
2778 operator ::tensorflow::Output() const { return precision; }
2779 operator ::tensorflow::Input() const { return precision; }
2780 ::tensorflow::Node* node() const { return precision.node(); }
2781
2782 Operation operation;
2783 ::tensorflow::Output precision;
2784};
2785
2786/// Says whether the targets are in the top `K` predictions.
2787///
2788/// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
2789/// prediction for the target class is among the top `k` predictions among
2790/// all predictions for example `i`. Note that the behavior of `InTopK` differs
2791/// from the `TopK` op in its handling of ties; if multiple classes have the
2792/// same prediction value and straddle the top-`k` boundary, all of those
2793/// classes are considered to be in the top `k`.
2794///
2795/// More formally, let
2796///
2797/// \\(predictions_i\\) be the predictions for all classes for example `i`,
2798/// \\(targets_i\\) be the target class for example `i`,
2799/// \\(out_i\\) be the output for example `i`,
2800///
2801/// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
2802///
2803/// Args:
2804/// * scope: A Scope object
2805/// * predictions: A `batch_size` x `classes` tensor.
2806/// * targets: A `batch_size` vector of class ids.
2807/// * k: Number of top elements to look at for computing precision.
2808///
2809/// Returns:
2810/// * `Output`: Computed precision at `k` as a `bool Tensor`.
2811class InTopKV2 {
2812 public:
2813 InTopKV2(const ::tensorflow::Scope& scope, ::tensorflow::Input predictions,
2814 ::tensorflow::Input targets, ::tensorflow::Input k);
2815 operator ::tensorflow::Output() const { return precision; }
2816 operator ::tensorflow::Input() const { return precision; }
2817 ::tensorflow::Node* node() const { return precision.node(); }
2818
2819 Operation operation;
2820 ::tensorflow::Output precision;
2821};
2822
2823/// L2 Loss.
2824///
2825/// Computes half the L2 norm of a tensor without the `sqrt`:
2826///
2827/// output = sum(t ** 2) / 2
2828///
2829/// Args:
2830/// * scope: A Scope object
2831/// * t: Typically 2-D, but may have any dimensions.
2832///
2833/// Returns:
2834/// * `Output`: 0-D.
2835class L2Loss {
2836 public:
2837 L2Loss(const ::tensorflow::Scope& scope, ::tensorflow::Input t);
2838 operator ::tensorflow::Output() const { return output; }
2839 operator ::tensorflow::Input() const { return output; }
2840 ::tensorflow::Node* node() const { return output.node(); }
2841
2842 Operation operation;
2843 ::tensorflow::Output output;
2844};
2845
2846/// Local Response Normalization.
2847///
2848/// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
2849/// dimension), and each vector is normalized independently. Within a given vector,
2850/// each component is divided by the weighted, squared sum of inputs within
2851/// `depth_radius`. In detail,
2852///
2853/// sqr_sum[a, b, c, d] =
2854/// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
2855/// output = input / (bias + alpha * sqr_sum) ** beta
2856///
2857/// For details, see [Krizhevsky et al., ImageNet classification with deep
2858/// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
2859///
2860/// Args:
2861/// * scope: A Scope object
2862/// * input: 4-D.
2863///
2864/// Optional attributes (see `Attrs`):
2865/// * depth_radius: 0-D. Half-width of the 1-D normalization window.
2866/// * bias: An offset (usually positive to avoid dividing by 0).
2867/// * alpha: A scale factor, usually positive.
2868/// * beta: An exponent.
2869///
2870/// Returns:
2871/// * `Output`: The output tensor.
2872class LRN {
2873 public:
2874 /// Optional attribute setters for LRN
2875 struct Attrs {
2876 /// 0-D. Half-width of the 1-D normalization window.
2877 ///
2878 /// Defaults to 5
2879 TF_MUST_USE_RESULT Attrs DepthRadius(int64 x) {
2880 Attrs ret = *this;
2881 ret.depth_radius_ = x;
2882 return ret;
2883 }
2884
2885 /// An offset (usually positive to avoid dividing by 0).
2886 ///
2887 /// Defaults to 1
2888 TF_MUST_USE_RESULT Attrs Bias(float x) {
2889 Attrs ret = *this;
2890 ret.bias_ = x;
2891 return ret;
2892 }
2893
2894 /// A scale factor, usually positive.
2895 ///
2896 /// Defaults to 1
2897 TF_MUST_USE_RESULT Attrs Alpha(float x) {
2898 Attrs ret = *this;
2899 ret.alpha_ = x;
2900 return ret;
2901 }
2902
2903 /// An exponent.
2904 ///
2905 /// Defaults to 0.5
2906 TF_MUST_USE_RESULT Attrs Beta(float x) {
2907 Attrs ret = *this;
2908 ret.beta_ = x;
2909 return ret;
2910 }
2911
2912 int64 depth_radius_ = 5;
2913 float bias_ = 1.0f;
2914 float alpha_ = 1.0f;
2915 float beta_ = 0.5f;
2916 };
2917 LRN(const ::tensorflow::Scope& scope, ::tensorflow::Input input);
2918 LRN(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
2919 LRN::Attrs& attrs);
2920 operator ::tensorflow::Output() const { return output; }
2921 operator ::tensorflow::Input() const { return output; }
2922 ::tensorflow::Node* node() const { return output.node(); }
2923
2924 static Attrs DepthRadius(int64 x) {
2925 return Attrs().DepthRadius(x);
2926 }
2927 static Attrs Bias(float x) {
2928 return Attrs().Bias(x);
2929 }
2930 static Attrs Alpha(float x) {
2931 return Attrs().Alpha(x);
2932 }
2933 static Attrs Beta(float x) {
2934 return Attrs().Beta(x);
2935 }
2936
2937 Operation operation;
2938 ::tensorflow::Output output;
2939};
2940
2941/// Computes log softmax activations.
2942///
2943/// For each batch `i` and class `j` we have
2944///
2945/// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
2946///
2947/// Args:
2948/// * scope: A Scope object
2949/// * logits: 2-D with shape `[batch_size, num_classes]`.
2950///
2951/// Returns:
2952/// * `Output`: Same shape as `logits`.
2953class LogSoftmax {
2954 public:
2955 LogSoftmax(const ::tensorflow::Scope& scope, ::tensorflow::Input logits);
2956 operator ::tensorflow::Output() const { return logsoftmax; }
2957 operator ::tensorflow::Input() const { return logsoftmax; }
2958 ::tensorflow::Node* node() const { return logsoftmax.node(); }
2959
2960 Operation operation;
2961 ::tensorflow::Output logsoftmax;
2962};
2963
2964/// Performs max pooling on the input.
2965///
2966/// Args:
2967/// * scope: A Scope object
2968/// * input: 4-D input to pool over.
2969/// * ksize: The size of the window for each dimension of the input tensor.
2970/// * strides: The stride of the sliding window for each dimension of the
2971/// input tensor.
2972/// * padding: The type of padding algorithm to use.
2973///
2974/// Optional attributes (see `Attrs`):
2975/// * data_format: Specify the data format of the input and output data. With the
2976/// default format "NHWC", the data is stored in the order of:
2977/// [batch, in_height, in_width, in_channels].
2978/// Alternatively, the format could be "NCHW", the data storage order of:
2979/// [batch, in_channels, in_height, in_width].
2980///
2981/// Returns:
2982/// * `Output`: The max pooled output tensor.
2983class MaxPool {
2984 public:
2985 /// Optional attribute setters for MaxPool
2986 struct Attrs {
2987 /// Defaults to []
2988 TF_MUST_USE_RESULT Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
2989 Attrs ret = *this;
2990 ret.explicit_paddings_ = x;
2991 return ret;
2992 }
2993
2994 /// Specify the data format of the input and output data. With the
2995 /// default format "NHWC", the data is stored in the order of:
2996 /// [batch, in_height, in_width, in_channels].
2997 /// Alternatively, the format could be "NCHW", the data storage order of:
2998 /// [batch, in_channels, in_height, in_width].
2999 ///
3000 /// Defaults to "NHWC"
3001 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
3002 Attrs ret = *this;
3003 ret.data_format_ = x;
3004 return ret;
3005 }
3006
3007 gtl::ArraySlice<int> explicit_paddings_ = {};
3008 StringPiece data_format_ = "NHWC";
3009 };
3010 MaxPool(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
3011 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
3012 StringPiece padding);
3013 MaxPool(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
3014 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
3015 StringPiece padding, const MaxPool::Attrs& attrs);
3016 operator ::tensorflow::Output() const { return output; }
3017 operator ::tensorflow::Input() const { return output; }
3018 ::tensorflow::Node* node() const { return output.node(); }
3019
3020 static Attrs ExplicitPaddings(const gtl::ArraySlice<int>& x) {
3021 return Attrs().ExplicitPaddings(x);
3022 }
3023 static Attrs DataFormat(StringPiece x) {
3024 return Attrs().DataFormat(x);
3025 }
3026
3027 Operation operation;
3028 ::tensorflow::Output output;
3029};
3030
3031/// Performs 3D max pooling on the input.
3032///
3033/// Args:
3034/// * scope: A Scope object
3035/// * input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
3036/// * ksize: 1-D tensor of length 5. The size of the window for each dimension of
3037/// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
3038/// * strides: 1-D tensor of length 5. The stride of the sliding window for each
3039/// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
3040/// * padding: The type of padding algorithm to use.
3041///
3042/// Optional attributes (see `Attrs`):
3043/// * data_format: The data format of the input and output data. With the
3044/// default format "NDHWC", the data is stored in the order of:
3045/// [batch, in_depth, in_height, in_width, in_channels].
3046/// Alternatively, the format could be "NCDHW", the data storage order is:
3047/// [batch, in_channels, in_depth, in_height, in_width].
3048///
3049/// Returns:
3050/// * `Output`: The max pooled output tensor.
3051class MaxPool3D {
3052 public:
3053 /// Optional attribute setters for MaxPool3D
3054 struct Attrs {
3055 /// The data format of the input and output data. With the
3056 /// default format "NDHWC", the data is stored in the order of:
3057 /// [batch, in_depth, in_height, in_width, in_channels].
3058 /// Alternatively, the format could be "NCDHW", the data storage order is:
3059 /// [batch, in_channels, in_depth, in_height, in_width].
3060 ///
3061 /// Defaults to "NDHWC"
3062 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
3063 Attrs ret = *this;
3064 ret.data_format_ = x;
3065 return ret;
3066 }
3067
3068 StringPiece data_format_ = "NDHWC";
3069 };
3070 MaxPool3D(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
3071 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
3072 StringPiece padding);
3073 MaxPool3D(const ::tensorflow::Scope& scope, ::tensorflow::Input input, const
3074 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
3075 StringPiece padding, const MaxPool3D::Attrs& attrs);
3076 operator ::tensorflow::Output() const { return output; }
3077 operator ::tensorflow::Input() const { return output; }
3078 ::tensorflow::Node* node() const { return output.node(); }
3079
3080 static Attrs DataFormat(StringPiece x) {
3081 return Attrs().DataFormat(x);
3082 }
3083
3084 Operation operation;
3085 ::tensorflow::Output output;
3086};
3087
3088/// Computes gradients of 3D max pooling function.
3089///
3090/// Args:
3091/// * scope: A Scope object
3092/// * orig_input: The original input tensor.
3093/// * orig_output: The original output tensor.
3094/// * grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
3095/// * ksize: 1-D tensor of length 5. The size of the window for each dimension of
3096/// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
3097/// * strides: 1-D tensor of length 5. The stride of the sliding window for each
3098/// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
3099/// * padding: The type of padding algorithm to use.
3100///
3101/// Optional attributes (see `Attrs`):
3102/// * data_format: The data format of the input and output data. With the
3103/// default format "NDHWC", the data is stored in the order of:
3104/// [batch, in_depth, in_height, in_width, in_channels].
3105/// Alternatively, the format could be "NCDHW", the data storage order is:
3106/// [batch, in_channels, in_depth, in_height, in_width].
3107///
3108/// Returns:
3109/// * `Output`: The output tensor.
3110class MaxPool3DGrad {
3111 public:
3112 /// Optional attribute setters for MaxPool3DGrad
3113 struct Attrs {
3114 /// The data format of the input and output data. With the
3115 /// default format "NDHWC", the data is stored in the order of:
3116 /// [batch, in_depth, in_height, in_width, in_channels].
3117 /// Alternatively, the format could be "NCDHW", the data storage order is:
3118 /// [batch, in_channels, in_depth, in_height, in_width].
3119 ///
3120 /// Defaults to "NDHWC"
3121 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
3122 Attrs ret = *this;
3123 ret.data_format_ = x;
3124 return ret;
3125 }
3126
3127 StringPiece data_format_ = "NDHWC";
3128 };
3129 MaxPool3DGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input orig_input,
3130 ::tensorflow::Input orig_output, ::tensorflow::Input grad, const
3131 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
3132 StringPiece padding);
3133 MaxPool3DGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input orig_input,
3134 ::tensorflow::Input orig_output, ::tensorflow::Input grad, const
3135 gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>& strides,
3136 StringPiece padding, const MaxPool3DGrad::Attrs& attrs);
3137 operator ::tensorflow::Output() const { return output; }
3138 operator ::tensorflow::Input() const { return output; }
3139 ::tensorflow::Node* node() const { return output.node(); }
3140
3141 static Attrs DataFormat(StringPiece x) {
3142 return Attrs().DataFormat(x);
3143 }
3144
3145 Operation operation;
3146 ::tensorflow::Output output;
3147};
3148
3149/// Computes second-order gradients of the maxpooling function.
3150///
3151/// Args:
3152/// * scope: A Scope object
3153/// * orig_input: The original input tensor.
3154/// * orig_output: The original output tensor.
3155/// * grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
3156/// * ksize: 1-D tensor of length 5. The size of the window for each dimension of
3157/// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
3158/// * strides: 1-D tensor of length 5. The stride of the sliding window for each
3159/// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
3160/// * padding: The type of padding algorithm to use.
3161///
3162/// Optional attributes (see `Attrs`):
3163/// * data_format: The data format of the input and output data. With the
3164/// default format "NDHWC", the data is stored in the order of:
3165/// [batch, in_depth, in_height, in_width, in_channels].
3166/// Alternatively, the format could be "NCDHW", the data storage order is:
3167/// [batch, in_channels, in_depth, in_height, in_width].
3168///
3169/// Returns:
3170/// * `Output`: Gradients of gradients w.r.t. the input to `max_pool`.
3171class MaxPool3DGradGrad {
3172 public:
3173 /// Optional attribute setters for MaxPool3DGradGrad
3174 struct Attrs {
3175 /// The data format of the input and output data. With the
3176 /// default format "NDHWC", the data is stored in the order of:
3177 /// [batch, in_depth, in_height, in_width, in_channels].
3178 /// Alternatively, the format could be "NCDHW", the data storage order is:
3179 /// [batch, in_channels, in_depth, in_height, in_width].
3180 ///
3181 /// Defaults to "NDHWC"
3182 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
3183 Attrs ret = *this;
3184 ret.data_format_ = x;
3185 return ret;
3186 }
3187
3188 StringPiece data_format_ = "NDHWC";
3189 };
3190 MaxPool3DGradGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
3191 orig_input, ::tensorflow::Input orig_output,
3192 ::tensorflow::Input grad, const gtl::ArraySlice<int>& ksize,
3193 const gtl::ArraySlice<int>& strides, StringPiece padding);
3194 MaxPool3DGradGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
3195 orig_input, ::tensorflow::Input orig_output,
3196 ::tensorflow::Input grad, const gtl::ArraySlice<int>& ksize,
3197 const gtl::ArraySlice<int>& strides, StringPiece padding,
3198 const MaxPool3DGradGrad::Attrs& attrs);
3199 operator ::tensorflow::Output() const { return output; }
3200 operator ::tensorflow::Input() const { return output; }
3201 ::tensorflow::Node* node() const { return output.node(); }
3202
3203 static Attrs DataFormat(StringPiece x) {
3204 return Attrs().DataFormat(x);
3205 }
3206
3207 Operation operation;
3208 ::tensorflow::Output output;
3209};
3210
3211/// Computes second-order gradients of the maxpooling function.
3212///
3213/// Args:
3214/// * scope: A Scope object
3215/// * orig_input: The original input tensor.
3216/// * orig_output: The original output tensor.
3217/// * grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`.
3218/// * ksize: The size of the window for each dimension of the input tensor.
3219/// * strides: The stride of the sliding window for each dimension of the
3220/// input tensor.
3221/// * padding: The type of padding algorithm to use.
3222///
3223/// Optional attributes (see `Attrs`):
3224/// * data_format: Specify the data format of the input and output data. With the
3225/// default format "NHWC", the data is stored in the order of:
3226/// [batch, in_height, in_width, in_channels].
3227/// Alternatively, the format could be "NCHW", the data storage order of:
3228/// [batch, in_channels, in_height, in_width].
3229///
3230/// Returns:
3231/// * `Output`: Gradients of gradients w.r.t. the input to `max_pool`.
3232class MaxPoolGradGrad {
3233 public:
3234 /// Optional attribute setters for MaxPoolGradGrad
3235 struct Attrs {
3236 /// Specify the data format of the input and output data. With the
3237 /// default format "NHWC", the data is stored in the order of:
3238 /// [batch, in_height, in_width, in_channels].
3239 /// Alternatively, the format could be "NCHW", the data storage order of:
3240 /// [batch, in_channels, in_height, in_width].
3241 ///
3242 /// Defaults to "NHWC"
3243 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
3244 Attrs ret = *this;
3245 ret.data_format_ = x;
3246 return ret;
3247 }
3248
3249 StringPiece data_format_ = "NHWC";
3250 };
3251 MaxPoolGradGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
3252 orig_input, ::tensorflow::Input orig_output,
3253 ::tensorflow::Input grad, const gtl::ArraySlice<int>& ksize,
3254 const gtl::ArraySlice<int>& strides, StringPiece padding);
3255 MaxPoolGradGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
3256 orig_input, ::tensorflow::Input orig_output,
3257 ::tensorflow::Input grad, const gtl::ArraySlice<int>& ksize,
3258 const gtl::ArraySlice<int>& strides, StringPiece padding, const
3259 MaxPoolGradGrad::Attrs& attrs);
3260 operator ::tensorflow::Output() const { return output; }
3261 operator ::tensorflow::Input() const { return output; }
3262 ::tensorflow::Node* node() const { return output.node(); }
3263
3264 static Attrs DataFormat(StringPiece x) {
3265 return Attrs().DataFormat(x);
3266 }
3267
3268 Operation operation;
3269 ::tensorflow::Output output;
3270};
3271
3272/// Computes second-order gradients of the maxpooling function.
3273///
3274/// Args:
3275/// * scope: A Scope object
3276/// * orig_input: The original input tensor.
3277/// * orig_output: The original output tensor.
3278/// * grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`.
3279/// * ksize: The size of the window for each dimension of the input tensor.
3280/// * strides: The stride of the sliding window for each dimension of the
3281/// input tensor.
3282/// * padding: The type of padding algorithm to use.
3283///
3284/// Optional attributes (see `Attrs`):
3285/// * data_format: Specify the data format of the input and output data. With the
3286/// default format "NHWC", the data is stored in the order of:
3287/// [batch, in_height, in_width, in_channels].
3288/// Alternatively, the format could be "NCHW", the data storage order of:
3289/// [batch, in_channels, in_height, in_width].
3290///
3291/// Returns:
3292/// * `Output`: Gradients of gradients w.r.t. the input to `max_pool`.
3293class MaxPoolGradGradV2 {
3294 public:
3295 /// Optional attribute setters for MaxPoolGradGradV2
3296 struct Attrs {
3297 /// Specify the data format of the input and output data. With the
3298 /// default format "NHWC", the data is stored in the order of:
3299 /// [batch, in_height, in_width, in_channels].
3300 /// Alternatively, the format could be "NCHW", the data storage order of:
3301 /// [batch, in_channels, in_height, in_width].
3302 ///
3303 /// Defaults to "NHWC"
3304 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
3305 Attrs ret = *this;
3306 ret.data_format_ = x;
3307 return ret;
3308 }
3309
3310 StringPiece data_format_ = "NHWC";
3311 };
3312 MaxPoolGradGradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
3313 orig_input, ::tensorflow::Input orig_output,
3314 ::tensorflow::Input grad, ::tensorflow::Input ksize,
3315 ::tensorflow::Input strides, StringPiece padding);
3316 MaxPoolGradGradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
3317 orig_input, ::tensorflow::Input orig_output,
3318 ::tensorflow::Input grad, ::tensorflow::Input ksize,
3319 ::tensorflow::Input strides, StringPiece padding, const
3320 MaxPoolGradGradV2::Attrs& attrs);
3321 operator ::tensorflow::Output() const { return output; }
3322 operator ::tensorflow::Input() const { return output; }
3323 ::tensorflow::Node* node() const { return output.node(); }
3324
3325 static Attrs DataFormat(StringPiece x) {
3326 return Attrs().DataFormat(x);
3327 }
3328
3329 Operation operation;
3330 ::tensorflow::Output output;
3331};
3332
3333/// Computes second-order gradients of the maxpooling function.
3334///
3335/// Args:
3336/// * scope: A Scope object
3337/// * input: The original input.
3338/// * grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
3339/// input of `max_pool`.
3340/// * argmax: The indices of the maximum values chosen for each output of `max_pool`.
3341/// * ksize: The size of the window for each dimension of the input tensor.
3342/// * strides: The stride of the sliding window for each dimension of the
3343/// input tensor.
3344/// * padding: The type of padding algorithm to use.
3345///
3346/// Optional attributes (see `Attrs`):
3347/// * include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`.
3348///
3349/// Returns:
3350/// * `Output`: Gradients of gradients w.r.t. the input of `max_pool`.
3351class MaxPoolGradGradWithArgmax {
3352 public:
3353 /// Optional attribute setters for MaxPoolGradGradWithArgmax
3354 struct Attrs {
3355 /// Whether to include batch dimension in flattened index of `argmax`.
3356 ///
3357 /// Defaults to false
3358 TF_MUST_USE_RESULT Attrs IncludeBatchInIndex(bool x) {
3359 Attrs ret = *this;
3360 ret.include_batch_in_index_ = x;
3361 return ret;
3362 }
3363
3364 bool include_batch_in_index_ = false;
3365 };
3366 MaxPoolGradGradWithArgmax(const ::tensorflow::Scope& scope, ::tensorflow::Input
3367 input, ::tensorflow::Input grad, ::tensorflow::Input
3368 argmax, const gtl::ArraySlice<int>& ksize, const
3369 gtl::ArraySlice<int>& strides, StringPiece padding);
3370 MaxPoolGradGradWithArgmax(const ::tensorflow::Scope& scope, ::tensorflow::Input
3371 input, ::tensorflow::Input grad, ::tensorflow::Input
3372 argmax, const gtl::ArraySlice<int>& ksize, const
3373 gtl::ArraySlice<int>& strides, StringPiece padding,
3374 const MaxPoolGradGradWithArgmax::Attrs& attrs);
3375 operator ::tensorflow::Output() const { return output; }
3376 operator ::tensorflow::Input() const { return output; }
3377 ::tensorflow::Node* node() const { return output.node(); }
3378
3379 static Attrs IncludeBatchInIndex(bool x) {
3380 return Attrs().IncludeBatchInIndex(x);
3381 }
3382
3383 Operation operation;
3384 ::tensorflow::Output output;
3385};
3386
3387/// Computes gradients of the maxpooling function.
3388///
3389/// Args:
3390/// * scope: A Scope object
3391/// * orig_input: The original input tensor.
3392/// * orig_output: The original output tensor.
3393/// * grad: 4-D. Gradients w.r.t. the output of `max_pool`.
3394/// * ksize: The size of the window for each dimension of the input tensor.
3395/// * strides: The stride of the sliding window for each dimension of the
3396/// input tensor.
3397/// * padding: The type of padding algorithm to use.
3398///
3399/// Optional attributes (see `Attrs`):
3400/// * data_format: Specify the data format of the input and output data. With the
3401/// default format "NHWC", the data is stored in the order of:
3402/// [batch, in_height, in_width, in_channels].
3403/// Alternatively, the format could be "NCHW", the data storage order of:
3404/// [batch, in_channels, in_height, in_width].
3405///
3406/// Returns:
3407/// * `Output`: Gradients w.r.t. the input to `max_pool`.
3408class MaxPoolGradV2 {
3409 public:
3410 /// Optional attribute setters for MaxPoolGradV2
3411 struct Attrs {
3412 /// Specify the data format of the input and output data. With the
3413 /// default format "NHWC", the data is stored in the order of:
3414 /// [batch, in_height, in_width, in_channels].
3415 /// Alternatively, the format could be "NCHW", the data storage order of:
3416 /// [batch, in_channels, in_height, in_width].
3417 ///
3418 /// Defaults to "NHWC"
3419 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
3420 Attrs ret = *this;
3421 ret.data_format_ = x;
3422 return ret;
3423 }
3424
3425 StringPiece data_format_ = "NHWC";
3426 };
3427 MaxPoolGradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input orig_input,
3428 ::tensorflow::Input orig_output, ::tensorflow::Input grad,
3429 ::tensorflow::Input ksize, ::tensorflow::Input strides,
3430 StringPiece padding);
3431 MaxPoolGradV2(const ::tensorflow::Scope& scope, ::tensorflow::Input orig_input,
3432 ::tensorflow::Input orig_output, ::tensorflow::Input grad,
3433 ::tensorflow::Input ksize, ::tensorflow::Input strides,
3434 StringPiece padding, const MaxPoolGradV2::Attrs& attrs);
3435 operator ::tensorflow::Output() const { return output; }
3436 operator ::tensorflow::Input() const { return output; }
3437 ::tensorflow::Node* node() const { return output.node(); }
3438
3439 static Attrs DataFormat(StringPiece x) {
3440 return Attrs().DataFormat(x);
3441 }
3442
3443 Operation operation;
3444 ::tensorflow::Output output;
3445};
3446
3447/// Performs max pooling on the input.
3448///
3449/// Args:
3450/// * scope: A Scope object
3451/// * input: 4-D input to pool over.
3452/// * ksize: The size of the window for each dimension of the input tensor.
3453/// * strides: The stride of the sliding window for each dimension of the
3454/// input tensor.
3455/// * padding: The type of padding algorithm to use.
3456///
3457/// Optional attributes (see `Attrs`):
3458/// * data_format: Specify the data format of the input and output data. With the
3459/// default format "NHWC", the data is stored in the order of:
3460/// [batch, in_height, in_width, in_channels].
3461/// Alternatively, the format could be "NCHW", the data storage order of:
3462/// [batch, in_channels, in_height, in_width].
3463///
3464/// Returns:
3465/// * `Output`: The max pooled output tensor.
3466class MaxPoolV2 {
3467 public:
3468 /// Optional attribute setters for MaxPoolV2
3469 struct Attrs {
3470 /// Specify the data format of the input and output data. With the
3471 /// default format "NHWC", the data is stored in the order of:
3472 /// [batch, in_height, in_width, in_channels].
3473 /// Alternatively, the format could be "NCHW", the data storage order of:
3474 /// [batch, in_channels, in_height, in_width].
3475 ///
3476 /// Defaults to "NHWC"
3477 TF_MUST_USE_RESULT Attrs DataFormat(StringPiece x) {
3478 Attrs ret = *this;
3479 ret.data_format_ = x;
3480 return ret;
3481 }
3482
3483 StringPiece data_format_ = "NHWC";
3484 };
3485 MaxPoolV2(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3486 ::tensorflow::Input ksize, ::tensorflow::Input strides, StringPiece
3487 padding);
3488 MaxPoolV2(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3489 ::tensorflow::Input ksize, ::tensorflow::Input strides, StringPiece
3490 padding, const MaxPoolV2::Attrs& attrs);
3491 operator ::tensorflow::Output() const { return output; }
3492 operator ::tensorflow::Input() const { return output; }
3493 ::tensorflow::Node* node() const { return output.node(); }
3494
3495 static Attrs DataFormat(StringPiece x) {
3496 return Attrs().DataFormat(x);
3497 }
3498
3499 Operation operation;
3500 ::tensorflow::Output output;
3501};
3502
3503/// Performs max pooling on the input and outputs both max values and indices.
3504///
3505/// The indices in `argmax` are flattened, so that a maximum value at position
3506/// `[b, y, x, c]` becomes flattened index:
3507/// `(y * width + x) * channels + c` if `include_batch_in_index` is False;
3508/// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.
3509///
3510/// The indices returned are always in `[0, height) x [0, width)` before flattening,
3511/// even if padding is involved and the mathematically correct answer is outside
3512/// (either negative or too large). This is a bug, but fixing it is difficult to do
3513/// in a safe backwards compatible way, especially due to flattening.
3514///
3515/// Args:
3516/// * scope: A Scope object
3517/// * input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.
3518/// * ksize: The size of the window for each dimension of the input tensor.
3519/// * strides: The stride of the sliding window for each dimension of the
3520/// input tensor.
3521/// * padding: The type of padding algorithm to use.
3522///
3523/// Optional attributes (see `Attrs`):
3524/// * include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`.
3525///
3526/// Returns:
3527/// * `Output` output: The max pooled output tensor.
3528/// * `Output` argmax: 4-D. The flattened indices of the max values chosen for each output.
3529class MaxPoolWithArgmax {
3530 public:
3531 /// Optional attribute setters for MaxPoolWithArgmax
3532 struct Attrs {
3533 /// Defaults to DT_INT64
3534 TF_MUST_USE_RESULT Attrs Targmax(DataType x) {
3535 Attrs ret = *this;
3536 ret.Targmax_ = x;
3537 return ret;
3538 }
3539
3540 /// Whether to include batch dimension in flattened index of `argmax`.
3541 ///
3542 /// Defaults to false
3543 TF_MUST_USE_RESULT Attrs IncludeBatchInIndex(bool x) {
3544 Attrs ret = *this;
3545 ret.include_batch_in_index_ = x;
3546 return ret;
3547 }
3548
3549 DataType Targmax_ = DT_INT64;
3550 bool include_batch_in_index_ = false;
3551 };
3552 MaxPoolWithArgmax(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3553 const gtl::ArraySlice<int>& ksize, const
3554 gtl::ArraySlice<int>& strides, StringPiece padding);
3555 MaxPoolWithArgmax(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3556 const gtl::ArraySlice<int>& ksize, const
3557 gtl::ArraySlice<int>& strides, StringPiece padding, const
3558 MaxPoolWithArgmax::Attrs& attrs);
3559
3560 static Attrs Targmax(DataType x) {
3561 return Attrs().Targmax(x);
3562 }
3563 static Attrs IncludeBatchInIndex(bool x) {
3564 return Attrs().IncludeBatchInIndex(x);
3565 }
3566
3567 Operation operation;
3568 ::tensorflow::Output output;
3569 ::tensorflow::Output argmax;
3570};
3571
3572/// Finds values of the `n`-th order statistic for the last dimension.
3573///
3574/// If the input is a vector (rank-1), finds the entries which is the nth-smallest
3575/// value in the vector and outputs their values as scalar tensor.
3576///
3577/// For matrices (resp. higher rank input), computes the entries which is the
3578/// nth-smallest value in each row (resp. vector along the last dimension). Thus,
3579///
3580/// values.shape = input.shape[:-1]
3581///
3582/// Args:
3583/// * scope: A Scope object
3584/// * input: 1-D or higher with last dimension at least `n+1`.
3585/// * n: 0-D. Position of sorted vector to select along the last dimension (along
3586/// each row for matrices). Valid range of n is `[0, input.shape[:-1])`
3587///
3588/// Optional attributes (see `Attrs`):
3589/// * reverse: When set to True, find the nth-largest value in the vector and vice
3590/// versa.
3591///
3592/// Returns:
3593/// * `Output`: The `n`-th order statistic along each last dimensional slice.
3594class NthElement {
3595 public:
3596 /// Optional attribute setters for NthElement
3597 struct Attrs {
3598 /// When set to True, find the nth-largest value in the vector and vice
3599 /// versa.
3600 ///
3601 /// Defaults to false
3602 TF_MUST_USE_RESULT Attrs Reverse(bool x) {
3603 Attrs ret = *this;
3604 ret.reverse_ = x;
3605 return ret;
3606 }
3607
3608 bool reverse_ = false;
3609 };
3610 NthElement(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3611 ::tensorflow::Input n);
3612 NthElement(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3613 ::tensorflow::Input n, const NthElement::Attrs& attrs);
3614 operator ::tensorflow::Output() const { return values; }
3615 operator ::tensorflow::Input() const { return values; }
3616 ::tensorflow::Node* node() const { return values.node(); }
3617
3618 static Attrs Reverse(bool x) {
3619 return Attrs().Reverse(x);
3620 }
3621
3622 Operation operation;
3623 ::tensorflow::Output values;
3624};
3625
3626/// Produces the average pool of the input tensor for quantized types.
3627///
3628/// Args:
3629/// * scope: A Scope object
3630/// * input: 4-D with shape `[batch, height, width, channels]`.
3631/// * min_input: The float value that the lowest quantized input value represents.
3632/// * max_input: The float value that the highest quantized input value represents.
3633/// * ksize: The size of the window for each dimension of the input tensor.
3634/// The length must be 4 to match the number of dimensions of the input.
3635/// * strides: The stride of the sliding window for each dimension of the input
3636/// tensor. The length must be 4 to match the number of dimensions of the input.
3637/// * padding: The type of padding algorithm to use.
3638///
3639/// Returns:
3640/// * `Output` output
3641/// * `Output` min_output: The float value that the lowest quantized output value represents.
3642/// * `Output` max_output: The float value that the highest quantized output value represents.
3643class QuantizedAvgPool {
3644 public:
3645 QuantizedAvgPool(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3646 ::tensorflow::Input min_input, ::tensorflow::Input max_input,
3647 const gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>&
3648 strides, StringPiece padding);
3649
3650 Operation operation;
3651 ::tensorflow::Output output;
3652 ::tensorflow::Output min_output;
3653 ::tensorflow::Output max_output;
3654};
3655
3656/// Quantized Batch normalization.
3657///
3658/// This op is deprecated and will be removed in the future. Prefer
3659/// `tf.nn.batch_normalization`.
3660///
3661/// Args:
3662/// * scope: A Scope object
3663/// * t: A 4D input Tensor.
3664/// * t_min: The value represented by the lowest quantized input.
3665/// * t_max: The value represented by the highest quantized input.
3666/// * m: A 1D mean Tensor with size matching the last dimension of t.
3667/// This is the first output from tf.nn.moments,
3668/// or a saved moving average thereof.
3669/// * m_min: The value represented by the lowest quantized mean.
3670/// * m_max: The value represented by the highest quantized mean.
3671/// * v: A 1D variance Tensor with size matching the last dimension of t.
3672/// This is the second output from tf.nn.moments,
3673/// or a saved moving average thereof.
3674/// * v_min: The value represented by the lowest quantized variance.
3675/// * v_max: The value represented by the highest quantized variance.
3676/// * beta: A 1D beta Tensor with size matching the last dimension of t.
3677/// An offset to be added to the normalized tensor.
3678/// * beta_min: The value represented by the lowest quantized offset.
3679/// * beta_max: The value represented by the highest quantized offset.
3680/// * gamma: A 1D gamma Tensor with size matching the last dimension of t.
3681/// If "scale_after_normalization" is true, this tensor will be multiplied
3682/// with the normalized tensor.
3683/// * gamma_min: The value represented by the lowest quantized gamma.
3684/// * gamma_max: The value represented by the highest quantized gamma.
3685/// * variance_epsilon: A small float number to avoid dividing by 0.
3686/// * scale_after_normalization: A bool indicating whether the resulted tensor
3687/// needs to be multiplied with gamma.
3688///
3689/// Returns:
3690/// * `Output` result
3691/// * `Output` result_min
3692/// * `Output` result_max
3693class QuantizedBatchNormWithGlobalNormalization {
3694 public:
3695 QuantizedBatchNormWithGlobalNormalization(const ::tensorflow::Scope& scope,
3696 ::tensorflow::Input t,
3697 ::tensorflow::Input t_min,
3698 ::tensorflow::Input t_max,
3699 ::tensorflow::Input m,
3700 ::tensorflow::Input m_min,
3701 ::tensorflow::Input m_max,
3702 ::tensorflow::Input v,
3703 ::tensorflow::Input v_min,
3704 ::tensorflow::Input v_max,
3705 ::tensorflow::Input beta,
3706 ::tensorflow::Input beta_min,
3707 ::tensorflow::Input beta_max,
3708 ::tensorflow::Input gamma,
3709 ::tensorflow::Input gamma_min,
3710 ::tensorflow::Input gamma_max,
3711 DataType out_type, float
3712 variance_epsilon, bool
3713 scale_after_normalization);
3714
3715 Operation operation;
3716 ::tensorflow::Output result;
3717 ::tensorflow::Output result_min;
3718 ::tensorflow::Output result_max;
3719};
3720
3721/// Adds Tensor 'bias' to Tensor 'input' for Quantized types.
3722///
3723/// Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
3724///
3725/// Args:
3726/// * scope: A Scope object
3727/// * bias: A 1D bias Tensor with size matching the last dimension of 'input'.
3728/// * min_input: The float value that the lowest quantized input value represents.
3729/// * max_input: The float value that the highest quantized input value represents.
3730/// * min_bias: The float value that the lowest quantized bias value represents.
3731/// * max_bias: The float value that the highest quantized bias value represents.
3732///
3733/// Returns:
3734/// * `Output` output
3735/// * `Output` min_out: The float value that the lowest quantized output value represents.
3736/// * `Output` max_out: The float value that the highest quantized output value represents.
3737class QuantizedBiasAdd {
3738 public:
3739 QuantizedBiasAdd(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3740 ::tensorflow::Input bias, ::tensorflow::Input min_input,
3741 ::tensorflow::Input max_input, ::tensorflow::Input min_bias,
3742 ::tensorflow::Input max_bias, DataType out_type);
3743
3744 Operation operation;
3745 ::tensorflow::Output output;
3746 ::tensorflow::Output min_out;
3747 ::tensorflow::Output max_out;
3748};
3749
3750/// Computes a 2D convolution given quantized 4D input and filter tensors.
3751///
3752/// The inputs are quantized tensors where the lowest value represents the real
3753/// number of the associated minimum, and the highest represents the maximum.
3754/// This means that you can only interpret the quantized output in the same way, by
3755/// taking the returned minimum and maximum values into account.
3756///
3757/// Args:
3758/// * scope: A Scope object
3759/// * filter: filter's input_depth dimension must match input's depth dimensions.
3760/// * min_input: The float value that the lowest quantized input value represents.
3761/// * max_input: The float value that the highest quantized input value represents.
3762/// * min_filter: The float value that the lowest quantized filter value represents.
3763/// * max_filter: The float value that the highest quantized filter value represents.
3764/// * strides: The stride of the sliding window for each dimension of the input
3765/// tensor.
3766/// * padding: The type of padding algorithm to use.
3767///
3768/// Optional attributes (see `Attrs`):
3769/// * dilations: 1-D tensor of length 4. The dilation factor for each dimension of
3770/// `input`. If set to k > 1, there will be k-1 skipped cells between each
3771/// filter element on that dimension. The dimension order is determined by the
3772/// value of `data_format`, see above for details. Dilations in the batch and
3773/// depth dimensions must be 1.
3774///
3775/// Returns:
3776/// * `Output` output
3777/// * `Output` min_output: The float value that the lowest quantized output value represents.
3778/// * `Output` max_output: The float value that the highest quantized output value represents.
3779class QuantizedConv2D {
3780 public:
3781 /// Optional attribute setters for QuantizedConv2D
3782 struct Attrs {
3783 /// Defaults to DT_QINT32
3784 TF_MUST_USE_RESULT Attrs OutType(DataType x) {
3785 Attrs ret = *this;
3786 ret.out_type_ = x;
3787 return ret;
3788 }
3789
3790 /// 1-D tensor of length 4. The dilation factor for each dimension of
3791 /// `input`. If set to k > 1, there will be k-1 skipped cells between each
3792 /// filter element on that dimension. The dimension order is determined by the
3793 /// value of `data_format`, see above for details. Dilations in the batch and
3794 /// depth dimensions must be 1.
3795 ///
3796 /// Defaults to [1, 1, 1, 1]
3797 TF_MUST_USE_RESULT Attrs Dilations(const gtl::ArraySlice<int>& x) {
3798 Attrs ret = *this;
3799 ret.dilations_ = x;
3800 return ret;
3801 }
3802
3803 DataType out_type_ = DT_QINT32;
3804 gtl::ArraySlice<int> dilations_ = Default_dilations();
3805 private:
3806 static gtl::ArraySlice<int> Default_dilations() {
3807 static const int kStorage[] = {1, 1, 1, 1};
3808 return gtl::ArraySlice<int>(kStorage);
3809 }
3810 };
3811 QuantizedConv2D(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3812 ::tensorflow::Input filter, ::tensorflow::Input min_input,
3813 ::tensorflow::Input max_input, ::tensorflow::Input min_filter,
3814 ::tensorflow::Input max_filter, const gtl::ArraySlice<int>&
3815 strides, StringPiece padding);
3816 QuantizedConv2D(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3817 ::tensorflow::Input filter, ::tensorflow::Input min_input,
3818 ::tensorflow::Input max_input, ::tensorflow::Input min_filter,
3819 ::tensorflow::Input max_filter, const gtl::ArraySlice<int>&
3820 strides, StringPiece padding, const QuantizedConv2D::Attrs&
3821 attrs);
3822
3823 static Attrs OutType(DataType x) {
3824 return Attrs().OutType(x);
3825 }
3826 static Attrs Dilations(const gtl::ArraySlice<int>& x) {
3827 return Attrs().Dilations(x);
3828 }
3829
3830 Operation operation;
3831 ::tensorflow::Output output;
3832 ::tensorflow::Output min_output;
3833 ::tensorflow::Output max_output;
3834};
3835
3836/// Produces the max pool of the input tensor for quantized types.
3837///
3838/// Args:
3839/// * scope: A Scope object
3840/// * input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
3841/// * min_input: The float value that the lowest quantized input value represents.
3842/// * max_input: The float value that the highest quantized input value represents.
3843/// * ksize: The size of the window for each dimension of the input tensor.
3844/// The length must be 4 to match the number of dimensions of the input.
3845/// * strides: The stride of the sliding window for each dimension of the input
3846/// tensor. The length must be 4 to match the number of dimensions of the input.
3847/// * padding: The type of padding algorithm to use.
3848///
3849/// Returns:
3850/// * `Output` output
3851/// * `Output` min_output: The float value that the lowest quantized output value represents.
3852/// * `Output` max_output: The float value that the highest quantized output value represents.
3853class QuantizedMaxPool {
3854 public:
3855 QuantizedMaxPool(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
3856 ::tensorflow::Input min_input, ::tensorflow::Input max_input,
3857 const gtl::ArraySlice<int>& ksize, const gtl::ArraySlice<int>&
3858 strides, StringPiece padding);
3859
3860 Operation operation;
3861 ::tensorflow::Output output;
3862 ::tensorflow::Output min_output;
3863 ::tensorflow::Output max_output;
3864};
3865
3866/// Computes Quantized Rectified Linear: `max(features, 0)`
3867///
3868/// Args:
3869/// * scope: A Scope object
3870/// * min_features: The float value that the lowest quantized value represents.
3871/// * max_features: The float value that the highest quantized value represents.
3872///
3873/// Returns:
3874/// * `Output` activations: Has the same output shape as "features".
3875/// * `Output` min_activations: The float value that the lowest quantized value represents.
3876/// * `Output` max_activations: The float value that the highest quantized value represents.
3877class QuantizedRelu {
3878 public:
3879 /// Optional attribute setters for QuantizedRelu
3880 struct Attrs {
3881 /// Defaults to DT_QUINT8
3882 TF_MUST_USE_RESULT Attrs OutType(DataType x) {
3883 Attrs ret = *this;
3884 ret.out_type_ = x;
3885 return ret;
3886 }
3887
3888 DataType out_type_ = DT_QUINT8;
3889 };
3890 QuantizedRelu(const ::tensorflow::Scope& scope, ::tensorflow::Input features,
3891 ::tensorflow::Input min_features, ::tensorflow::Input
3892 max_features);
3893 QuantizedRelu(const ::tensorflow::Scope& scope, ::tensorflow::Input features,
3894 ::tensorflow::Input min_features, ::tensorflow::Input
3895 max_features, const QuantizedRelu::Attrs& attrs);
3896
3897 static Attrs OutType(DataType x) {
3898 return Attrs().OutType(x);
3899 }
3900
3901 Operation operation;
3902 ::tensorflow::Output activations;
3903 ::tensorflow::Output min_activations;
3904 ::tensorflow::Output max_activations;
3905};
3906
3907/// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
3908///
3909/// Args:
3910/// * scope: A Scope object
3911/// * min_features: The float value that the lowest quantized value represents.
3912/// * max_features: The float value that the highest quantized value represents.
3913///
3914/// Returns:
3915/// * `Output` activations: Has the same output shape as "features".
3916/// * `Output` min_activations: The float value that the lowest quantized value represents.
3917/// * `Output` max_activations: The float value that the highest quantized value represents.
3918class QuantizedRelu6 {
3919 public:
3920 /// Optional attribute setters for QuantizedRelu6
3921 struct Attrs {
3922 /// Defaults to DT_QUINT8
3923 TF_MUST_USE_RESULT Attrs OutType(DataType x) {
3924 Attrs ret = *this;
3925 ret.out_type_ = x;
3926 return ret;
3927 }
3928
3929 DataType out_type_ = DT_QUINT8;
3930 };
3931 QuantizedRelu6(const ::tensorflow::Scope& scope, ::tensorflow::Input features,
3932 ::tensorflow::Input min_features, ::tensorflow::Input
3933 max_features);
3934 QuantizedRelu6(const ::tensorflow::Scope& scope, ::tensorflow::Input features,
3935 ::tensorflow::Input min_features, ::tensorflow::Input
3936 max_features, const QuantizedRelu6::Attrs& attrs);
3937
3938 static Attrs OutType(DataType x) {
3939 return Attrs().OutType(x);
3940 }
3941
3942 Operation operation;
3943 ::tensorflow::Output activations;
3944 ::tensorflow::Output min_activations;
3945 ::tensorflow::Output max_activations;
3946};
3947
3948/// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
3949///
3950/// Args:
3951/// * scope: A Scope object
3952/// * min_features: The float value that the lowest quantized value represents.
3953/// * max_features: The float value that the highest quantized value represents.
3954///
3955/// Returns:
3956/// * `Output` activations: Has the same output shape as "features".
3957/// * `Output` min_activations: The float value that the lowest quantized value represents.
3958/// * `Output` max_activations: The float value that the highest quantized value represents.
3959class QuantizedReluX {
3960 public:
3961 /// Optional attribute setters for QuantizedReluX
3962 struct Attrs {
3963 /// Defaults to DT_QUINT8
3964 TF_MUST_USE_RESULT Attrs OutType(DataType x) {
3965 Attrs ret = *this;
3966 ret.out_type_ = x;
3967 return ret;
3968 }
3969
3970 DataType out_type_ = DT_QUINT8;
3971 };
3972 QuantizedReluX(const ::tensorflow::Scope& scope, ::tensorflow::Input features,
3973 ::tensorflow::Input max_value, ::tensorflow::Input min_features,
3974 ::tensorflow::Input max_features);
3975 QuantizedReluX(const ::tensorflow::Scope& scope, ::tensorflow::Input features,
3976 ::tensorflow::Input max_value, ::tensorflow::Input min_features,
3977 ::tensorflow::Input max_features, const QuantizedReluX::Attrs&
3978 attrs);
3979
3980 static Attrs OutType(DataType x) {
3981 return Attrs().OutType(x);
3982 }
3983
3984 Operation operation;
3985 ::tensorflow::Output activations;
3986 ::tensorflow::Output min_activations;
3987 ::tensorflow::Output max_activations;
3988};
3989
3990/// Computes rectified linear: `max(features, 0)`.
3991///
3992/// See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
3993/// Example usage:
3994/// >>> tf.nn.relu([-2., 0., 3.]).numpy()
3995/// array([0., 0., 3.], dtype=float32)
3996///
3997/// Args:
3998/// * scope: A Scope object
3999///
4000/// Returns:
4001/// * `Output`: The activations tensor.
4002class Relu {
4003 public:
4004 Relu(const ::tensorflow::Scope& scope, ::tensorflow::Input features);
4005 operator ::tensorflow::Output() const { return activations; }
4006 operator ::tensorflow::Input() const { return activations; }
4007 ::tensorflow::Node* node() const { return activations.node(); }
4008
4009 Operation operation;
4010 ::tensorflow::Output activations;
4011};
4012
4013/// Computes rectified linear 6: `min(max(features, 0), 6)`.
4014///
4015/// Args:
4016/// * scope: A Scope object
4017///
4018/// Returns:
4019/// * `Output`: The activations tensor.
4020class Relu6 {
4021 public:
4022 Relu6(const ::tensorflow::Scope& scope, ::tensorflow::Input features);
4023 operator ::tensorflow::Output() const { return activations; }
4024 operator ::tensorflow::Input() const { return activations; }
4025 ::tensorflow::Node* node() const { return activations.node(); }
4026
4027 Operation operation;
4028 ::tensorflow::Output activations;
4029};
4030
4031/// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
4032///
4033/// if < 0, `scale * features` otherwise.
4034///
4035/// To be used together with
4036/// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
4037/// For correct dropout, use `tf.contrib.nn.alpha_dropout`.
4038///
4039/// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
4040///
4041/// Args:
4042/// * scope: A Scope object
4043///
4044/// Returns:
4045/// * `Output`: The activations tensor.
4046class Selu {
4047 public:
4048 Selu(const ::tensorflow::Scope& scope, ::tensorflow::Input features);
4049 operator ::tensorflow::Output() const { return activations; }
4050 operator ::tensorflow::Input() const { return activations; }
4051 ::tensorflow::Node* node() const { return activations.node(); }
4052
4053 Operation operation;
4054 ::tensorflow::Output activations;
4055};
4056
4057/// Computes softmax activations.
4058///
4059/// For each batch `i` and class `j` we have
4060///
4061/// $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
4062///
4063/// Args:
4064/// * scope: A Scope object
4065/// * logits: 2-D with shape `[batch_size, num_classes]`.
4066///
4067/// Returns:
4068/// * `Output`: Same shape as `logits`.
4069class Softmax {
4070 public:
4071 Softmax(const ::tensorflow::Scope& scope, ::tensorflow::Input logits);
4072 operator ::tensorflow::Output() const { return softmax; }
4073 operator ::tensorflow::Input() const { return softmax; }
4074 ::tensorflow::Node* node() const { return softmax.node(); }
4075
4076 Operation operation;
4077 ::tensorflow::Output softmax;
4078};
4079
4080/// Computes softmax cross entropy cost and gradients to backpropagate.
4081///
4082/// Inputs are the logits, not probabilities.
4083///
4084/// Args:
4085/// * scope: A Scope object
4086/// * features: batch_size x num_classes matrix
4087/// * labels: batch_size x num_classes matrix
4088/// The caller must ensure that each batch of labels represents a valid
4089/// probability distribution.
4090///
4091/// Returns:
4092/// * `Output` loss: Per example loss (batch_size vector).
4093/// * `Output` backprop: backpropagated gradients (batch_size x num_classes matrix).
4094class SoftmaxCrossEntropyWithLogits {
4095 public:
4096 SoftmaxCrossEntropyWithLogits(const ::tensorflow::Scope& scope,
4097 ::tensorflow::Input features, ::tensorflow::Input
4098 labels);
4099
4100 Operation operation;
4101 ::tensorflow::Output loss;
4102 ::tensorflow::Output backprop;
4103};
4104
4105/// TODO: add doc.
4106///
4107/// Args:
4108/// * scope: A Scope object
4109///
4110/// Returns:
4111/// * `Output`: The activations tensor.
4112class Softplus {
4113 public:
4114 Softplus(const ::tensorflow::Scope& scope, ::tensorflow::Input features);
4115 operator ::tensorflow::Output() const { return activations; }
4116 operator ::tensorflow::Input() const { return activations; }
4117 ::tensorflow::Node* node() const { return activations.node(); }
4118
4119 Operation operation;
4120 ::tensorflow::Output activations;
4121};
4122
4123/// Computes softsign: `features / (abs(features) + 1)`.
4124///
4125/// Args:
4126/// * scope: A Scope object
4127///
4128/// Returns:
4129/// * `Output`: The activations tensor.
4130class Softsign {
4131 public:
4132 Softsign(const ::tensorflow::Scope& scope, ::tensorflow::Input features);
4133 operator ::tensorflow::Output() const { return activations; }
4134 operator ::tensorflow::Input() const { return activations; }
4135 ::tensorflow::Node* node() const { return activations.node(); }
4136
4137 Operation operation;
4138 ::tensorflow::Output activations;
4139};
4140
4141/// Computes softmax cross entropy cost and gradients to backpropagate.
4142///
4143/// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
4144/// a matrix of label probabilities, but rather a single label per row
4145/// of features. This label is considered to have probability 1.0 for the
4146/// given row.
4147///
4148/// Inputs are the logits, not probabilities.
4149///
4150/// Args:
4151/// * scope: A Scope object
4152/// * features: batch_size x num_classes matrix
4153/// * labels: batch_size vector with values in [0, num_classes).
4154/// This is the label for the given minibatch entry.
4155///
4156/// Returns:
4157/// * `Output` loss: Per example loss (batch_size vector).
4158/// * `Output` backprop: backpropagated gradients (batch_size x num_classes matrix).
4159class SparseSoftmaxCrossEntropyWithLogits {
4160 public:
4161 SparseSoftmaxCrossEntropyWithLogits(const ::tensorflow::Scope& scope,
4162 ::tensorflow::Input features,
4163 ::tensorflow::Input labels);
4164
4165 Operation operation;
4166 ::tensorflow::Output loss;
4167 ::tensorflow::Output backprop;
4168};
4169
4170/// Finds values and indices of the `k` largest elements for the last dimension.
4171///
4172/// If the input is a vector (rank-1), finds the `k` largest entries in the vector
4173/// and outputs their values and indices as vectors. Thus `values[j]` is the
4174/// `j`-th largest entry in `input`, and its index is `indices[j]`.
4175///
4176/// For matrices (resp. higher rank input), computes the top `k` entries in each
4177/// row (resp. vector along the last dimension). Thus,
4178///
4179/// values.shape = indices.shape = input.shape[:-1] + [k]
4180///
4181/// If two elements are equal, the lower-index element appears first.
4182///
4183/// Args:
4184/// * scope: A Scope object
4185/// * input: 1-D or higher with last dimension at least `k`.
4186/// * k: 0-D. Number of top elements to look for along the last dimension (along each
4187/// row for matrices).
4188///
4189/// Optional attributes (see `Attrs`):
4190/// * sorted: If true the resulting `k` elements will be sorted by the values in
4191/// descending order.
4192///
4193/// Returns:
4194/// * `Output` values: The `k` largest elements along each last dimensional slice.
4195/// * `Output` indices: The indices of `values` within the last dimension of `input`.
4196class TopK {
4197 public:
4198 /// Optional attribute setters for TopK
4199 struct Attrs {
4200 /// If true the resulting `k` elements will be sorted by the values in
4201 /// descending order.
4202 ///
4203 /// Defaults to true
4204 TF_MUST_USE_RESULT Attrs Sorted(bool x) {
4205 Attrs ret = *this;
4206 ret.sorted_ = x;
4207 return ret;
4208 }
4209
4210 bool sorted_ = true;
4211 };
4212 TopK(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
4213 ::tensorflow::Input k);
4214 TopK(const ::tensorflow::Scope& scope, ::tensorflow::Input input,
4215 ::tensorflow::Input k, const TopK::Attrs& attrs);
4216
4217 static Attrs Sorted(bool x) {
4218 return Attrs().Sorted(x);
4219 }
4220
4221 Operation operation;
4222 ::tensorflow::Output values;
4223 ::tensorflow::Output indices;
4224};
4225
4226/// @}
4227
4228} // namespace ops
4229} // namespace tensorflow
4230
4231#endif // TENSORFLOW_CC_OPS_NN_OPS_H_
4232