1 | // This file is MACHINE GENERATED! Do not edit. |
2 | |
3 | #ifndef TENSORFLOW_CC_OPS_IMAGE_OPS_INTERNAL_H_ |
4 | #define TENSORFLOW_CC_OPS_IMAGE_OPS_INTERNAL_H_ |
5 | |
6 | // This file is MACHINE GENERATED! Do not edit. |
7 | |
8 | #include "tensorflow/cc/framework/ops.h" |
9 | #include "tensorflow/cc/framework/scope.h" |
10 | #include "tensorflow/core/framework/tensor.h" |
11 | #include "tensorflow/core/framework/tensor_shape.h" |
12 | #include "tensorflow/core/framework/types.h" |
13 | #include "tensorflow/core/lib/gtl/array_slice.h" |
14 | |
15 | namespace tensorflow { |
16 | namespace ops { |
17 | namespace internal { |
18 | // NOTE: This namespace has internal TensorFlow details that |
19 | // are not part of TensorFlow's public API. |
20 | |
21 | /// @defgroup image_ops_internal Image Ops Internal |
22 | /// @{ |
23 | |
24 | /// Extracts a glimpse from the input tensor. |
25 | /// |
26 | /// Returns a set of windows called glimpses extracted at location |
27 | /// `offsets` from the input tensor. If the windows only partially |
/// overlap the inputs, the non-overlapping areas will be filled with
29 | /// random noise. |
30 | /// |
31 | /// The result is a 4-D tensor of shape `[batch_size, glimpse_height, |
32 | /// glimpse_width, channels]`. The channels and batch dimensions are the |
33 | /// same as that of the input tensor. The height and width of the output |
34 | /// windows are specified in the `size` parameter. |
35 | /// |
/// The arguments `normalized` and `centered` control how the windows are built:
37 | /// |
38 | /// * If the coordinates are normalized but not centered, 0.0 and 1.0 |
39 | /// correspond to the minimum and maximum of each height and width |
40 | /// dimension. |
41 | /// * If the coordinates are both normalized and centered, they range from |
42 | /// -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper |
43 | /// left corner, the lower right corner is located at (1.0, 1.0) and the |
44 | /// center is at (0, 0). |
45 | /// * If the coordinates are not normalized they are interpreted as |
46 | /// numbers of pixels. |
47 | /// |
48 | /// Args: |
49 | /// * scope: A Scope object |
50 | /// * input: A 4-D float tensor of shape `[batch_size, height, width, channels]`. |
51 | /// * size: A 1-D tensor of 2 elements containing the size of the glimpses |
/// to extract. The glimpse height must be specified first, followed
53 | /// by the glimpse width. |
54 | /// * offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing |
55 | /// the y, x locations of the center of each window. |
56 | /// |
57 | /// Optional attributes (see `Attrs`): |
58 | /// * centered: indicates if the offset coordinates are centered relative to |
59 | /// the image, in which case the (0, 0) offset is relative to the center |
60 | /// of the input images. If false, the (0,0) offset corresponds to the |
61 | /// upper left corner of the input images. |
62 | /// * normalized: indicates if the offset coordinates are normalized. |
63 | /// * uniform_noise: indicates if the noise should be generated using a |
64 | /// uniform distribution or a Gaussian distribution. |
/// * noise: indicates if the noise should be `uniform`, `gaussian`, or
66 | /// `zero`. The default is `uniform` which means the noise type |
67 | /// will be decided by `uniform_noise`. |
68 | /// |
69 | /// Returns: |
70 | /// * `Output`: A tensor representing the glimpses `[batch_size, |
71 | /// glimpse_height, glimpse_width, channels]`. |
72 | class { |
73 | public: |
74 | /// Optional attribute setters for ExtractGlimpseV2 |
75 | struct { |
76 | /// indicates if the offset coordinates are centered relative to |
77 | /// the image, in which case the (0, 0) offset is relative to the center |
78 | /// of the input images. If false, the (0,0) offset corresponds to the |
79 | /// upper left corner of the input images. |
80 | /// |
81 | /// Defaults to true |
82 | TF_MUST_USE_RESULT Attrs (bool x) { |
83 | Attrs ret = *this; |
84 | ret.centered_ = x; |
85 | return ret; |
86 | } |
87 | |
88 | /// indicates if the offset coordinates are normalized. |
89 | /// |
90 | /// Defaults to true |
91 | TF_MUST_USE_RESULT Attrs (bool x) { |
92 | Attrs ret = *this; |
93 | ret.normalized_ = x; |
94 | return ret; |
95 | } |
96 | |
97 | /// indicates if the noise should be generated using a |
98 | /// uniform distribution or a Gaussian distribution. |
99 | /// |
100 | /// Defaults to true |
101 | TF_MUST_USE_RESULT Attrs (bool x) { |
102 | Attrs ret = *this; |
103 | ret.uniform_noise_ = x; |
104 | return ret; |
105 | } |
106 | |
107 | /// indicates if the noise should `uniform`, `gaussian`, or |
108 | /// `zero`. The default is `uniform` which means the noise type |
109 | /// will be decided by `uniform_noise`. |
110 | /// |
111 | /// Defaults to "uniform" |
112 | TF_MUST_USE_RESULT Attrs (StringPiece x) { |
113 | Attrs ret = *this; |
114 | ret.noise_ = x; |
115 | return ret; |
116 | } |
117 | |
118 | bool = true; |
119 | bool = true; |
120 | bool = true; |
121 | StringPiece = "uniform" ; |
122 | }; |
123 | (const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
124 | ::tensorflow::Input size, ::tensorflow::Input offsets); |
125 | (const ::tensorflow::Scope& scope, ::tensorflow::Input input, |
126 | ::tensorflow::Input size, ::tensorflow::Input offsets, const |
127 | ExtractGlimpseV2::Attrs& attrs); |
128 | () const { return glimpse; } |
129 | () const { return glimpse; } |
130 | ::tensorflow::Node* () const { return glimpse.node(); } |
131 | |
132 | static Attrs (bool x) { |
133 | return Attrs().Centered(x); |
134 | } |
135 | static Attrs (bool x) { |
136 | return Attrs().Normalized(x); |
137 | } |
138 | static Attrs (bool x) { |
139 | return Attrs().UniformNoise(x); |
140 | } |
141 | static Attrs (StringPiece x) { |
142 | return Attrs().Noise(x); |
143 | } |
144 | |
145 | Operation ; |
146 | ::tensorflow::Output ; |
147 | }; |
148 | |
149 | /// This op produces Region of Interests from given bounding boxes(bbox_deltas) encoded wrt anchors according to eq.2 in arXiv:1506.01497 |
150 | /// |
151 | /// The op selects top `pre_nms_topn` scoring boxes, decodes them with respect to anchors, |
152 | /// applies non-maximal suppression on overlapping boxes with higher than |
153 | /// `nms_threshold` intersection-over-union (iou) value, discarding boxes where shorter |
154 | /// side is less than `min_size`. |
155 | /// Inputs: |
156 | /// `scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at given position |
157 | /// `bbox_deltas`: is a tensor of shape [Batch, Height, Width, 4 x Num Anchors] boxes encoded to each anchor |
158 | /// `anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors. |
159 | /// Outputs: |
160 | /// `rois`: output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded by 0 if less than post_nms_topn candidates found. |
161 | /// `roi_probabilities`: probability scores of each roi in 'rois', a 2D tensor of shape [Batch,post_nms_topn], padded with 0 if needed, sorted by scores. |
162 | /// |
163 | /// Args: |
164 | /// * scope: A Scope object |
165 | /// * scores: A 4-D float tensor of shape `[num_images, height, width, num_achors]` containing scores of the boxes for given anchors, can be unsorted. |
/// * bbox_deltas: A 4-D float tensor of shape `[num_images, height, width, 4 x num_anchors]` encoding boxes with respect to each anchor.
167 | /// Coordinates are given in the form [dy, dx, dh, dw]. |
168 | /// * image_info: A 2-D float tensor of shape `[num_images, 5]` containing image information Height, Width, Scale. |
169 | /// * anchors: A 2-D float tensor of shape `[num_anchors, 4]` describing the anchor boxes. Boxes are formatted in the form [y1, x1, y2, x2]. |
170 | /// * nms_threshold: A scalar float tensor for non-maximal-suppression threshold. |
171 | /// * pre_nms_topn: A scalar int tensor for the number of top scoring boxes to be used as input. |
172 | /// * min_size: A scalar float tensor. Any box that has a smaller size than min_size will be discarded. |
173 | /// |
174 | /// Optional attributes (see `Attrs`): |
175 | /// * post_nms_topn: An integer. Maximum number of rois in the output. |
176 | /// |
177 | /// Returns: |
178 | /// * `Output` rois: A 3-D float tensor of shape `[num_images,post_nms_topn,4]` representing the selected |
179 | /// region of interest boxes. Sorted in descending order in scores. |
180 | /// * `Output` roi_probabilities: A 2-D float tensor of shape `[num_images, post_nms_topn]` representing the score of the |
181 | /// region of interest box in `rois` tensor at the same index. |
class GenerateBoundingBoxProposals {
 public:
  /// Optional attribute setters for GenerateBoundingBoxProposals
  struct Attrs {
    /// An integer. Maximum number of rois in the output.
    ///
    /// Defaults to 300
    TF_MUST_USE_RESULT Attrs PostNmsTopn(int64 x) {
      Attrs ret = *this;
      ret.post_nms_topn_ = x;
      return ret;
    }

    int64 post_nms_topn_ = 300;
  };
  /// Builds the op with default attributes.
  GenerateBoundingBoxProposals(const ::tensorflow::Scope& scope,
                             ::tensorflow::Input scores, ::tensorflow::Input
                             bbox_deltas, ::tensorflow::Input image_info,
                             ::tensorflow::Input anchors, ::tensorflow::Input
                             nms_threshold, ::tensorflow::Input pre_nms_topn,
                             ::tensorflow::Input min_size);
  /// Builds the op with explicit attributes (see `Attrs`).
  GenerateBoundingBoxProposals(const ::tensorflow::Scope& scope,
                             ::tensorflow::Input scores, ::tensorflow::Input
                             bbox_deltas, ::tensorflow::Input image_info,
                             ::tensorflow::Input anchors, ::tensorflow::Input
                             nms_threshold, ::tensorflow::Input pre_nms_topn,
                             ::tensorflow::Input min_size, const
                             GenerateBoundingBoxProposals::Attrs& attrs);

  /// Shorthand for Attrs().PostNmsTopn(x).
  static Attrs PostNmsTopn(int64 x) {
    return Attrs().PostNmsTopn(x);
  }

  // Two outputs, so no implicit Output conversion is generated.
  Operation operation;
  ::tensorflow::Output rois;
  ::tensorflow::Output roi_probabilities;
};
219 | |
220 | /// Applies the given transform to each of the images. |
221 | /// |
222 | /// If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps |
223 | /// the *output* point `(x, y)` to a transformed *input* point |
224 | /// `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where |
225 | /// `k = c0 x + c1 y + 1`. If the transformed point lays outside of the input |
226 | /// image, the output pixel is set to 0. |
227 | /// |
228 | /// Args: |
229 | /// * scope: A Scope object |
230 | /// * images: 4-D with shape `[batch, height, width, channels]`. |
231 | /// * transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3 |
232 | /// projective transformation matrix, with the last entry assumed to be 1. If there |
233 | /// is one row, the same transformation will be applied to all images. |
234 | /// * output_shape: 1-D Tensor [new_height, new_width]. |
235 | /// * interpolation: Interpolation method, "NEAREST" or "BILINEAR". |
236 | /// |
237 | /// Optional attributes (see `Attrs`): |
238 | /// * fill_mode: Fill mode, "REFLECT", "WRAP", or "CONSTANT". |
239 | /// |
240 | /// Returns: |
241 | /// * `Output`: 4-D with shape |
242 | /// `[batch, new_height, new_width, channels]`. |
class ImageProjectiveTransformV2 {
 public:
  /// Optional attribute setters for ImageProjectiveTransformV2
  struct Attrs {
    /// Fill mode, "REFLECT", "WRAP", or "CONSTANT".
    ///
    /// Defaults to "CONSTANT"
    TF_MUST_USE_RESULT Attrs FillMode(StringPiece x) {
      Attrs ret = *this;
      ret.fill_mode_ = x;
      return ret;
    }

    StringPiece fill_mode_ = "CONSTANT" ;
  };
  /// Builds the op with default attributes.
  ImageProjectiveTransformV2(const ::tensorflow::Scope& scope,
                           ::tensorflow::Input images, ::tensorflow::Input
                           transforms, ::tensorflow::Input output_shape,
                           StringPiece interpolation);
  /// Builds the op with explicit attributes (see `Attrs`).
  ImageProjectiveTransformV2(const ::tensorflow::Scope& scope,
                           ::tensorflow::Input images, ::tensorflow::Input
                           transforms, ::tensorflow::Input output_shape,
                           StringPiece interpolation, const
                           ImageProjectiveTransformV2::Attrs& attrs);
  /// Single-output op: allow implicit use as an Output/Input.
  operator ::tensorflow::Output() const { return transformed_images; }
  operator ::tensorflow::Input() const { return transformed_images; }
  ::tensorflow::Node* node() const { return transformed_images.node(); }

  /// Shorthand for Attrs().FillMode(x).
  static Attrs FillMode(StringPiece x) {
    return Attrs().FillMode(x);
  }

  Operation operation;
  ::tensorflow::Output transformed_images;
};
278 | |
279 | /// Applies the given transform to each of the images. |
280 | /// |
281 | /// If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps |
282 | /// the *output* point `(x, y)` to a transformed *input* point |
283 | /// `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where |
284 | /// `k = c0 x + c1 y + 1`. If the transformed point lays outside of the input |
285 | /// image, the output pixel is set to fill_value. |
286 | /// |
287 | /// Args: |
288 | /// * scope: A Scope object |
289 | /// * images: 4-D with shape `[batch, height, width, channels]`. |
290 | /// * transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3 |
291 | /// projective transformation matrix, with the last entry assumed to be 1. If there |
292 | /// is one row, the same transformation will be applied to all images. |
293 | /// * output_shape: 1-D Tensor [new_height, new_width]. |
/// * fill_value: float, the value to be filled when fill_mode is "CONSTANT".
295 | /// * interpolation: Interpolation method, "NEAREST" or "BILINEAR". |
296 | /// |
297 | /// Optional attributes (see `Attrs`): |
298 | /// * fill_mode: Fill mode, "REFLECT", "WRAP", "CONSTANT", or "NEAREST". |
299 | /// |
300 | /// Returns: |
301 | /// * `Output`: 4-D with shape |
302 | /// `[batch, new_height, new_width, channels]`. |
class ImageProjectiveTransformV3 {
 public:
  /// Optional attribute setters for ImageProjectiveTransformV3
  struct Attrs {
    /// Fill mode, "REFLECT", "WRAP", "CONSTANT", or "NEAREST".
    ///
    /// Defaults to "CONSTANT"
    TF_MUST_USE_RESULT Attrs FillMode(StringPiece x) {
      Attrs ret = *this;
      ret.fill_mode_ = x;
      return ret;
    }

    StringPiece fill_mode_ = "CONSTANT" ;
  };
  /// Builds the op with default attributes.
  ImageProjectiveTransformV3(const ::tensorflow::Scope& scope,
                           ::tensorflow::Input images, ::tensorflow::Input
                           transforms, ::tensorflow::Input output_shape,
                           ::tensorflow::Input fill_value, StringPiece
                           interpolation);
  /// Builds the op with explicit attributes (see `Attrs`).
  ImageProjectiveTransformV3(const ::tensorflow::Scope& scope,
                           ::tensorflow::Input images, ::tensorflow::Input
                           transforms, ::tensorflow::Input output_shape,
                           ::tensorflow::Input fill_value, StringPiece
                           interpolation, const
                           ImageProjectiveTransformV3::Attrs& attrs);
  /// Single-output op: allow implicit use as an Output/Input.
  operator ::tensorflow::Output() const { return transformed_images; }
  operator ::tensorflow::Input() const { return transformed_images; }
  ::tensorflow::Node* node() const { return transformed_images.node(); }

  /// Shorthand for Attrs().FillMode(x).
  static Attrs FillMode(StringPiece x) {
    return Attrs().FillMode(x);
  }

  Operation operation;
  ::tensorflow::Output transformed_images;
};
340 | |
341 | /// Computes the gradient of bicubic interpolation. |
342 | /// |
343 | /// Args: |
344 | /// * scope: A Scope object |
345 | /// * grads: 4-D with shape `[batch, height, width, channels]`. |
346 | /// * original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, |
347 | /// The image tensor that was resized. |
348 | /// |
349 | /// Optional attributes (see `Attrs`): |
350 | /// * align_corners: If true, the centers of the 4 corner pixels of the input and grad tensors are |
351 | /// aligned. Defaults to false. |
352 | /// |
353 | /// Returns: |
354 | /// * `Output`: 4-D with shape `[batch, orig_height, orig_width, channels]`. |
355 | /// Gradients with respect to the input image. Input image must have been |
356 | /// float or double. |
class ResizeBicubicGrad {
 public:
  /// Optional attribute setters for ResizeBicubicGrad
  struct Attrs {
    /// If true, the centers of the 4 corner pixels of the input and grad tensors are
    /// aligned. Defaults to false.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AlignCorners(bool x) {
      Attrs ret = *this;
      ret.align_corners_ = x;
      return ret;
    }

    /// NOTE(review): presumably enables half-pixel-center coordinate mapping
    /// to match the forward resize op — confirm against the op registry.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs HalfPixelCenters(bool x) {
      Attrs ret = *this;
      ret.half_pixel_centers_ = x;
      return ret;
    }

    bool align_corners_ = false;
    bool half_pixel_centers_ = false;
  };
  /// Builds the op with default attributes.
  ResizeBicubicGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input grads,
                  ::tensorflow::Input original_image);
  /// Builds the op with explicit attributes (see `Attrs`).
  ResizeBicubicGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input grads,
                  ::tensorflow::Input original_image, const
                  ResizeBicubicGrad::Attrs& attrs);
  /// Single-output op: allow implicit use as an Output/Input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  static Attrs AlignCorners(bool x) {
    return Attrs().AlignCorners(x);
  }
  static Attrs HalfPixelCenters(bool x) {
    return Attrs().HalfPixelCenters(x);
  }

  Operation operation;
  ::tensorflow::Output output;
};
400 | |
401 | /// Computes the gradient of bilinear interpolation. |
402 | /// |
403 | /// Args: |
404 | /// * scope: A Scope object |
405 | /// * grads: 4-D with shape `[batch, height, width, channels]`. |
406 | /// * original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, |
407 | /// The image tensor that was resized. |
408 | /// |
409 | /// Optional attributes (see `Attrs`): |
410 | /// * align_corners: If true, the centers of the 4 corner pixels of the input and grad tensors are |
411 | /// aligned. Defaults to false. |
412 | /// |
413 | /// Returns: |
414 | /// * `Output`: 4-D with shape `[batch, orig_height, orig_width, channels]`. |
415 | /// Gradients with respect to the input image. Input image must have been |
416 | /// float or double. |
class ResizeBilinearGrad {
 public:
  /// Optional attribute setters for ResizeBilinearGrad
  struct Attrs {
    /// If true, the centers of the 4 corner pixels of the input and grad tensors are
    /// aligned. Defaults to false.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AlignCorners(bool x) {
      Attrs ret = *this;
      ret.align_corners_ = x;
      return ret;
    }

    /// NOTE(review): presumably enables half-pixel-center coordinate mapping
    /// to match the forward resize op — confirm against the op registry.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs HalfPixelCenters(bool x) {
      Attrs ret = *this;
      ret.half_pixel_centers_ = x;
      return ret;
    }

    bool align_corners_ = false;
    bool half_pixel_centers_ = false;
  };
  /// Builds the op with default attributes.
  ResizeBilinearGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input grads,
                   ::tensorflow::Input original_image);
  /// Builds the op with explicit attributes (see `Attrs`).
  ResizeBilinearGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input grads,
                   ::tensorflow::Input original_image, const
                   ResizeBilinearGrad::Attrs& attrs);
  /// Single-output op: allow implicit use as an Output/Input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  static Attrs AlignCorners(bool x) {
    return Attrs().AlignCorners(x);
  }
  static Attrs HalfPixelCenters(bool x) {
    return Attrs().HalfPixelCenters(x);
  }

  Operation operation;
  ::tensorflow::Output output;
};
460 | |
461 | /// Computes the gradient of nearest neighbor interpolation. |
462 | /// |
463 | /// Args: |
464 | /// * scope: A Scope object |
465 | /// * grads: 4-D with shape `[batch, height, width, channels]`. |
466 | /// * size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The |
467 | /// original input size. |
468 | /// |
469 | /// Optional attributes (see `Attrs`): |
470 | /// * align_corners: If true, the centers of the 4 corner pixels of the input and grad tensors are |
471 | /// aligned. Defaults to false. |
472 | /// |
473 | /// Returns: |
474 | /// * `Output`: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients |
475 | /// with respect to the input image. |
class ResizeNearestNeighborGrad {
 public:
  /// Optional attribute setters for ResizeNearestNeighborGrad
  struct Attrs {
    /// If true, the centers of the 4 corner pixels of the input and grad tensors are
    /// aligned. Defaults to false.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs AlignCorners(bool x) {
      Attrs ret = *this;
      ret.align_corners_ = x;
      return ret;
    }

    /// NOTE(review): presumably enables half-pixel-center coordinate mapping
    /// to match the forward resize op — confirm against the op registry.
    ///
    /// Defaults to false
    TF_MUST_USE_RESULT Attrs HalfPixelCenters(bool x) {
      Attrs ret = *this;
      ret.half_pixel_centers_ = x;
      return ret;
    }

    bool align_corners_ = false;
    bool half_pixel_centers_ = false;
  };
  /// Builds the op with default attributes.
  ResizeNearestNeighborGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
                          grads, ::tensorflow::Input size);
  /// Builds the op with explicit attributes (see `Attrs`).
  ResizeNearestNeighborGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
                          grads, ::tensorflow::Input size, const
                          ResizeNearestNeighborGrad::Attrs& attrs);
  /// Single-output op: allow implicit use as an Output/Input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  static Attrs AlignCorners(bool x) {
    return Attrs().AlignCorners(x);
  }
  static Attrs HalfPixelCenters(bool x) {
    return Attrs().HalfPixelCenters(x);
  }

  Operation operation;
  ::tensorflow::Output output;
};
519 | |
520 | /// TODO: add doc. |
521 | /// |
522 | /// Args: |
523 | /// * scope: A Scope object |
524 | /// |
525 | /// Returns: |
526 | /// * `Output`: The output tensor. |
class ScaleAndTranslateGrad {
 public:
  /// Optional attribute setters for ScaleAndTranslateGrad
  struct Attrs {
    /// NOTE(review): presumably names the resampling kernel used by the
    /// forward ScaleAndTranslate op — confirm against the op registry.
    ///
    /// Defaults to "lanczos3"
    TF_MUST_USE_RESULT Attrs KernelType(StringPiece x) {
      Attrs ret = *this;
      ret.kernel_type_ = x;
      return ret;
    }

    /// NOTE(review): presumably toggles antialiasing to match the forward
    /// op's setting — confirm against the op registry.
    ///
    /// Defaults to true
    TF_MUST_USE_RESULT Attrs Antialias(bool x) {
      Attrs ret = *this;
      ret.antialias_ = x;
      return ret;
    }

    StringPiece kernel_type_ = "lanczos3" ;
    bool antialias_ = true;
  };
  /// Builds the op with default attributes.
  ScaleAndTranslateGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
                      grads, ::tensorflow::Input original_image,
                      ::tensorflow::Input scale, ::tensorflow::Input
                      translation);
  /// Builds the op with explicit attributes (see `Attrs`).
  ScaleAndTranslateGrad(const ::tensorflow::Scope& scope, ::tensorflow::Input
                      grads, ::tensorflow::Input original_image,
                      ::tensorflow::Input scale, ::tensorflow::Input
                      translation, const ScaleAndTranslateGrad::Attrs& attrs);
  /// Single-output op: allow implicit use as an Output/Input.
  operator ::tensorflow::Output() const { return output; }
  operator ::tensorflow::Input() const { return output; }
  ::tensorflow::Node* node() const { return output.node(); }

  static Attrs KernelType(StringPiece x) {
    return Attrs().KernelType(x);
  }
  static Attrs Antialias(bool x) {
    return Attrs().Antialias(x);
  }

  Operation operation;
  ::tensorflow::Output output;
};
570 | |
571 | } // namespace internal |
572 | } // namespace ops |
573 | } // namespace tensorflow |
574 | |
575 | #endif // TENSORFLOW_CC_OPS_IMAGE_OPS_INTERNAL_H_ |
576 | |