1// This file is MACHINE GENERATED! Do not edit.
2
3#ifndef TENSORFLOW_CC_OPS_DATASET_OPS_INTERNAL_H_
4#define TENSORFLOW_CC_OPS_DATASET_OPS_INTERNAL_H_
5
6// This file is MACHINE GENERATED! Do not edit.
7
8#include "tensorflow/cc/framework/ops.h"
9#include "tensorflow/cc/framework/scope.h"
10#include "tensorflow/core/framework/tensor.h"
11#include "tensorflow/core/framework/tensor_shape.h"
12#include "tensorflow/core/framework/types.h"
13#include "tensorflow/core/lib/gtl/array_slice.h"
14
15namespace tensorflow {
16namespace ops {
17namespace internal {
18// NOTE: This namespace has internal TensorFlow details that
19// are not part of TensorFlow's public API.
20
21/// @defgroup dataset_ops_internal Dataset Ops Internal
22/// @{
23
24/// A container for an iterator resource.
25///
26/// Args:
27/// * scope: A Scope object
28///
29/// Returns:
30/// * `Output` handle: A handle to the iterator that can be passed to a "MakeIterator" or
31/// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
32/// resource sharing by name, and does not keep a reference to the resource
33/// container.
34/// * `Output` deleter: A variant deleter that should be passed into the op that deletes the iterator.
35class AnonymousIteratorV2 {
36 public:
37 AnonymousIteratorV2(const ::tensorflow::Scope& scope, const DataTypeSlice&
38 output_types, const gtl::ArraySlice<PartialTensorShape>&
39 output_shapes);
40
41 Operation operation;
42 ::tensorflow::Output handle;
43 ::tensorflow::Output deleter;
44};
45
46/// A container for an iterator resource.
47///
48/// Args:
49/// * scope: A Scope object
50///
51/// Returns:
52/// * `Output`: A handle to the iterator that can be passed to a "MakeIterator" or
53/// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
54/// resource sharing by name, and does not keep a reference to the resource
55/// container.
56class AnonymousIteratorV3 {
57 public:
58 AnonymousIteratorV3(const ::tensorflow::Scope& scope, const DataTypeSlice&
59 output_types, const gtl::ArraySlice<PartialTensorShape>&
60 output_shapes);
61 operator ::tensorflow::Output() const { return handle; }
62 operator ::tensorflow::Input() const { return handle; }
63 ::tensorflow::Node* node() const { return handle.node(); }
64
65 Operation operation;
66 ::tensorflow::Output handle;
67};
68
69/// TODO: add doc.
70///
71/// Args:
72/// * scope: A Scope object
73///
74/// Returns:
75/// * `Output` handle
76/// * `Output` deleter
77class AnonymousMemoryCache {
78 public:
79 AnonymousMemoryCache(const ::tensorflow::Scope& scope);
80
81 Operation operation;
82 ::tensorflow::Output handle;
83 ::tensorflow::Output deleter;
84};
85
86/// A container for a multi device iterator resource.
87///
88/// Args:
89/// * scope: A Scope object
90///
91/// Returns:
92/// * `Output` handle: A handle to a multi device iterator that can be passed to a
93/// "MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
94/// AnonymousIterator prevents resource sharing by name, and does not keep a
95/// reference to the resource container.
96/// * `Output` deleter: A variant deleter that should be passed into the op that deletes the iterator.
97class AnonymousMultiDeviceIterator {
98 public:
99 AnonymousMultiDeviceIterator(const ::tensorflow::Scope& scope, const
100 gtl::ArraySlice<::tensorflow::tstring>& devices,
101 const DataTypeSlice& output_types, const
102 gtl::ArraySlice<PartialTensorShape>&
103 output_shapes);
104
105 Operation operation;
106 ::tensorflow::Output handle;
107 ::tensorflow::Output deleter;
108};
109
110/// A container for a multi device iterator resource.
111///
112/// Args:
113/// * scope: A Scope object
114///
115/// Returns:
116/// * `Output`: A handle to a multi device iterator that can be passed to a
117/// "MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
118/// AnonymousIterator prevents resource sharing by name, and does not keep a
119/// reference to the resource container.
120class AnonymousMultiDeviceIteratorV3 {
121 public:
122 AnonymousMultiDeviceIteratorV3(const ::tensorflow::Scope& scope, const
123 gtl::ArraySlice<::tensorflow::tstring>& devices,
124 const DataTypeSlice& output_types, const
125 gtl::ArraySlice<PartialTensorShape>&
126 output_shapes);
127 operator ::tensorflow::Output() const { return handle; }
128 operator ::tensorflow::Input() const { return handle; }
129 ::tensorflow::Node* node() const { return handle.node(); }
130
131 Operation operation;
132 ::tensorflow::Output handle;
133};
134
135/// TODO: add doc.
136///
137/// Args:
138/// * scope: A Scope object
139///
140/// Returns:
141/// * `Output` handle
142/// * `Output` deleter
143class AnonymousRandomSeedGenerator {
144 public:
145 AnonymousRandomSeedGenerator(const ::tensorflow::Scope& scope,
146 ::tensorflow::Input seed, ::tensorflow::Input
147 seed2);
148
149 Operation operation;
150 ::tensorflow::Output handle;
151 ::tensorflow::Output deleter;
152};
153
154/// TODO: add doc.
155///
156/// Args:
157/// * scope: A Scope object
158///
159/// Returns:
160/// * `Output` handle
161/// * `Output` deleter
162class AnonymousSeedGenerator {
163 public:
164 AnonymousSeedGenerator(const ::tensorflow::Scope& scope, ::tensorflow::Input
165 seed, ::tensorflow::Input seed2, ::tensorflow::Input
166 reshuffle);
167
168 Operation operation;
169 ::tensorflow::Output handle;
170 ::tensorflow::Output deleter;
171};
172
173/// Creates a dataset that batches `batch_size` elements from `input_dataset`.
174///
175/// Args:
176/// * scope: A Scope object
177/// * batch_size: A scalar representing the number of elements to accumulate in a
178/// batch.
179///
180/// Returns:
181/// * `Output`: The handle tensor.
182class BatchDataset {
183 public:
184 /// Optional attribute setters for BatchDataset
185 struct Attrs {
186 /// Defaults to ""
187 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
188 Attrs ret = *this;
189 ret.metadata_ = x;
190 return ret;
191 }
192
193 StringPiece metadata_ = "";
194 };
195 BatchDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
196 input_dataset, ::tensorflow::Input batch_size, const
197 DataTypeSlice& output_types, const
198 gtl::ArraySlice<PartialTensorShape>& output_shapes);
199 BatchDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
200 input_dataset, ::tensorflow::Input batch_size, const
201 DataTypeSlice& output_types, const
202 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
203 BatchDataset::Attrs& attrs);
204 operator ::tensorflow::Output() const { return handle; }
205 operator ::tensorflow::Input() const { return handle; }
206 ::tensorflow::Node* node() const { return handle.node(); }
207
208 static Attrs Metadata(StringPiece x) {
209 return Attrs().Metadata(x);
210 }
211
212 Operation operation;
213 ::tensorflow::Output handle;
214};
215
216/// Creates a dataset that batches `batch_size` elements from `input_dataset`.
217///
218/// Args:
219/// * scope: A Scope object
220/// * batch_size: A scalar representing the number of elements to accumulate in a batch.
221/// * drop_remainder: A scalar representing whether the last batch should be dropped in case its size
222/// is smaller than desired.
223///
224/// Returns:
225/// * `Output`: The handle tensor.
226class BatchDatasetV2 {
227 public:
228 /// Optional attribute setters for BatchDatasetV2
229 struct Attrs {
230 /// Defaults to false
231 TF_MUST_USE_RESULT Attrs ParallelCopy(bool x) {
232 Attrs ret = *this;
233 ret.parallel_copy_ = x;
234 return ret;
235 }
236
237 /// Defaults to ""
238 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
239 Attrs ret = *this;
240 ret.metadata_ = x;
241 return ret;
242 }
243
244 bool parallel_copy_ = false;
245 StringPiece metadata_ = "";
246 };
247 BatchDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
248 input_dataset, ::tensorflow::Input batch_size,
249 ::tensorflow::Input drop_remainder, const DataTypeSlice&
250 output_types, const gtl::ArraySlice<PartialTensorShape>&
251 output_shapes);
252 BatchDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
253 input_dataset, ::tensorflow::Input batch_size,
254 ::tensorflow::Input drop_remainder, const DataTypeSlice&
255 output_types, const gtl::ArraySlice<PartialTensorShape>&
256 output_shapes, const BatchDatasetV2::Attrs& attrs);
257 operator ::tensorflow::Output() const { return handle; }
258 operator ::tensorflow::Input() const { return handle; }
259 ::tensorflow::Node* node() const { return handle.node(); }
260
261 static Attrs ParallelCopy(bool x) {
262 return Attrs().ParallelCopy(x);
263 }
264 static Attrs Metadata(StringPiece x) {
265 return Attrs().Metadata(x);
266 }
267
268 Operation operation;
269 ::tensorflow::Output handle;
270};
271
272/// Creates a dataset that caches elements from `input_dataset`.
273///
274/// A CacheDataset will iterate over the input_dataset, and store tensors. If the
275/// cache already exists, the cache will be used. If the cache is inappropriate
276/// (e.g. cannot be opened, contains tensors of the wrong shape / size), an error
277/// will the returned when used.
278///
279/// Args:
280/// * scope: A Scope object
281/// * filename: A path on the filesystem where we should cache the dataset. Note: this
282/// will be a directory.
283///
284/// Returns:
285/// * `Output`: The handle tensor.
286class CacheDataset {
287 public:
288 /// Optional attribute setters for CacheDataset
289 struct Attrs {
290 /// Defaults to ""
291 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
292 Attrs ret = *this;
293 ret.metadata_ = x;
294 return ret;
295 }
296
297 StringPiece metadata_ = "";
298 };
299 CacheDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
300 input_dataset, ::tensorflow::Input filename, const DataTypeSlice&
301 output_types, const gtl::ArraySlice<PartialTensorShape>&
302 output_shapes);
303 CacheDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
304 input_dataset, ::tensorflow::Input filename, const DataTypeSlice&
305 output_types, const gtl::ArraySlice<PartialTensorShape>&
306 output_shapes, const CacheDataset::Attrs& attrs);
307 operator ::tensorflow::Output() const { return handle; }
308 operator ::tensorflow::Input() const { return handle; }
309 ::tensorflow::Node* node() const { return handle.node(); }
310
311 static Attrs Metadata(StringPiece x) {
312 return Attrs().Metadata(x);
313 }
314
315 Operation operation;
316 ::tensorflow::Output handle;
317};
318
319/// TODO: add doc.
320///
321/// Args:
322/// * scope: A Scope object
323///
324/// Returns:
325/// * `Output`: The handle tensor.
326class CacheDatasetV2 {
327 public:
328 /// Optional attribute setters for CacheDatasetV2
329 struct Attrs {
330 /// Defaults to ""
331 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
332 Attrs ret = *this;
333 ret.metadata_ = x;
334 return ret;
335 }
336
337 StringPiece metadata_ = "";
338 };
339 CacheDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
340 input_dataset, ::tensorflow::Input filename, ::tensorflow::Input
341 cache, const DataTypeSlice& output_types, const
342 gtl::ArraySlice<PartialTensorShape>& output_shapes);
343 CacheDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
344 input_dataset, ::tensorflow::Input filename, ::tensorflow::Input
345 cache, const DataTypeSlice& output_types, const
346 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
347 CacheDatasetV2::Attrs& attrs);
348 operator ::tensorflow::Output() const { return handle; }
349 operator ::tensorflow::Input() const { return handle; }
350 ::tensorflow::Node* node() const { return handle.node(); }
351
352 static Attrs Metadata(StringPiece x) {
353 return Attrs().Metadata(x);
354 }
355
356 Operation operation;
357 ::tensorflow::Output handle;
358};
359
360/// Creates a dataset that concatenates `input_dataset` with `another_dataset`.
361///
362/// Args:
363/// * scope: A Scope object
364///
365/// Returns:
366/// * `Output`: The handle tensor.
367class ConcatenateDataset {
368 public:
369 /// Optional attribute setters for ConcatenateDataset
370 struct Attrs {
371 /// Defaults to ""
372 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
373 Attrs ret = *this;
374 ret.metadata_ = x;
375 return ret;
376 }
377
378 StringPiece metadata_ = "";
379 };
380 ConcatenateDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
381 input_dataset, ::tensorflow::Input another_dataset, const
382 DataTypeSlice& output_types, const
383 gtl::ArraySlice<PartialTensorShape>& output_shapes);
384 ConcatenateDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
385 input_dataset, ::tensorflow::Input another_dataset, const
386 DataTypeSlice& output_types, const
387 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
388 ConcatenateDataset::Attrs& attrs);
389 operator ::tensorflow::Output() const { return handle; }
390 operator ::tensorflow::Input() const { return handle; }
391 ::tensorflow::Node* node() const { return handle.node(); }
392
393 static Attrs Metadata(StringPiece x) {
394 return Attrs().Metadata(x);
395 }
396
397 Operation operation;
398 ::tensorflow::Output handle;
399};
400
401/// Returns the cardinality of `input_dataset`.
402///
403/// Returns the cardinality of `input_dataset`.
404///
405/// Args:
406/// * scope: A Scope object
407/// * input_dataset: A variant tensor representing the dataset to return cardinality for.
408///
409/// Returns:
410/// * `Output`: The cardinality of `input_dataset`. Named constants are used to represent
411/// infinite and unknown cardinality.
412class DatasetCardinality {
413 public:
414 DatasetCardinality(const ::tensorflow::Scope& scope, ::tensorflow::Input
415 input_dataset);
416 operator ::tensorflow::Output() const { return cardinality; }
417 operator ::tensorflow::Input() const { return cardinality; }
418 ::tensorflow::Node* node() const { return cardinality.node(); }
419
420 Operation operation;
421 ::tensorflow::Output cardinality;
422};
423
424/// Returns a serialized GraphDef representing `input_dataset`.
425///
426/// Returns a graph representation for `input_dataset`.
427///
428/// Args:
429/// * scope: A Scope object
430/// * input_dataset: A variant tensor representing the dataset to return the graph representation for.
431///
432/// Returns:
433/// * `Output`: The graph representation of the dataset (as serialized GraphDef).
434class DatasetToGraph {
435 public:
436 /// Optional attribute setters for DatasetToGraph
437 struct Attrs {
438 /// Defaults to []
439 TF_MUST_USE_RESULT Attrs StatefulWhitelist(const gtl::ArraySlice<::tensorflow::tstring>& x) {
440 Attrs ret = *this;
441 ret.stateful_whitelist_ = x;
442 return ret;
443 }
444
445 /// Defaults to false
446 TF_MUST_USE_RESULT Attrs AllowStateful(bool x) {
447 Attrs ret = *this;
448 ret.allow_stateful_ = x;
449 return ret;
450 }
451
452 /// Defaults to false
453 TF_MUST_USE_RESULT Attrs StripDeviceAssignment(bool x) {
454 Attrs ret = *this;
455 ret.strip_device_assignment_ = x;
456 return ret;
457 }
458
459 gtl::ArraySlice<::tensorflow::tstring> stateful_whitelist_ = {};
460 bool allow_stateful_ = false;
461 bool strip_device_assignment_ = false;
462 };
463 DatasetToGraph(const ::tensorflow::Scope& scope, ::tensorflow::Input
464 input_dataset);
465 DatasetToGraph(const ::tensorflow::Scope& scope, ::tensorflow::Input
466 input_dataset, const DatasetToGraph::Attrs& attrs);
467 operator ::tensorflow::Output() const { return graph; }
468 operator ::tensorflow::Input() const { return graph; }
469 ::tensorflow::Node* node() const { return graph.node(); }
470
471 static Attrs StatefulWhitelist(const gtl::ArraySlice<::tensorflow::tstring>& x) {
472 return Attrs().StatefulWhitelist(x);
473 }
474 static Attrs AllowStateful(bool x) {
475 return Attrs().AllowStateful(x);
476 }
477 static Attrs StripDeviceAssignment(bool x) {
478 return Attrs().StripDeviceAssignment(x);
479 }
480
481 Operation operation;
482 ::tensorflow::Output graph;
483};
484
485/// Returns a serialized GraphDef representing `input_dataset`.
486///
487/// Returns a graph representation for `input_dataset`.
488///
489/// Args:
490/// * scope: A Scope object
491/// * input_dataset: A variant tensor representing the dataset to return the graph representation for.
492///
493/// Returns:
494/// * `Output`: The graph representation of the dataset (as serialized GraphDef).
495class DatasetToGraphV2 {
496 public:
497 /// Optional attribute setters for DatasetToGraphV2
498 struct Attrs {
499 /// Defaults to 0
500 TF_MUST_USE_RESULT Attrs ExternalStatePolicy(int64 x) {
501 Attrs ret = *this;
502 ret.external_state_policy_ = x;
503 return ret;
504 }
505
506 /// Defaults to false
507 TF_MUST_USE_RESULT Attrs StripDeviceAssignment(bool x) {
508 Attrs ret = *this;
509 ret.strip_device_assignment_ = x;
510 return ret;
511 }
512
513 int64 external_state_policy_ = 0;
514 bool strip_device_assignment_ = false;
515 };
516 DatasetToGraphV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
517 input_dataset);
518 DatasetToGraphV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
519 input_dataset, const DatasetToGraphV2::Attrs& attrs);
520 operator ::tensorflow::Output() const { return graph; }
521 operator ::tensorflow::Input() const { return graph; }
522 ::tensorflow::Node* node() const { return graph.node(); }
523
524 static Attrs ExternalStatePolicy(int64 x) {
525 return Attrs().ExternalStatePolicy(x);
526 }
527 static Attrs StripDeviceAssignment(bool x) {
528 return Attrs().StripDeviceAssignment(x);
529 }
530
531 Operation operation;
532 ::tensorflow::Output graph;
533};
534
535/// Outputs the single element from the given dataset.
536///
537/// Args:
538/// * scope: A Scope object
539/// * dataset: A handle to a dataset that contains a single element.
540///
541/// Returns:
542/// * `OutputList`: The components of the single element of `input`.
543class DatasetToSingleElement {
544 public:
545 /// Optional attribute setters for DatasetToSingleElement
546 struct Attrs {
547 /// Defaults to ""
548 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
549 Attrs ret = *this;
550 ret.metadata_ = x;
551 return ret;
552 }
553
554 StringPiece metadata_ = "";
555 };
556 DatasetToSingleElement(const ::tensorflow::Scope& scope, ::tensorflow::Input
557 dataset, const DataTypeSlice& output_types, const
558 gtl::ArraySlice<PartialTensorShape>& output_shapes);
559 DatasetToSingleElement(const ::tensorflow::Scope& scope, ::tensorflow::Input
560 dataset, const DataTypeSlice& output_types, const
561 gtl::ArraySlice<PartialTensorShape>& output_shapes,
562 const DatasetToSingleElement::Attrs& attrs);
563 ::tensorflow::Output operator[](size_t index) const { return components[index]; }
564
565
566 static Attrs Metadata(StringPiece x) {
567 return Attrs().Metadata(x);
568 }
569
570 Operation operation;
571 ::tensorflow::OutputList components;
572};
573
574/// A container for an iterator resource.
575///
576/// Args:
577/// * scope: A Scope object
578/// * handle: A handle to the iterator to delete.
579/// * deleter: A variant deleter.
580///
581/// Returns:
582/// * the created `Operation`
583class DeleteIterator {
584 public:
585 DeleteIterator(const ::tensorflow::Scope& scope, ::tensorflow::Input handle,
586 ::tensorflow::Input deleter);
587 operator ::tensorflow::Operation() const { return operation; }
588
589 Operation operation;
590};
591
592/// TODO: add doc.
593///
594/// Args:
595/// * scope: A Scope object
596///
597/// Returns:
598/// * the created `Operation`
599class DeleteMemoryCache {
600 public:
601 DeleteMemoryCache(const ::tensorflow::Scope& scope, ::tensorflow::Input handle,
602 ::tensorflow::Input deleter);
603 operator ::tensorflow::Operation() const { return operation; }
604
605 Operation operation;
606};
607
608/// A container for an iterator resource.
609///
610/// Args:
611/// * scope: A Scope object
612/// * multi_device_iterator: A handle to the multi device iterator to delete.
613/// * iterators: A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.
614/// * deleter: A variant deleter.
615///
616/// Returns:
617/// * the created `Operation`
618class DeleteMultiDeviceIterator {
619 public:
620 DeleteMultiDeviceIterator(const ::tensorflow::Scope& scope, ::tensorflow::Input
621 multi_device_iterator, ::tensorflow::InputList
622 iterators, ::tensorflow::Input deleter);
623 operator ::tensorflow::Operation() const { return operation; }
624
625 Operation operation;
626};
627
628/// TODO: add doc.
629///
630/// Args:
631/// * scope: A Scope object
632///
633/// Returns:
634/// * the created `Operation`
635class DeleteRandomSeedGenerator {
636 public:
637 DeleteRandomSeedGenerator(const ::tensorflow::Scope& scope, ::tensorflow::Input
638 handle, ::tensorflow::Input deleter);
639 operator ::tensorflow::Operation() const { return operation; }
640
641 Operation operation;
642};
643
644/// TODO: add doc.
645///
646/// Args:
647/// * scope: A Scope object
648///
649/// Returns:
650/// * the created `Operation`
651class DeleteSeedGenerator {
652 public:
653 DeleteSeedGenerator(const ::tensorflow::Scope& scope, ::tensorflow::Input
654 handle, ::tensorflow::Input deleter);
655 operator ::tensorflow::Operation() const { return operation; }
656
657 Operation operation;
658};
659
660/// TODO: add doc.
661///
662/// Args:
663/// * scope: A Scope object
664///
665/// Returns:
666/// * `Output`: The handle tensor.
667class DummyMemoryCache {
668 public:
669 DummyMemoryCache(const ::tensorflow::Scope& scope);
670 operator ::tensorflow::Output() const { return handle; }
671 operator ::tensorflow::Input() const { return handle; }
672 ::tensorflow::Node* node() const { return handle.node(); }
673
674 Operation operation;
675 ::tensorflow::Output handle;
676};
677
678/// TODO: add doc.
679///
680/// Args:
681/// * scope: A Scope object
682///
683/// Returns:
684/// * `Output`: The handle tensor.
685class DummySeedGenerator {
686 public:
687 DummySeedGenerator(const ::tensorflow::Scope& scope);
688 operator ::tensorflow::Output() const { return handle; }
689 operator ::tensorflow::Input() const { return handle; }
690 ::tensorflow::Node* node() const { return handle.node(); }
691
692 Operation operation;
693 ::tensorflow::Output handle;
694};
695
696/// Creates a dataset containing elements of first component of `input_dataset` having true in the last component.
697///
698/// Args:
699/// * scope: A Scope object
700///
701/// Returns:
702/// * `Output`: The output tensor.
703class FilterByLastComponentDataset {
704 public:
705 FilterByLastComponentDataset(const ::tensorflow::Scope& scope,
706 ::tensorflow::Input input_dataset, const
707 DataTypeSlice& output_types, const
708 gtl::ArraySlice<PartialTensorShape>&
709 output_shapes);
710 operator ::tensorflow::Output() const { return output; }
711 operator ::tensorflow::Input() const { return output; }
712 ::tensorflow::Node* node() const { return output.node(); }
713
714 Operation operation;
715 ::tensorflow::Output output;
716};
717
718/// Creates a dataset containing elements of `input_dataset` matching `predicate`.
719///
720/// The `predicate` function must return a scalar boolean and accept the
721/// following arguments:
722///
723/// * One tensor for each component of an element of `input_dataset`.
724/// * One tensor for each value in `other_arguments`.
725///
726/// Args:
727/// * scope: A Scope object
728/// * other_arguments: A list of tensors, typically values that were captured when
729/// building a closure for `predicate`.
730/// * predicate: A function returning a scalar boolean.
731///
732/// Returns:
733/// * `Output`: The handle tensor.
734class FilterDataset {
735 public:
736 /// Optional attribute setters for FilterDataset
737 struct Attrs {
738 /// Defaults to ""
739 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
740 Attrs ret = *this;
741 ret.metadata_ = x;
742 return ret;
743 }
744
745 StringPiece metadata_ = "";
746 };
747 FilterDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
748 input_dataset, ::tensorflow::InputList other_arguments, const
749 NameAttrList& predicate, const DataTypeSlice& output_types, const
750 gtl::ArraySlice<PartialTensorShape>& output_shapes);
751 FilterDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
752 input_dataset, ::tensorflow::InputList other_arguments, const
753 NameAttrList& predicate, const DataTypeSlice& output_types, const
754 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
755 FilterDataset::Attrs& attrs);
756 operator ::tensorflow::Output() const { return handle; }
757 operator ::tensorflow::Input() const { return handle; }
758 ::tensorflow::Node* node() const { return handle.node(); }
759
760 static Attrs Metadata(StringPiece x) {
761 return Attrs().Metadata(x);
762 }
763
764 Operation operation;
765 ::tensorflow::Output handle;
766};
767
768/// Creates a dataset by applying `tf.data.Options` to `input_dataset`.
769///
770/// Args:
771/// * scope: A Scope object
772/// * input_dataset: A variant tensor representing the input dataset.
773///
774/// Returns:
775/// * `Output`: The handle tensor.
776class FinalizeDataset {
777 public:
778 /// Optional attribute setters for FinalizeDataset
779 struct Attrs {
780 /// Defaults to false
781 TF_MUST_USE_RESULT Attrs HasCapturedRef(bool x) {
782 Attrs ret = *this;
783 ret.has_captured_ref_ = x;
784 return ret;
785 }
786
787 bool has_captured_ref_ = false;
788 };
789 FinalizeDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
790 input_dataset, const DataTypeSlice& output_types, const
791 gtl::ArraySlice<PartialTensorShape>& output_shapes);
792 FinalizeDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
793 input_dataset, const DataTypeSlice& output_types, const
794 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
795 FinalizeDataset::Attrs& attrs);
796 operator ::tensorflow::Output() const { return handle; }
797 operator ::tensorflow::Input() const { return handle; }
798 ::tensorflow::Node* node() const { return handle.node(); }
799
800 static Attrs HasCapturedRef(bool x) {
801 return Attrs().HasCapturedRef(x);
802 }
803
804 Operation operation;
805 ::tensorflow::Output handle;
806};
807
808/// Creates a dataset that emits the records from one or more binary files.
809///
810/// Args:
811/// * scope: A Scope object
812/// * filenames: A scalar or a vector containing the name(s) of the file(s) to be
813/// read.
814/// * header_bytes: A scalar representing the number of bytes to skip at the
815/// beginning of a file.
816/// * record_bytes: A scalar representing the number of bytes in each record.
817/// * footer_bytes: A scalar representing the number of bytes to skip at the end
818/// of a file.
819/// * buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
820///
821/// Returns:
822/// * `Output`: The handle tensor.
823class FixedLengthRecordDataset {
824 public:
825 /// Optional attribute setters for FixedLengthRecordDataset
826 struct Attrs {
827 /// Defaults to ""
828 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
829 Attrs ret = *this;
830 ret.metadata_ = x;
831 return ret;
832 }
833
834 StringPiece metadata_ = "";
835 };
836 FixedLengthRecordDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
837 filenames, ::tensorflow::Input header_bytes,
838 ::tensorflow::Input record_bytes, ::tensorflow::Input
839 footer_bytes, ::tensorflow::Input buffer_size);
840 FixedLengthRecordDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
841 filenames, ::tensorflow::Input header_bytes,
842 ::tensorflow::Input record_bytes, ::tensorflow::Input
843 footer_bytes, ::tensorflow::Input buffer_size, const
844 FixedLengthRecordDataset::Attrs& attrs);
845 operator ::tensorflow::Output() const { return handle; }
846 operator ::tensorflow::Input() const { return handle; }
847 ::tensorflow::Node* node() const { return handle.node(); }
848
849 static Attrs Metadata(StringPiece x) {
850 return Attrs().Metadata(x);
851 }
852
853 Operation operation;
854 ::tensorflow::Output handle;
855};
856
857/// TODO: add doc.
858///
859/// Args:
860/// * scope: A Scope object
861///
862/// Returns:
863/// * `Output`: The handle tensor.
864class FixedLengthRecordDatasetV2 {
865 public:
866 /// Optional attribute setters for FixedLengthRecordDatasetV2
867 struct Attrs {
868 /// Defaults to ""
869 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
870 Attrs ret = *this;
871 ret.metadata_ = x;
872 return ret;
873 }
874
875 StringPiece metadata_ = "";
876 };
877 FixedLengthRecordDatasetV2(const ::tensorflow::Scope& scope,
878 ::tensorflow::Input filenames, ::tensorflow::Input
879 header_bytes, ::tensorflow::Input record_bytes,
880 ::tensorflow::Input footer_bytes,
881 ::tensorflow::Input buffer_size, ::tensorflow::Input
882 compression_type);
883 FixedLengthRecordDatasetV2(const ::tensorflow::Scope& scope,
884 ::tensorflow::Input filenames, ::tensorflow::Input
885 header_bytes, ::tensorflow::Input record_bytes,
886 ::tensorflow::Input footer_bytes,
887 ::tensorflow::Input buffer_size, ::tensorflow::Input
888 compression_type, const
889 FixedLengthRecordDatasetV2::Attrs& attrs);
890 operator ::tensorflow::Output() const { return handle; }
891 operator ::tensorflow::Input() const { return handle; }
892 ::tensorflow::Node* node() const { return handle.node(); }
893
894 static Attrs Metadata(StringPiece x) {
895 return Attrs().Metadata(x);
896 }
897
898 Operation operation;
899 ::tensorflow::Output handle;
900};
901
902/// Creates a dataset that applies `f` to the outputs of `input_dataset`.
903///
904/// Unlike MapDataset, the `f` in FlatMapDataset is expected to return a
905/// Dataset variant, and FlatMapDataset will flatten successive results
906/// into a single Dataset.
907///
908/// Args:
909/// * scope: A Scope object
910/// * f: A function mapping elements of `input_dataset`, concatenated with
911/// `other_arguments`, to a Dataset variant that contains elements matching
912/// `output_types` and `output_shapes`.
913///
914/// Returns:
915/// * `Output`: The handle tensor.
916class FlatMapDataset {
917 public:
918 /// Optional attribute setters for FlatMapDataset
919 struct Attrs {
920 /// Defaults to ""
921 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
922 Attrs ret = *this;
923 ret.metadata_ = x;
924 return ret;
925 }
926
927 StringPiece metadata_ = "";
928 };
929 FlatMapDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
930 input_dataset, ::tensorflow::InputList other_arguments, const
931 NameAttrList& f, const DataTypeSlice& output_types, const
932 gtl::ArraySlice<PartialTensorShape>& output_shapes);
933 FlatMapDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
934 input_dataset, ::tensorflow::InputList other_arguments, const
935 NameAttrList& f, const DataTypeSlice& output_types, const
936 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
937 FlatMapDataset::Attrs& attrs);
938 operator ::tensorflow::Output() const { return handle; }
939 operator ::tensorflow::Input() const { return handle; }
940 ::tensorflow::Node* node() const { return handle.node(); }
941
942 static Attrs Metadata(StringPiece x) {
943 return Attrs().Metadata(x);
944 }
945
946 Operation operation;
947 ::tensorflow::Output handle;
948};
949
950/// Creates a dataset that invokes a function to generate elements.
951///
952/// Args:
953/// * scope: A Scope object
954///
955/// Returns:
956/// * `Output`: The handle tensor.
957class GeneratorDataset {
958 public:
959 /// Optional attribute setters for GeneratorDataset
960 struct Attrs {
961 /// Defaults to ""
962 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
963 Attrs ret = *this;
964 ret.metadata_ = x;
965 return ret;
966 }
967
968 StringPiece metadata_ = "";
969 };
970 GeneratorDataset(const ::tensorflow::Scope& scope, ::tensorflow::InputList
971 init_func_other_args, ::tensorflow::InputList
972 next_func_other_args, ::tensorflow::InputList
973 finalize_func_other_args, const NameAttrList& init_func, const
974 NameAttrList& next_func, const NameAttrList& finalize_func,
975 const DataTypeSlice& output_types, const
976 gtl::ArraySlice<PartialTensorShape>& output_shapes);
977 GeneratorDataset(const ::tensorflow::Scope& scope, ::tensorflow::InputList
978 init_func_other_args, ::tensorflow::InputList
979 next_func_other_args, ::tensorflow::InputList
980 finalize_func_other_args, const NameAttrList& init_func, const
981 NameAttrList& next_func, const NameAttrList& finalize_func,
982 const DataTypeSlice& output_types, const
983 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
984 GeneratorDataset::Attrs& attrs);
985 operator ::tensorflow::Output() const { return handle; }
986 operator ::tensorflow::Input() const { return handle; }
987 ::tensorflow::Node* node() const { return handle.node(); }
988
989 static Attrs Metadata(StringPiece x) {
990 return Attrs().Metadata(x);
991 }
992
993 Operation operation;
994 ::tensorflow::Output handle;
995};
996
997/// Returns the `tf.data.Options` attached to `input_dataset`.
998///
999/// Args:
1000/// * scope: A Scope object
1001/// * input_dataset: A variant tensor representing the input dataset.
1002///
1003/// Returns:
1004/// * `Output`: The serialized_options tensor.
1005class GetOptions {
1006 public:
1007 GetOptions(const ::tensorflow::Scope& scope, ::tensorflow::Input input_dataset);
1008 operator ::tensorflow::Output() const { return serialized_options; }
1009 operator ::tensorflow::Input() const { return serialized_options; }
1010 ::tensorflow::Node* node() const { return serialized_options.node(); }
1011
1012 Operation operation;
1013 ::tensorflow::Output serialized_options;
1014};
1015
1016/// Creates a dataset that applies `f` to the outputs of `input_dataset`.
1017///
1018/// Unlike MapDataset, the `f` in InterleaveDataset is expected to return
1019/// a Dataset variant, and InterleaveDataset will flatten successive
1020/// results into a single Dataset. Unlike FlatMapDataset,
1021/// InterleaveDataset will interleave sequences of up to `block_length`
1022/// consecutive elements from `cycle_length` input elements.
1023///
1024/// Args:
1025/// * scope: A Scope object
1026/// * f: A function mapping elements of `input_dataset`, concatenated with
1027/// `other_arguments`, to a Dataset variant that contains elements matching
1028/// `output_types` and `output_shapes`.
1029///
1030/// Returns:
1031/// * `Output`: The handle tensor.
1032class InterleaveDataset {
1033 public:
1034 /// Optional attribute setters for InterleaveDataset
1035 struct Attrs {
1036 /// Defaults to ""
1037 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
1038 Attrs ret = *this;
1039 ret.metadata_ = x;
1040 return ret;
1041 }
1042
1043 StringPiece metadata_ = "";
1044 };
1045 InterleaveDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1046 input_dataset, ::tensorflow::InputList other_arguments,
1047 ::tensorflow::Input cycle_length, ::tensorflow::Input
1048 block_length, const NameAttrList& f, const DataTypeSlice&
1049 output_types, const gtl::ArraySlice<PartialTensorShape>&
1050 output_shapes);
1051 InterleaveDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1052 input_dataset, ::tensorflow::InputList other_arguments,
1053 ::tensorflow::Input cycle_length, ::tensorflow::Input
1054 block_length, const NameAttrList& f, const DataTypeSlice&
1055 output_types, const gtl::ArraySlice<PartialTensorShape>&
1056 output_shapes, const InterleaveDataset::Attrs& attrs);
1057 operator ::tensorflow::Output() const { return handle; }
1058 operator ::tensorflow::Input() const { return handle; }
1059 ::tensorflow::Node* node() const { return handle.node(); }
1060
1061 static Attrs Metadata(StringPiece x) {
1062 return Attrs().Metadata(x);
1063 }
1064
1065 Operation operation;
1066 ::tensorflow::Output handle;
1067};
1068
1069/// TODO: add doc.
1070///
1071/// Args:
1072/// * scope: A Scope object
1073///
1074/// Returns:
1075/// * `Output`: The resource_handle tensor.
1076class IteratorFromStringHandleV2 {
1077 public:
1078 /// Optional attribute setters for IteratorFromStringHandleV2
1079 struct Attrs {
1080 /// Defaults to []
1081 TF_MUST_USE_RESULT Attrs OutputTypes(const DataTypeSlice& x) {
1082 Attrs ret = *this;
1083 ret.output_types_ = x;
1084 return ret;
1085 }
1086
1087 /// Defaults to []
1088 TF_MUST_USE_RESULT Attrs OutputShapes(const gtl::ArraySlice<PartialTensorShape>& x) {
1089 Attrs ret = *this;
1090 ret.output_shapes_ = x;
1091 return ret;
1092 }
1093
1094 DataTypeSlice output_types_ = {};
1095 gtl::ArraySlice<PartialTensorShape> output_shapes_ = {};
1096 };
1097 IteratorFromStringHandleV2(const ::tensorflow::Scope& scope,
1098 ::tensorflow::Input string_handle);
1099 IteratorFromStringHandleV2(const ::tensorflow::Scope& scope,
1100 ::tensorflow::Input string_handle, const
1101 IteratorFromStringHandleV2::Attrs& attrs);
1102 operator ::tensorflow::Output() const { return resource_handle; }
1103 operator ::tensorflow::Input() const { return resource_handle; }
1104 ::tensorflow::Node* node() const { return resource_handle.node(); }
1105
1106 static Attrs OutputTypes(const DataTypeSlice& x) {
1107 return Attrs().OutputTypes(x);
1108 }
1109 static Attrs OutputShapes(const gtl::ArraySlice<PartialTensorShape>& x) {
1110 return Attrs().OutputShapes(x);
1111 }
1112
1113 Operation operation;
1114 ::tensorflow::Output resource_handle;
1115};
1116
1117/// TODO: add doc.
1118///
1119/// Args:
1120/// * scope: A Scope object
1121///
1122/// Returns:
1123/// * `Output`: The handle tensor.
1124class IteratorV2 {
1125 public:
1126 IteratorV2(const ::tensorflow::Scope& scope, StringPiece shared_name,
1127 StringPiece container, const DataTypeSlice& output_types, const
1128 gtl::ArraySlice<PartialTensorShape>& output_shapes);
1129 operator ::tensorflow::Output() const { return handle; }
1130 operator ::tensorflow::Input() const { return handle; }
1131 ::tensorflow::Node* node() const { return handle.node(); }
1132
1133 Operation operation;
1134 ::tensorflow::Output handle;
1135};
1136
1137/// Creates a dataset that applies `f` to the outputs of `input_dataset`.
1138///
1139/// Args:
1140/// * scope: A Scope object
1141///
1142/// Returns:
1143/// * `Output`: The handle tensor.
1144class MapDataset {
1145 public:
1146 /// Optional attribute setters for MapDataset
1147 struct Attrs {
1148 /// Defaults to true
1149 TF_MUST_USE_RESULT Attrs UseInterOpParallelism(bool x) {
1150 Attrs ret = *this;
1151 ret.use_inter_op_parallelism_ = x;
1152 return ret;
1153 }
1154
1155 /// Defaults to false
1156 TF_MUST_USE_RESULT Attrs PreserveCardinality(bool x) {
1157 Attrs ret = *this;
1158 ret.preserve_cardinality_ = x;
1159 return ret;
1160 }
1161
1162 /// Defaults to ""
1163 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
1164 Attrs ret = *this;
1165 ret.metadata_ = x;
1166 return ret;
1167 }
1168
1169 bool use_inter_op_parallelism_ = true;
1170 bool preserve_cardinality_ = false;
1171 StringPiece metadata_ = "";
1172 };
1173 MapDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input input_dataset,
1174 ::tensorflow::InputList other_arguments, const NameAttrList& f,
1175 const DataTypeSlice& output_types, const
1176 gtl::ArraySlice<PartialTensorShape>& output_shapes);
1177 MapDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input input_dataset,
1178 ::tensorflow::InputList other_arguments, const NameAttrList& f,
1179 const DataTypeSlice& output_types, const
1180 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
1181 MapDataset::Attrs& attrs);
1182 operator ::tensorflow::Output() const { return handle; }
1183 operator ::tensorflow::Input() const { return handle; }
1184 ::tensorflow::Node* node() const { return handle.node(); }
1185
1186 static Attrs UseInterOpParallelism(bool x) {
1187 return Attrs().UseInterOpParallelism(x);
1188 }
1189 static Attrs PreserveCardinality(bool x) {
1190 return Attrs().PreserveCardinality(x);
1191 }
1192 static Attrs Metadata(StringPiece x) {
1193 return Attrs().Metadata(x);
1194 }
1195
1196 Operation operation;
1197 ::tensorflow::Output handle;
1198};
1199
1200/// Maps a function on the list of tensors unpacked from arguments on dimension 0.
1201/// The function given by `f` is assumed to be stateless, and is executed
1202/// concurrently on all the slices; up to batch_size (i.e. the size of the 0th
1203/// dimension of each argument) functions will be scheduled at once.
1204///
1205/// The `max_intra_op_parallelism` attr, which defaults to 1, can be used to
1206/// limit the intra op parallelism. To limit inter-op parallelism, a user can
1207/// set a private threadpool on the dataset using `tf.data.Options`'s
1208/// `ThreadingOptions`.
1209///
1210/// Note that this op is not exposed to users directly, but is invoked in tf.data
1211/// rewrites.
1212///
1213/// Args:
1214/// * scope: A Scope object
1215/// * arguments: A list of tensors whose types are `Targuments`, corresponding to the inputs
1216/// the function should be mapped over.
1217/// * captured_inputs: A list of tensors whose types are `Tcaptured`, corresponding to the captured
1218/// inputs of the defun.
1219/// * output_types: A list of types.
1220/// * output_shapes: A list of shapes.
1221///
1222/// Returns:
1223/// * `OutputList`: A list of output tensors whose types are `output_types` and whose dimensions
1224/// 0 are the same as the dimensions 0 of the tensors in `arguments`, and whose
1225/// remaining dimensions correspond to those in `output_shapes`.
1226class MapDefun {
1227 public:
1228 /// Optional attribute setters for MapDefun
1229 struct Attrs {
1230 /// Defaults to 1
1231 TF_MUST_USE_RESULT Attrs MaxIntraOpParallelism(int64 x) {
1232 Attrs ret = *this;
1233 ret.max_intra_op_parallelism_ = x;
1234 return ret;
1235 }
1236
1237 int64 max_intra_op_parallelism_ = 1;
1238 };
1239 MapDefun(const ::tensorflow::Scope& scope, ::tensorflow::InputList arguments,
1240 ::tensorflow::InputList captured_inputs, const DataTypeSlice&
1241 output_types, const gtl::ArraySlice<PartialTensorShape>&
1242 output_shapes, const NameAttrList& f);
1243 MapDefun(const ::tensorflow::Scope& scope, ::tensorflow::InputList arguments,
1244 ::tensorflow::InputList captured_inputs, const DataTypeSlice&
1245 output_types, const gtl::ArraySlice<PartialTensorShape>&
1246 output_shapes, const NameAttrList& f, const MapDefun::Attrs& attrs);
1247 ::tensorflow::Output operator[](size_t index) const { return output[index]; }
1248
1249
1250 static Attrs MaxIntraOpParallelism(int64 x) {
1251 return Attrs().MaxIntraOpParallelism(x);
1252 }
1253
1254 Operation operation;
1255 ::tensorflow::OutputList output;
1256};
1257
1258/// Identity transformation that models performance.
1259///
1260/// Identity transformation that models performance.
1261///
1262/// Args:
1263/// * scope: A Scope object
1264/// * input_dataset: A variant tensor representing the input dataset.
1265///
1266/// Returns:
1267/// * `Output`: The handle tensor.
1268class ModelDataset {
1269 public:
1270 /// Optional attribute setters for ModelDataset
1271 struct Attrs {
1272 /// Defaults to 0
1273 TF_MUST_USE_RESULT Attrs Algorithm(int64 x) {
1274 Attrs ret = *this;
1275 ret.algorithm_ = x;
1276 return ret;
1277 }
1278
1279 /// Defaults to 0
1280 TF_MUST_USE_RESULT Attrs CpuBudget(int64 x) {
1281 Attrs ret = *this;
1282 ret.cpu_budget_ = x;
1283 return ret;
1284 }
1285
1286 /// Defaults to 0
1287 TF_MUST_USE_RESULT Attrs RamBudget(int64 x) {
1288 Attrs ret = *this;
1289 ret.ram_budget_ = x;
1290 return ret;
1291 }
1292
1293 int64 algorithm_ = 0;
1294 int64 cpu_budget_ = 0;
1295 int64 ram_budget_ = 0;
1296 };
1297 ModelDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1298 input_dataset, const DataTypeSlice& output_types, const
1299 gtl::ArraySlice<PartialTensorShape>& output_shapes);
1300 ModelDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1301 input_dataset, const DataTypeSlice& output_types, const
1302 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
1303 ModelDataset::Attrs& attrs);
1304 operator ::tensorflow::Output() const { return handle; }
1305 operator ::tensorflow::Input() const { return handle; }
1306 ::tensorflow::Node* node() const { return handle.node(); }
1307
1308 static Attrs Algorithm(int64 x) {
1309 return Attrs().Algorithm(x);
1310 }
1311 static Attrs CpuBudget(int64 x) {
1312 return Attrs().CpuBudget(x);
1313 }
1314 static Attrs RamBudget(int64 x) {
1315 return Attrs().RamBudget(x);
1316 }
1317
1318 Operation operation;
1319 ::tensorflow::Output handle;
1320};
1321
1322/// Creates a MultiDeviceIterator resource.
1323///
1324/// Args:
1325/// * scope: A Scope object
1326/// * devices: A list of devices the iterator works across.
1327/// * shared_name: If non-empty, this resource will be shared under the given name
1328/// across multiple sessions.
1329/// * container: If non-empty, this resource is placed in the given container.
1330/// Otherwise, a default container is used.
1331/// * output_types: The type list for the return values.
1332/// * output_shapes: The list of shapes being produced.
1333///
1334/// Returns:
1335/// * `Output`: Handle to the resource created.
1336class MultiDeviceIterator {
1337 public:
1338 MultiDeviceIterator(const ::tensorflow::Scope& scope, const
1339 gtl::ArraySlice<::tensorflow::tstring>& devices,
1340 StringPiece shared_name, StringPiece container, const
1341 DataTypeSlice& output_types, const
1342 gtl::ArraySlice<PartialTensorShape>& output_shapes);
1343 operator ::tensorflow::Output() const { return handle; }
1344 operator ::tensorflow::Input() const { return handle; }
1345 ::tensorflow::Node* node() const { return handle.node(); }
1346
1347 Operation operation;
1348 ::tensorflow::Output handle;
1349};
1350
1351/// Generates a MultiDeviceIterator resource from its provided string handle.
1352///
1353/// Args:
1354/// * scope: A Scope object
1355/// * string_handle: String representing the resource.
1356///
1357/// Optional attributes (see `Attrs`):
1358/// * output_types: The type list for the return values.
1359/// * output_shapes: The list of shapes being produced.
1360///
1361/// Returns:
1362/// * `Output`: A MultiDeviceIterator resource.
1363class MultiDeviceIteratorFromStringHandle {
1364 public:
1365 /// Optional attribute setters for MultiDeviceIteratorFromStringHandle
1366 struct Attrs {
1367 /// The type list for the return values.
1368 ///
1369 /// Defaults to []
1370 TF_MUST_USE_RESULT Attrs OutputTypes(const DataTypeSlice& x) {
1371 Attrs ret = *this;
1372 ret.output_types_ = x;
1373 return ret;
1374 }
1375
1376 /// The list of shapes being produced.
1377 ///
1378 /// Defaults to []
1379 TF_MUST_USE_RESULT Attrs OutputShapes(const gtl::ArraySlice<PartialTensorShape>& x) {
1380 Attrs ret = *this;
1381 ret.output_shapes_ = x;
1382 return ret;
1383 }
1384
1385 DataTypeSlice output_types_ = {};
1386 gtl::ArraySlice<PartialTensorShape> output_shapes_ = {};
1387 };
1388 MultiDeviceIteratorFromStringHandle(const ::tensorflow::Scope& scope,
1389 ::tensorflow::Input string_handle);
1390 MultiDeviceIteratorFromStringHandle(const ::tensorflow::Scope& scope,
1391 ::tensorflow::Input string_handle, const
1392 MultiDeviceIteratorFromStringHandle::Attrs&
1393 attrs);
1394 operator ::tensorflow::Output() const { return multi_device_iterator; }
1395 operator ::tensorflow::Input() const { return multi_device_iterator; }
1396 ::tensorflow::Node* node() const { return multi_device_iterator.node(); }
1397
1398 static Attrs OutputTypes(const DataTypeSlice& x) {
1399 return Attrs().OutputTypes(x);
1400 }
1401 static Attrs OutputShapes(const gtl::ArraySlice<PartialTensorShape>& x) {
1402 return Attrs().OutputShapes(x);
1403 }
1404
1405 Operation operation;
1406 ::tensorflow::Output multi_device_iterator;
1407};
1408
1409/// Gets next element for the provided shard number.
1410///
1411/// Args:
1412/// * scope: A Scope object
1413/// * multi_device_iterator: A MultiDeviceIterator resource.
1414/// * shard_num: Integer representing which shard to fetch data for.
1415/// * incarnation_id: Which incarnation of the MultiDeviceIterator is running.
1416/// * output_types: The type list for the return values.
1417/// * output_shapes: The list of shapes being produced.
1418///
1419/// Returns:
1420/// * `OutputList`: Result of the get_next on the dataset.
1421class MultiDeviceIteratorGetNextFromShard {
1422 public:
1423 MultiDeviceIteratorGetNextFromShard(const ::tensorflow::Scope& scope,
1424 ::tensorflow::Input multi_device_iterator,
1425 ::tensorflow::Input shard_num,
1426 ::tensorflow::Input incarnation_id, const
1427 DataTypeSlice& output_types, const
1428 gtl::ArraySlice<PartialTensorShape>&
1429 output_shapes);
1430 ::tensorflow::Output operator[](size_t index) const { return components[index]; }
1431
1432
1433 Operation operation;
1434 ::tensorflow::OutputList components;
1435};
1436
1437/// Initializes the multi device iterator with the given dataset.
1438///
1439/// Args:
1440/// * scope: A Scope object
1441/// * dataset: Dataset to be iterated upon.
1442/// * multi_device_iterator: A MultiDeviceIteratorResource.
1443/// * max_buffer_size: The maximum size of the host side per device buffer to keep.
1444///
1445/// Returns:
1446/// * `Output`: An int64 indicating which incarnation of the MultiDeviceIterator
1447/// is running.
1448class MultiDeviceIteratorInit {
1449 public:
1450 MultiDeviceIteratorInit(const ::tensorflow::Scope& scope, ::tensorflow::Input
1451 dataset, ::tensorflow::Input multi_device_iterator,
1452 ::tensorflow::Input max_buffer_size);
1453 operator ::tensorflow::Output() const { return incarnation_id; }
1454 operator ::tensorflow::Input() const { return incarnation_id; }
1455 ::tensorflow::Node* node() const { return incarnation_id.node(); }
1456
1457 Operation operation;
1458 ::tensorflow::Output incarnation_id;
1459};
1460
1461/// Produces a string handle for the given MultiDeviceIterator.
1462///
1463/// Args:
1464/// * scope: A Scope object
1465/// * multi_device_iterator: A MultiDeviceIterator resource.
1466///
1467/// Returns:
1468/// * `Output`: A string representing the resource.
1469class MultiDeviceIteratorToStringHandle {
1470 public:
1471 MultiDeviceIteratorToStringHandle(const ::tensorflow::Scope& scope,
1472 ::tensorflow::Input multi_device_iterator);
1473 operator ::tensorflow::Output() const { return string_handle; }
1474 operator ::tensorflow::Input() const { return string_handle; }
1475 ::tensorflow::Node* node() const { return string_handle.node(); }
1476
1477 Operation operation;
1478 ::tensorflow::Output string_handle;
1479};
1480
1481/// Creates a dataset by applying optimizations to `input_dataset`.
1482///
1483/// Creates a dataset by applying optimizations to `input_dataset`.
1484///
1485/// Args:
1486/// * scope: A Scope object
1487/// * input_dataset: A variant tensor representing the input dataset.
1488/// * optimizations: A `tf.string` vector `tf.Tensor` identifying optimizations to use.
1489///
1490/// Returns:
1491/// * `Output`: The handle tensor.
1492class OptimizeDataset {
1493 public:
1494 /// Optional attribute setters for OptimizeDataset
1495 struct Attrs {
1496 /// Defaults to []
1497 TF_MUST_USE_RESULT Attrs OptimizationConfigs(const gtl::ArraySlice<::tensorflow::tstring>& x) {
1498 Attrs ret = *this;
1499 ret.optimization_configs_ = x;
1500 return ret;
1501 }
1502
1503 gtl::ArraySlice<::tensorflow::tstring> optimization_configs_ = {};
1504 };
1505 OptimizeDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1506 input_dataset, ::tensorflow::Input optimizations, const
1507 DataTypeSlice& output_types, const
1508 gtl::ArraySlice<PartialTensorShape>& output_shapes);
1509 OptimizeDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1510 input_dataset, ::tensorflow::Input optimizations, const
1511 DataTypeSlice& output_types, const
1512 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
1513 OptimizeDataset::Attrs& attrs);
1514 operator ::tensorflow::Output() const { return handle; }
1515 operator ::tensorflow::Input() const { return handle; }
1516 ::tensorflow::Node* node() const { return handle.node(); }
1517
1518 static Attrs OptimizationConfigs(const gtl::ArraySlice<::tensorflow::tstring>& x) {
1519 return Attrs().OptimizationConfigs(x);
1520 }
1521
1522 Operation operation;
1523 ::tensorflow::Output handle;
1524};
1525
1526/// Creates a dataset by applying related optimizations to `input_dataset`.
1527///
1528/// Creates a dataset by applying related optimizations to `input_dataset`.
1529///
1530/// Args:
1531/// * scope: A Scope object
1532/// * input_dataset: A variant tensor representing the input dataset.
1533/// * optimizations_enabled: A `tf.string` vector `tf.Tensor` identifying user enabled optimizations.
1534/// * optimizations_disabled: A `tf.string` vector `tf.Tensor` identifying user disabled optimizations.
1535/// * optimizations_default: A `tf.string` vector `tf.Tensor` identifying optimizations by default.
1536///
1537/// Returns:
1538/// * `Output`: The handle tensor.
1539class OptimizeDatasetV2 {
1540 public:
1541 /// Optional attribute setters for OptimizeDatasetV2
1542 struct Attrs {
1543 /// Defaults to []
1544 TF_MUST_USE_RESULT Attrs OptimizationConfigs(const gtl::ArraySlice<::tensorflow::tstring>& x) {
1545 Attrs ret = *this;
1546 ret.optimization_configs_ = x;
1547 return ret;
1548 }
1549
1550 gtl::ArraySlice<::tensorflow::tstring> optimization_configs_ = {};
1551 };
1552 OptimizeDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
1553 input_dataset, ::tensorflow::Input optimizations_enabled,
1554 ::tensorflow::Input optimizations_disabled,
1555 ::tensorflow::Input optimizations_default, const
1556 DataTypeSlice& output_types, const
1557 gtl::ArraySlice<PartialTensorShape>& output_shapes);
1558 OptimizeDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
1559 input_dataset, ::tensorflow::Input optimizations_enabled,
1560 ::tensorflow::Input optimizations_disabled,
1561 ::tensorflow::Input optimizations_default, const
1562 DataTypeSlice& output_types, const
1563 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
1564 OptimizeDatasetV2::Attrs& attrs);
1565 operator ::tensorflow::Output() const { return handle; }
1566 operator ::tensorflow::Input() const { return handle; }
1567 ::tensorflow::Node* node() const { return handle.node(); }
1568
1569 static Attrs OptimizationConfigs(const gtl::ArraySlice<::tensorflow::tstring>& x) {
1570 return Attrs().OptimizationConfigs(x);
1571 }
1572
1573 Operation operation;
1574 ::tensorflow::Output handle;
1575};
1576
1577/// Creates a dataset by attaching tf.data.Options to `input_dataset`.
1578///
1579/// Args:
1580/// * scope: A Scope object
1581/// * input_dataset: A variant tensor representing the input dataset.
1582/// * serialized_options: A `tf.string` scalar `tf.Tensor` of serialized `tf.data.Options` protocol buffer.
1583///
1584/// Returns:
1585/// * `Output`: The handle tensor.
1586class OptionsDataset {
1587 public:
1588 /// Optional attribute setters for OptionsDataset
1589 struct Attrs {
1590 /// Defaults to ""
1591 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
1592 Attrs ret = *this;
1593 ret.metadata_ = x;
1594 return ret;
1595 }
1596
1597 StringPiece metadata_ = "";
1598 };
1599 OptionsDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1600 input_dataset, StringPiece serialized_options, const
1601 DataTypeSlice& output_types, const
1602 gtl::ArraySlice<PartialTensorShape>& output_shapes);
1603 OptionsDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1604 input_dataset, StringPiece serialized_options, const
1605 DataTypeSlice& output_types, const
1606 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
1607 OptionsDataset::Attrs& attrs);
1608 operator ::tensorflow::Output() const { return handle; }
1609 operator ::tensorflow::Input() const { return handle; }
1610 ::tensorflow::Node* node() const { return handle.node(); }
1611
1612 static Attrs Metadata(StringPiece x) {
1613 return Attrs().Metadata(x);
1614 }
1615
1616 Operation operation;
1617 ::tensorflow::Output handle;
1618};
1619
1620/// Creates a dataset that batches and pads `batch_size` elements from the input.
1621///
1622/// Args:
1623/// * scope: A Scope object
1624/// * batch_size: A scalar representing the number of elements to accumulate in a
1625/// batch.
1626/// * padded_shapes: A list of int64 tensors representing the desired padded shapes
1627/// of the corresponding output components. These shapes may be partially
1628/// specified, using `-1` to indicate that a particular dimension should be
1629/// padded to the maximum size of all batch elements.
1630/// * padding_values: A list of scalars containing the padding value to use for
1631/// each of the outputs.
1632///
1633/// Returns:
1634/// * `Output`: The handle tensor.
1635class PaddedBatchDataset {
1636 public:
1637 /// Optional attribute setters for PaddedBatchDataset
1638 struct Attrs {
1639 /// Defaults to ""
1640 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
1641 Attrs ret = *this;
1642 ret.metadata_ = x;
1643 return ret;
1644 }
1645
1646 StringPiece metadata_ = "";
1647 };
1648 PaddedBatchDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1649 input_dataset, ::tensorflow::Input batch_size,
1650 ::tensorflow::InputList padded_shapes,
1651 ::tensorflow::InputList padding_values, const
1652 gtl::ArraySlice<PartialTensorShape>& output_shapes);
1653 PaddedBatchDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1654 input_dataset, ::tensorflow::Input batch_size,
1655 ::tensorflow::InputList padded_shapes,
1656 ::tensorflow::InputList padding_values, const
1657 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
1658 PaddedBatchDataset::Attrs& attrs);
1659 operator ::tensorflow::Output() const { return handle; }
1660 operator ::tensorflow::Input() const { return handle; }
1661 ::tensorflow::Node* node() const { return handle.node(); }
1662
1663 static Attrs Metadata(StringPiece x) {
1664 return Attrs().Metadata(x);
1665 }
1666
1667 Operation operation;
1668 ::tensorflow::Output handle;
1669};
1670
1671/// Creates a dataset that batches and pads `batch_size` elements from the input.
1672///
1673/// Args:
1674/// * scope: A Scope object
1675/// * batch_size: A scalar representing the number of elements to accumulate in a
1676/// batch.
1677/// * padded_shapes: A list of int64 tensors representing the desired padded shapes
1678/// of the corresponding output components. These shapes may be partially
1679/// specified, using `-1` to indicate that a particular dimension should be
1680/// padded to the maximum size of all batch elements.
1681/// * padding_values: A list of scalars containing the padding value to use for
1682/// each of the outputs.
1683/// * drop_remainder: A scalar representing whether the last batch should be dropped in case its size
1684/// is smaller than desired.
1685///
1686/// Returns:
1687/// * `Output`: The handle tensor.
1688class PaddedBatchDatasetV2 {
1689 public:
1690 /// Optional attribute setters for PaddedBatchDatasetV2
1691 struct Attrs {
1692 /// Defaults to false
1693 TF_MUST_USE_RESULT Attrs ParallelCopy(bool x) {
1694 Attrs ret = *this;
1695 ret.parallel_copy_ = x;
1696 return ret;
1697 }
1698
1699 /// Defaults to ""
1700 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
1701 Attrs ret = *this;
1702 ret.metadata_ = x;
1703 return ret;
1704 }
1705
1706 bool parallel_copy_ = false;
1707 StringPiece metadata_ = "";
1708 };
1709 PaddedBatchDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
1710 input_dataset, ::tensorflow::Input batch_size,
1711 ::tensorflow::InputList padded_shapes,
1712 ::tensorflow::InputList padding_values,
1713 ::tensorflow::Input drop_remainder, const
1714 gtl::ArraySlice<PartialTensorShape>& output_shapes);
1715 PaddedBatchDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
1716 input_dataset, ::tensorflow::Input batch_size,
1717 ::tensorflow::InputList padded_shapes,
1718 ::tensorflow::InputList padding_values,
1719 ::tensorflow::Input drop_remainder, const
1720 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
1721 PaddedBatchDatasetV2::Attrs& attrs);
1722 operator ::tensorflow::Output() const { return handle; }
1723 operator ::tensorflow::Input() const { return handle; }
1724 ::tensorflow::Node* node() const { return handle.node(); }
1725
1726 static Attrs ParallelCopy(bool x) {
1727 return Attrs().ParallelCopy(x);
1728 }
1729 static Attrs Metadata(StringPiece x) {
1730 return Attrs().Metadata(x);
1731 }
1732
1733 Operation operation;
1734 ::tensorflow::Output handle;
1735};
1736
1737/// TODO: add doc.
1738///
1739/// Args:
1740/// * scope: A Scope object
1741///
1742/// Returns:
1743/// * `Output`: The handle tensor.
1744class ParallelBatchDataset {
1745 public:
1746 /// Optional attribute setters for ParallelBatchDataset
1747 struct Attrs {
1748 /// Defaults to false
1749 TF_MUST_USE_RESULT Attrs ParallelCopy(bool x) {
1750 Attrs ret = *this;
1751 ret.parallel_copy_ = x;
1752 return ret;
1753 }
1754
1755 /// Defaults to "default"
1756 TF_MUST_USE_RESULT Attrs Deterministic(StringPiece x) {
1757 Attrs ret = *this;
1758 ret.deterministic_ = x;
1759 return ret;
1760 }
1761
1762 /// Defaults to ""
1763 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
1764 Attrs ret = *this;
1765 ret.metadata_ = x;
1766 return ret;
1767 }
1768
1769 bool parallel_copy_ = false;
1770 StringPiece deterministic_ = "default";
1771 StringPiece metadata_ = "";
1772 };
1773 ParallelBatchDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1774 input_dataset, ::tensorflow::Input batch_size,
1775 ::tensorflow::Input num_parallel_calls,
1776 ::tensorflow::Input drop_remainder, const DataTypeSlice&
1777 output_types, const gtl::ArraySlice<PartialTensorShape>&
1778 output_shapes);
1779 ParallelBatchDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1780 input_dataset, ::tensorflow::Input batch_size,
1781 ::tensorflow::Input num_parallel_calls,
1782 ::tensorflow::Input drop_remainder, const DataTypeSlice&
1783 output_types, const gtl::ArraySlice<PartialTensorShape>&
1784 output_shapes, const ParallelBatchDataset::Attrs& attrs);
1785 operator ::tensorflow::Output() const { return handle; }
1786 operator ::tensorflow::Input() const { return handle; }
1787 ::tensorflow::Node* node() const { return handle.node(); }
1788
1789 static Attrs ParallelCopy(bool x) {
1790 return Attrs().ParallelCopy(x);
1791 }
1792 static Attrs Deterministic(StringPiece x) {
1793 return Attrs().Deterministic(x);
1794 }
1795 static Attrs Metadata(StringPiece x) {
1796 return Attrs().Metadata(x);
1797 }
1798
1799 Operation operation;
1800 ::tensorflow::Output handle;
1801};
1802
1803/// Creates a dataset containing elements of `input_dataset` matching `predicate`.
1804///
1805/// The `predicate` function must return a scalar boolean and accept the
1806/// following arguments:
1807///
1808/// * One tensor for each component of an element of `input_dataset`.
1809/// * One tensor for each value in `other_arguments`.
1810///
1811/// Unlike a "FilterDataset", which applies `predicate` sequentially, this dataset
1812/// invokes up to `num_parallel_calls` copies of `predicate` in parallel.
1813///
1814///
1815/// Args:
1816/// * scope: A Scope object
1817/// * other_arguments: A list of tensors, typically values that were captured when
1818/// building a closure for `predicate`.
1819/// * num_parallel_calls: The number of concurrent invocations of `predicate` that process
1820/// elements from `input_dataset` in parallel.
1821/// * predicate: A function returning a scalar boolean.
1822///
1823/// Optional attributes (see `Attrs`):
1824/// * deterministic: A string indicating the op-level determinism to use. Deterministic controls
1825/// whether the interleave is allowed to return elements out of order if the next
1826/// element to be returned isn't available, but a later element is. Options are
1827/// "true", "false", and "default". "default" indicates that determinism should be
1828/// decided by the `experimental_deterministic` parameter of `tf.data.Options`.
1829///
1830/// Returns:
1831/// * `Output`: The handle tensor.
1832class ParallelFilterDataset {
1833 public:
1834 /// Optional attribute setters for ParallelFilterDataset
1835 struct Attrs {
1836 /// A string indicating the op-level determinism to use. Deterministic controls
1837 /// whether the interleave is allowed to return elements out of order if the next
1838 /// element to be returned isn't available, but a later element is. Options are
1839 /// "true", "false", and "default". "default" indicates that determinism should be
1840 /// decided by the `experimental_deterministic` parameter of `tf.data.Options`.
1841 ///
1842 /// Defaults to "default"
1843 TF_MUST_USE_RESULT Attrs Deterministic(StringPiece x) {
1844 Attrs ret = *this;
1845 ret.deterministic_ = x;
1846 return ret;
1847 }
1848
1849 /// Defaults to ""
1850 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
1851 Attrs ret = *this;
1852 ret.metadata_ = x;
1853 return ret;
1854 }
1855
1856 StringPiece deterministic_ = "default";
1857 StringPiece metadata_ = "";
1858 };
1859 ParallelFilterDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1860 input_dataset, ::tensorflow::InputList other_arguments,
1861 ::tensorflow::Input num_parallel_calls, const
1862 NameAttrList& predicate, const DataTypeSlice&
1863 output_types, const gtl::ArraySlice<PartialTensorShape>&
1864 output_shapes);
1865 ParallelFilterDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
1866 input_dataset, ::tensorflow::InputList other_arguments,
1867 ::tensorflow::Input num_parallel_calls, const
1868 NameAttrList& predicate, const DataTypeSlice&
1869 output_types, const gtl::ArraySlice<PartialTensorShape>&
1870 output_shapes, const ParallelFilterDataset::Attrs& attrs);
1871 operator ::tensorflow::Output() const { return handle; }
1872 operator ::tensorflow::Input() const { return handle; }
1873 ::tensorflow::Node* node() const { return handle.node(); }
1874
1875 static Attrs Deterministic(StringPiece x) {
1876 return Attrs().Deterministic(x);
1877 }
1878 static Attrs Metadata(StringPiece x) {
1879 return Attrs().Metadata(x);
1880 }
1881
1882 Operation operation;
1883 ::tensorflow::Output handle;
1884};
1885
1886/// Creates a dataset that applies `f` to the outputs of `input_dataset`.
1887///
1888/// The resulting dataset is similar to the `InterleaveDataset`, except that the
1889/// dataset will fetch records from the interleaved datasets in parallel.
1890///
1891/// The `tf.data` Python API creates instances of this op from
1892/// `Dataset.interleave()` when the `num_parallel_calls` parameter of that method
1893/// is set to any value other than `None`.
1894///
1895/// By default, the output of this dataset will be deterministic, which may result
1896/// in the dataset blocking if the next data item to be returned isn't available.
1897/// In order to avoid head-of-line blocking, one can set the
1898/// `experimental_deterministic` parameter of `tf.data.Options` to `False`,
1899/// which can improve performance at the expense of non-determinism.
1900///
1901/// Args:
1902/// * scope: A Scope object
1903/// * input_dataset: Dataset that produces a stream of arguments for the function `f`.
1904/// * other_arguments: Additional arguments to pass to `f` beyond those produced by `input_dataset`.
1905/// Evaluated once when the dataset is instantiated.
1906/// * cycle_length: Number of datasets (each created by applying `f` to the elements of
1907/// `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a
1908/// round-robin fashion.
1909/// * block_length: Number of elements at a time to produce from each interleaved invocation of a
1910/// dataset returned by `f`.
1911/// * num_parallel_calls: Determines the number of threads that should be used for fetching data from
1912/// input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`
1913/// constant can be used to indicate that the level of parallelism should be autotuned.
1914/// * f: A function mapping elements of `input_dataset`, concatenated with
1915/// `other_arguments`, to a Dataset variant that contains elements matching
1916/// `output_types` and `output_shapes`.
1917///
1918/// Returns:
1919/// * `Output`: The handle tensor.
1920class ParallelInterleaveDatasetV2 {
1921 public:
1922 /// Optional attribute setters for ParallelInterleaveDatasetV2
1923 struct Attrs {
1924 /// Defaults to false
1925 TF_MUST_USE_RESULT Attrs Sloppy(bool x) {
1926 Attrs ret = *this;
1927 ret.sloppy_ = x;
1928 return ret;
1929 }
1930
1931 /// Defaults to ""
1932 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
1933 Attrs ret = *this;
1934 ret.metadata_ = x;
1935 return ret;
1936 }
1937
1938 bool sloppy_ = false;
1939 StringPiece metadata_ = "";
1940 };
1941 ParallelInterleaveDatasetV2(const ::tensorflow::Scope& scope,
1942 ::tensorflow::Input input_dataset,
1943 ::tensorflow::InputList other_arguments,
1944 ::tensorflow::Input cycle_length,
1945 ::tensorflow::Input block_length,
1946 ::tensorflow::Input num_parallel_calls, const
1947 NameAttrList& f, const DataTypeSlice& output_types,
1948 const gtl::ArraySlice<PartialTensorShape>&
1949 output_shapes);
1950 ParallelInterleaveDatasetV2(const ::tensorflow::Scope& scope,
1951 ::tensorflow::Input input_dataset,
1952 ::tensorflow::InputList other_arguments,
1953 ::tensorflow::Input cycle_length,
1954 ::tensorflow::Input block_length,
1955 ::tensorflow::Input num_parallel_calls, const
1956 NameAttrList& f, const DataTypeSlice& output_types,
1957 const gtl::ArraySlice<PartialTensorShape>&
1958 output_shapes, const
1959 ParallelInterleaveDatasetV2::Attrs& attrs);
1960 operator ::tensorflow::Output() const { return handle; }
1961 operator ::tensorflow::Input() const { return handle; }
1962 ::tensorflow::Node* node() const { return handle.node(); }
1963
1964 static Attrs Sloppy(bool x) {
1965 return Attrs().Sloppy(x);
1966 }
1967 static Attrs Metadata(StringPiece x) {
1968 return Attrs().Metadata(x);
1969 }
1970
1971 Operation operation;
1972 ::tensorflow::Output handle;
1973};
1974
1975/// Creates a dataset that applies `f` to the outputs of `input_dataset`.
1976///
1977/// The resulting dataset is similar to the `InterleaveDataset`, except that the
1978/// dataset will fetch records from the interleaved datasets in parallel.
1979///
1980/// The `tf.data` Python API creates instances of this op from
1981/// `Dataset.interleave()` when the `num_parallel_calls` parameter of that method
1982/// is set to any value other than `None`.
1983///
1984/// By default, the output of this dataset will be deterministic, which may result
1985/// in the dataset blocking if the next data item to be returned isn't available.
1986/// In order to avoid head-of-line blocking, one can either set the `deterministic`
1987/// attribute to "false", or leave it as "default" and set the
1988/// `experimental_deterministic` parameter of `tf.data.Options` to `False`.
1989/// This can improve performance at the expense of non-determinism.
1990///
1991/// Args:
1992/// * scope: A Scope object
1993/// * input_dataset: Dataset that produces a stream of arguments for the function `f`.
1994/// * other_arguments: Additional arguments to pass to `f` beyond those produced by `input_dataset`.
1995/// Evaluated once when the dataset is instantiated.
1996/// * cycle_length: Number of datasets (each created by applying `f` to the elements of
1997/// `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a
1998/// round-robin fashion.
1999/// * block_length: Number of elements at a time to produce from each interleaved invocation of a
2000/// dataset returned by `f`.
2001/// * num_parallel_calls: Determines the number of threads that should be used for fetching data from
2002/// input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`
2003/// constant can be used to indicate that the level of parallelism should be autotuned.
2004/// * f: A function mapping elements of `input_dataset`, concatenated with
2005/// `other_arguments`, to a Dataset variant that contains elements matching
2006/// `output_types` and `output_shapes`.
2007///
2008/// Optional attributes (see `Attrs`):
2009/// * deterministic: A string indicating the op-level determinism to use. Deterministic controls
2010/// whether the interleave is allowed to return elements out of order if the next
2011/// element to be returned isn't available, but a later element is. Options are
2012/// "true", "false", and "default". "default" indicates that determinism should be
2013/// decided by the `experimental_deterministic` parameter of `tf.data.Options`.
2014///
2015/// Returns:
2016/// * `Output`: The handle tensor.
2017class ParallelInterleaveDatasetV3 {
2018 public:
2019 /// Optional attribute setters for ParallelInterleaveDatasetV3
2020 struct Attrs {
2021 /// A string indicating the op-level determinism to use. Deterministic controls
2022 /// whether the interleave is allowed to return elements out of order if the next
2023 /// element to be returned isn't available, but a later element is. Options are
2024 /// "true", "false", and "default". "default" indicates that determinism should be
2025 /// decided by the `experimental_deterministic` parameter of `tf.data.Options`.
2026 ///
2027 /// Defaults to "default"
2028 TF_MUST_USE_RESULT Attrs Deterministic(StringPiece x) {
2029 Attrs ret = *this;
2030 ret.deterministic_ = x;
2031 return ret;
2032 }
2033
2034 /// Defaults to ""
2035 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2036 Attrs ret = *this;
2037 ret.metadata_ = x;
2038 return ret;
2039 }
2040
2041 StringPiece deterministic_ = "default";
2042 StringPiece metadata_ = "";
2043 };
2044 ParallelInterleaveDatasetV3(const ::tensorflow::Scope& scope,
2045 ::tensorflow::Input input_dataset,
2046 ::tensorflow::InputList other_arguments,
2047 ::tensorflow::Input cycle_length,
2048 ::tensorflow::Input block_length,
2049 ::tensorflow::Input num_parallel_calls, const
2050 NameAttrList& f, const DataTypeSlice& output_types,
2051 const gtl::ArraySlice<PartialTensorShape>&
2052 output_shapes);
2053 ParallelInterleaveDatasetV3(const ::tensorflow::Scope& scope,
2054 ::tensorflow::Input input_dataset,
2055 ::tensorflow::InputList other_arguments,
2056 ::tensorflow::Input cycle_length,
2057 ::tensorflow::Input block_length,
2058 ::tensorflow::Input num_parallel_calls, const
2059 NameAttrList& f, const DataTypeSlice& output_types,
2060 const gtl::ArraySlice<PartialTensorShape>&
2061 output_shapes, const
2062 ParallelInterleaveDatasetV3::Attrs& attrs);
2063 operator ::tensorflow::Output() const { return handle; }
2064 operator ::tensorflow::Input() const { return handle; }
2065 ::tensorflow::Node* node() const { return handle.node(); }
2066
2067 static Attrs Deterministic(StringPiece x) {
2068 return Attrs().Deterministic(x);
2069 }
2070 static Attrs Metadata(StringPiece x) {
2071 return Attrs().Metadata(x);
2072 }
2073
2074 Operation operation;
2075 ::tensorflow::Output handle;
2076};
2077
2078/// Creates a dataset that applies `f` to the outputs of `input_dataset`.
2079///
2080/// The resulting dataset is similar to the `InterleaveDataset`, except that the
2081/// dataset will fetch records from the interleaved datasets in parallel.
2082///
2083/// The `tf.data` Python API creates instances of this op from
2084/// `Dataset.interleave()` when the `num_parallel_calls` parameter of that method
2085/// is set to any value other than `None`.
2086///
2087/// By default, the output of this dataset will be deterministic, which may result
2088/// in the dataset blocking if the next data item to be returned isn't available.
2089/// In order to avoid head-of-line blocking, one can either set the `deterministic`
2090/// attribute to "false", or leave it as "default" and set the
2091/// `experimental_deterministic` parameter of `tf.data.Options` to `False`.
2092/// This can improve performance at the expense of non-determinism.
2093///
2094/// Args:
2095/// * scope: A Scope object
2096/// * input_dataset: Dataset that produces a stream of arguments for the function `f`.
2097/// * other_arguments: Additional arguments to pass to `f` beyond those produced by `input_dataset`.
2098/// Evaluated once when the dataset is instantiated.
2099/// * cycle_length: Number of datasets (each created by applying `f` to the elements of
2100/// `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a
2101/// round-robin fashion.
2102/// * block_length: Number of elements at a time to produce from each interleaved invocation of a
2103/// dataset returned by `f`.
2104/// * buffer_output_elements: The number of elements each iterator being interleaved should buffer (similar
2105/// to the `.prefetch()` transformation for each interleaved iterator).
2106/// * prefetch_input_elements: Determines the number of iterators to prefetch, allowing buffers to warm up and
2107/// data to be pre-fetched without blocking the main thread.
2108/// * num_parallel_calls: Determines the number of threads that should be used for fetching data from
2109/// input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`
2110/// constant can be used to indicate that the level of parallelism should be autotuned.
2111/// * f: A function mapping elements of `input_dataset`, concatenated with
2112/// `other_arguments`, to a Dataset variant that contains elements matching
2113/// `output_types` and `output_shapes`.
2114///
2115/// Optional attributes (see `Attrs`):
2116/// * deterministic: A string indicating the op-level determinism to use. Deterministic controls
2117/// whether the interleave is allowed to return elements out of order if the next
2118/// element to be returned isn't available, but a later element is. Options are
2119/// "true", "false", and "default". "default" indicates that determinism should be
2120/// decided by the `experimental_deterministic` parameter of `tf.data.Options`.
2121///
2122/// Returns:
2123/// * `Output`: The handle tensor.
2124class ParallelInterleaveDatasetV4 {
2125 public:
2126 /// Optional attribute setters for ParallelInterleaveDatasetV4
2127 struct Attrs {
2128 /// A string indicating the op-level determinism to use. Deterministic controls
2129 /// whether the interleave is allowed to return elements out of order if the next
2130 /// element to be returned isn't available, but a later element is. Options are
2131 /// "true", "false", and "default". "default" indicates that determinism should be
2132 /// decided by the `experimental_deterministic` parameter of `tf.data.Options`.
2133 ///
2134 /// Defaults to "default"
2135 TF_MUST_USE_RESULT Attrs Deterministic(StringPiece x) {
2136 Attrs ret = *this;
2137 ret.deterministic_ = x;
2138 return ret;
2139 }
2140
2141 /// Defaults to ""
2142 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2143 Attrs ret = *this;
2144 ret.metadata_ = x;
2145 return ret;
2146 }
2147
2148 StringPiece deterministic_ = "default";
2149 StringPiece metadata_ = "";
2150 };
2151 ParallelInterleaveDatasetV4(const ::tensorflow::Scope& scope,
2152 ::tensorflow::Input input_dataset,
2153 ::tensorflow::InputList other_arguments,
2154 ::tensorflow::Input cycle_length,
2155 ::tensorflow::Input block_length,
2156 ::tensorflow::Input buffer_output_elements,
2157 ::tensorflow::Input prefetch_input_elements,
2158 ::tensorflow::Input num_parallel_calls, const
2159 NameAttrList& f, const DataTypeSlice& output_types,
2160 const gtl::ArraySlice<PartialTensorShape>&
2161 output_shapes);
2162 ParallelInterleaveDatasetV4(const ::tensorflow::Scope& scope,
2163 ::tensorflow::Input input_dataset,
2164 ::tensorflow::InputList other_arguments,
2165 ::tensorflow::Input cycle_length,
2166 ::tensorflow::Input block_length,
2167 ::tensorflow::Input buffer_output_elements,
2168 ::tensorflow::Input prefetch_input_elements,
2169 ::tensorflow::Input num_parallel_calls, const
2170 NameAttrList& f, const DataTypeSlice& output_types,
2171 const gtl::ArraySlice<PartialTensorShape>&
2172 output_shapes, const
2173 ParallelInterleaveDatasetV4::Attrs& attrs);
2174 operator ::tensorflow::Output() const { return handle; }
2175 operator ::tensorflow::Input() const { return handle; }
2176 ::tensorflow::Node* node() const { return handle.node(); }
2177
2178 static Attrs Deterministic(StringPiece x) {
2179 return Attrs().Deterministic(x);
2180 }
2181 static Attrs Metadata(StringPiece x) {
2182 return Attrs().Metadata(x);
2183 }
2184
2185 Operation operation;
2186 ::tensorflow::Output handle;
2187};
2188
2189/// Creates a dataset that applies `f` to the outputs of `input_dataset`.
2190///
2191/// Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
2192/// to `num_parallel_calls` copies of `f` in parallel.
2193///
2194/// Args:
2195/// * scope: A Scope object
2196/// * num_parallel_calls: The number of concurrent invocations of `f` that process
2197/// elements from `input_dataset` in parallel.
2198///
2199/// Returns:
2200/// * `Output`: The handle tensor.
2201class ParallelMapDataset {
2202 public:
2203 /// Optional attribute setters for ParallelMapDataset
2204 struct Attrs {
2205 /// Defaults to true
2206 TF_MUST_USE_RESULT Attrs UseInterOpParallelism(bool x) {
2207 Attrs ret = *this;
2208 ret.use_inter_op_parallelism_ = x;
2209 return ret;
2210 }
2211
2212 /// Defaults to false
2213 TF_MUST_USE_RESULT Attrs Sloppy(bool x) {
2214 Attrs ret = *this;
2215 ret.sloppy_ = x;
2216 return ret;
2217 }
2218
2219 /// Defaults to false
2220 TF_MUST_USE_RESULT Attrs PreserveCardinality(bool x) {
2221 Attrs ret = *this;
2222 ret.preserve_cardinality_ = x;
2223 return ret;
2224 }
2225
2226 /// Defaults to ""
2227 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2228 Attrs ret = *this;
2229 ret.metadata_ = x;
2230 return ret;
2231 }
2232
2233 bool use_inter_op_parallelism_ = true;
2234 bool sloppy_ = false;
2235 bool preserve_cardinality_ = false;
2236 StringPiece metadata_ = "";
2237 };
2238 ParallelMapDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2239 input_dataset, ::tensorflow::InputList other_arguments,
2240 ::tensorflow::Input num_parallel_calls, const NameAttrList&
2241 f, const DataTypeSlice& output_types, const
2242 gtl::ArraySlice<PartialTensorShape>& output_shapes);
2243 ParallelMapDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2244 input_dataset, ::tensorflow::InputList other_arguments,
2245 ::tensorflow::Input num_parallel_calls, const NameAttrList&
2246 f, const DataTypeSlice& output_types, const
2247 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
2248 ParallelMapDataset::Attrs& attrs);
2249 operator ::tensorflow::Output() const { return handle; }
2250 operator ::tensorflow::Input() const { return handle; }
2251 ::tensorflow::Node* node() const { return handle.node(); }
2252
2253 static Attrs UseInterOpParallelism(bool x) {
2254 return Attrs().UseInterOpParallelism(x);
2255 }
2256 static Attrs Sloppy(bool x) {
2257 return Attrs().Sloppy(x);
2258 }
2259 static Attrs PreserveCardinality(bool x) {
2260 return Attrs().PreserveCardinality(x);
2261 }
2262 static Attrs Metadata(StringPiece x) {
2263 return Attrs().Metadata(x);
2264 }
2265
2266 Operation operation;
2267 ::tensorflow::Output handle;
2268};
2269
2270/// Creates a dataset that applies `f` to the outputs of `input_dataset`.
2271///
2272/// Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
2273/// to `num_parallel_calls` copies of `f` in parallel.
2274///
2275/// Args:
2276/// * scope: A Scope object
2277/// * num_parallel_calls: The number of concurrent invocations of `f` that process
2278/// elements from `input_dataset` in parallel.
2279///
2280/// Returns:
2281/// * `Output`: The handle tensor.
2282class ParallelMapDatasetV2 {
2283 public:
2284 /// Optional attribute setters for ParallelMapDatasetV2
2285 struct Attrs {
2286 /// Defaults to true
2287 TF_MUST_USE_RESULT Attrs UseInterOpParallelism(bool x) {
2288 Attrs ret = *this;
2289 ret.use_inter_op_parallelism_ = x;
2290 return ret;
2291 }
2292
2293 /// Defaults to "default"
2294 TF_MUST_USE_RESULT Attrs Deterministic(StringPiece x) {
2295 Attrs ret = *this;
2296 ret.deterministic_ = x;
2297 return ret;
2298 }
2299
2300 /// Defaults to false
2301 TF_MUST_USE_RESULT Attrs PreserveCardinality(bool x) {
2302 Attrs ret = *this;
2303 ret.preserve_cardinality_ = x;
2304 return ret;
2305 }
2306
2307 /// Defaults to ""
2308 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2309 Attrs ret = *this;
2310 ret.metadata_ = x;
2311 return ret;
2312 }
2313
2314 bool use_inter_op_parallelism_ = true;
2315 StringPiece deterministic_ = "default";
2316 bool preserve_cardinality_ = false;
2317 StringPiece metadata_ = "";
2318 };
2319 ParallelMapDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
2320 input_dataset, ::tensorflow::InputList other_arguments,
2321 ::tensorflow::Input num_parallel_calls, const
2322 NameAttrList& f, const DataTypeSlice& output_types, const
2323 gtl::ArraySlice<PartialTensorShape>& output_shapes);
2324 ParallelMapDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
2325 input_dataset, ::tensorflow::InputList other_arguments,
2326 ::tensorflow::Input num_parallel_calls, const
2327 NameAttrList& f, const DataTypeSlice& output_types, const
2328 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
2329 ParallelMapDatasetV2::Attrs& attrs);
2330 operator ::tensorflow::Output() const { return handle; }
2331 operator ::tensorflow::Input() const { return handle; }
2332 ::tensorflow::Node* node() const { return handle.node(); }
2333
2334 static Attrs UseInterOpParallelism(bool x) {
2335 return Attrs().UseInterOpParallelism(x);
2336 }
2337 static Attrs Deterministic(StringPiece x) {
2338 return Attrs().Deterministic(x);
2339 }
2340 static Attrs PreserveCardinality(bool x) {
2341 return Attrs().PreserveCardinality(x);
2342 }
2343 static Attrs Metadata(StringPiece x) {
2344 return Attrs().Metadata(x);
2345 }
2346
2347 Operation operation;
2348 ::tensorflow::Output handle;
2349};
2350
2351/// Creates a dataset that asynchronously prefetches elements from `input_dataset`.
2352///
2353/// Args:
2354/// * scope: A Scope object
2355/// * buffer_size: The maximum number of elements to buffer in an iterator over
2356/// this dataset.
2357///
2358/// Returns:
2359/// * `Output`: The handle tensor.
2360class PrefetchDataset {
2361 public:
2362 /// Optional attribute setters for PrefetchDataset
2363 struct Attrs {
2364 /// Defaults to 0
2365 TF_MUST_USE_RESULT Attrs SlackPeriod(int64 x) {
2366 Attrs ret = *this;
2367 ret.slack_period_ = x;
2368 return ret;
2369 }
2370
2371 /// Defaults to true
2372 TF_MUST_USE_RESULT Attrs LegacyAutotune(bool x) {
2373 Attrs ret = *this;
2374 ret.legacy_autotune_ = x;
2375 return ret;
2376 }
2377
2378 /// Defaults to 0
2379 TF_MUST_USE_RESULT Attrs BufferSizeMin(int64 x) {
2380 Attrs ret = *this;
2381 ret.buffer_size_min_ = x;
2382 return ret;
2383 }
2384
2385 /// Defaults to ""
2386 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2387 Attrs ret = *this;
2388 ret.metadata_ = x;
2389 return ret;
2390 }
2391
2392 int64 slack_period_ = 0;
2393 bool legacy_autotune_ = true;
2394 int64 buffer_size_min_ = 0;
2395 StringPiece metadata_ = "";
2396 };
2397 PrefetchDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2398 input_dataset, ::tensorflow::Input buffer_size, const
2399 DataTypeSlice& output_types, const
2400 gtl::ArraySlice<PartialTensorShape>& output_shapes);
2401 PrefetchDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2402 input_dataset, ::tensorflow::Input buffer_size, const
2403 DataTypeSlice& output_types, const
2404 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
2405 PrefetchDataset::Attrs& attrs);
2406 operator ::tensorflow::Output() const { return handle; }
2407 operator ::tensorflow::Input() const { return handle; }
2408 ::tensorflow::Node* node() const { return handle.node(); }
2409
2410 static Attrs SlackPeriod(int64 x) {
2411 return Attrs().SlackPeriod(x);
2412 }
2413 static Attrs LegacyAutotune(bool x) {
2414 return Attrs().LegacyAutotune(x);
2415 }
2416 static Attrs BufferSizeMin(int64 x) {
2417 return Attrs().BufferSizeMin(x);
2418 }
2419 static Attrs Metadata(StringPiece x) {
2420 return Attrs().Metadata(x);
2421 }
2422
2423 Operation operation;
2424 ::tensorflow::Output handle;
2425};
2426
2427/// Creates a dataset with a range of values. Corresponds to python's xrange.
2428///
2429/// Args:
2430/// * scope: A Scope object
2431/// * start: corresponds to start in python's xrange().
2432/// * stop: corresponds to stop in python's xrange().
2433/// * step: corresponds to step in python's xrange().
2434///
2435/// Returns:
2436/// * `Output`: The handle tensor.
2437class RangeDataset {
2438 public:
2439 /// Optional attribute setters for RangeDataset
2440 struct Attrs {
2441 /// Defaults to ""
2442 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2443 Attrs ret = *this;
2444 ret.metadata_ = x;
2445 return ret;
2446 }
2447
2448 /// Defaults to false
2449 TF_MUST_USE_RESULT Attrs ReplicateOnSplit(bool x) {
2450 Attrs ret = *this;
2451 ret.replicate_on_split_ = x;
2452 return ret;
2453 }
2454
2455 StringPiece metadata_ = "";
2456 bool replicate_on_split_ = false;
2457 };
2458 RangeDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input start,
2459 ::tensorflow::Input stop, ::tensorflow::Input step, const
2460 DataTypeSlice& output_types, const
2461 gtl::ArraySlice<PartialTensorShape>& output_shapes);
2462 RangeDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input start,
2463 ::tensorflow::Input stop, ::tensorflow::Input step, const
2464 DataTypeSlice& output_types, const
2465 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
2466 RangeDataset::Attrs& attrs);
2467 operator ::tensorflow::Output() const { return handle; }
2468 operator ::tensorflow::Input() const { return handle; }
2469 ::tensorflow::Node* node() const { return handle.node(); }
2470
2471 static Attrs Metadata(StringPiece x) {
2472 return Attrs().Metadata(x);
2473 }
2474 static Attrs ReplicateOnSplit(bool x) {
2475 return Attrs().ReplicateOnSplit(x);
2476 }
2477
2478 Operation operation;
2479 ::tensorflow::Output handle;
2480};
2481
2482/// Reduces the input dataset to a singleton using a reduce function.
2483///
2484/// Args:
2485/// * scope: A Scope object
2486/// * input_dataset: A variant tensor representing the input dataset.
2487/// * initial_state: A nested structure of tensors, representing the initial state of the
2488/// transformation.
2489/// * f: A function that maps `(old_state, input_element)` to `new_state`. It must take
2490/// two arguments and return a nested structures of tensors. The structure of
2491/// `new_state` must match the structure of `initial_state`.
2492///
2493/// Returns:
2494/// * `OutputList`: The components tensor.
2495class ReduceDataset {
2496 public:
2497 /// Optional attribute setters for ReduceDataset
2498 struct Attrs {
2499 /// Defaults to true
2500 TF_MUST_USE_RESULT Attrs UseInterOpParallelism(bool x) {
2501 Attrs ret = *this;
2502 ret.use_inter_op_parallelism_ = x;
2503 return ret;
2504 }
2505
2506 /// Defaults to ""
2507 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2508 Attrs ret = *this;
2509 ret.metadata_ = x;
2510 return ret;
2511 }
2512
2513 bool use_inter_op_parallelism_ = true;
2514 StringPiece metadata_ = "";
2515 };
2516 ReduceDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2517 input_dataset, ::tensorflow::InputList initial_state,
2518 ::tensorflow::InputList other_arguments, const NameAttrList& f,
2519 const DataTypeSlice& output_types, const
2520 gtl::ArraySlice<PartialTensorShape>& output_shapes);
2521 ReduceDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2522 input_dataset, ::tensorflow::InputList initial_state,
2523 ::tensorflow::InputList other_arguments, const NameAttrList& f,
2524 const DataTypeSlice& output_types, const
2525 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
2526 ReduceDataset::Attrs& attrs);
2527 ::tensorflow::Output operator[](size_t index) const { return components[index]; }
2528
2529
2530 static Attrs UseInterOpParallelism(bool x) {
2531 return Attrs().UseInterOpParallelism(x);
2532 }
2533 static Attrs Metadata(StringPiece x) {
2534 return Attrs().Metadata(x);
2535 }
2536
2537 Operation operation;
2538 ::tensorflow::OutputList components;
2539};
2540
2541/// Creates a dataset that emits the outputs of `input_dataset` `count` times.
2542///
2543/// Args:
2544/// * scope: A Scope object
2545/// * count: A scalar representing the number of times that `input_dataset` should
2546/// be repeated. A value of `-1` indicates that it should be repeated infinitely.
2547///
2548/// Returns:
2549/// * `Output`: The handle tensor.
2550class RepeatDataset {
2551 public:
2552 /// Optional attribute setters for RepeatDataset
2553 struct Attrs {
2554 /// Defaults to ""
2555 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2556 Attrs ret = *this;
2557 ret.metadata_ = x;
2558 return ret;
2559 }
2560
2561 StringPiece metadata_ = "";
2562 };
2563 RepeatDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2564 input_dataset, ::tensorflow::Input count, const DataTypeSlice&
2565 output_types, const gtl::ArraySlice<PartialTensorShape>&
2566 output_shapes);
2567 RepeatDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2568 input_dataset, ::tensorflow::Input count, const DataTypeSlice&
2569 output_types, const gtl::ArraySlice<PartialTensorShape>&
2570 output_shapes, const RepeatDataset::Attrs& attrs);
2571 operator ::tensorflow::Output() const { return handle; }
2572 operator ::tensorflow::Input() const { return handle; }
2573 ::tensorflow::Node* node() const { return handle.node(); }
2574
2575 static Attrs Metadata(StringPiece x) {
2576 return Attrs().Metadata(x);
2577 }
2578
2579 Operation operation;
2580 ::tensorflow::Output handle;
2581};
2582
2583/// TODO: add doc.
2584///
2585/// Args:
2586/// * scope: A Scope object
2587///
2588/// Returns:
2589/// * `Output`: The handle tensor.
2590class RewriteDataset {
2591 public:
2592 RewriteDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2593 input_dataset, ::tensorflow::Input rewrite_name, const
2594 DataTypeSlice& output_types, const
2595 gtl::ArraySlice<PartialTensorShape>& output_shapes);
2596 operator ::tensorflow::Output() const { return handle; }
2597 operator ::tensorflow::Input() const { return handle; }
2598 ::tensorflow::Node* node() const { return handle.node(); }
2599
2600 Operation operation;
2601 ::tensorflow::Output handle;
2602};
2603
2604/// Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
2605///
2606/// Args:
2607/// * scope: A Scope object
2608/// * num_shards: An integer representing the number of shards operating in parallel.
2609/// * index: An integer representing the current worker index.
2610///
2611/// Returns:
2612/// * `Output`: The handle tensor.
2613class ShardDataset {
2614 public:
2615 /// Optional attribute setters for ShardDataset
2616 struct Attrs {
2617 /// Defaults to false
2618 TF_MUST_USE_RESULT Attrs RequireNonEmpty(bool x) {
2619 Attrs ret = *this;
2620 ret.require_non_empty_ = x;
2621 return ret;
2622 }
2623
2624 /// Defaults to ""
2625 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2626 Attrs ret = *this;
2627 ret.metadata_ = x;
2628 return ret;
2629 }
2630
2631 bool require_non_empty_ = false;
2632 StringPiece metadata_ = "";
2633 };
2634 ShardDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2635 input_dataset, ::tensorflow::Input num_shards, ::tensorflow::Input
2636 index, const DataTypeSlice& output_types, const
2637 gtl::ArraySlice<PartialTensorShape>& output_shapes);
2638 ShardDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2639 input_dataset, ::tensorflow::Input num_shards, ::tensorflow::Input
2640 index, const DataTypeSlice& output_types, const
2641 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
2642 ShardDataset::Attrs& attrs);
2643 operator ::tensorflow::Output() const { return handle; }
2644 operator ::tensorflow::Input() const { return handle; }
2645 ::tensorflow::Node* node() const { return handle.node(); }
2646
2647 static Attrs RequireNonEmpty(bool x) {
2648 return Attrs().RequireNonEmpty(x);
2649 }
2650 static Attrs Metadata(StringPiece x) {
2651 return Attrs().Metadata(x);
2652 }
2653
2654 Operation operation;
2655 ::tensorflow::Output handle;
2656};
2657
2658/// Creates a dataset that shuffles and repeats elements from `input_dataset`
2659///
2660/// pseudorandomly.
2661///
2662/// Args:
2663/// * scope: A Scope object
2664/// * buffer_size: The number of output elements to buffer in an iterator over
2665/// this dataset. Compare with the `min_after_dequeue` attr when creating a
2666/// `RandomShuffleQueue`.
2667/// * seed: A scalar seed for the random number generator. If either `seed` or
2668/// `seed2` is set to be non-zero, the random number generator is seeded
2669/// by the given seed. Otherwise, a random seed is used.
2670/// * seed2: A second scalar seed to avoid seed collision.
2671/// * count: A scalar representing the number of times the underlying dataset
2672/// should be repeated. The default is `-1`, which results in infinite repetition.
2673///
2674/// Returns:
2675/// * `Output`: The handle tensor.
2676class ShuffleAndRepeatDataset {
2677 public:
2678 /// Optional attribute setters for ShuffleAndRepeatDataset
2679 struct Attrs {
2680 /// Defaults to true
2681 TF_MUST_USE_RESULT Attrs ReshuffleEachIteration(bool x) {
2682 Attrs ret = *this;
2683 ret.reshuffle_each_iteration_ = x;
2684 return ret;
2685 }
2686
2687 /// Defaults to ""
2688 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2689 Attrs ret = *this;
2690 ret.metadata_ = x;
2691 return ret;
2692 }
2693
2694 bool reshuffle_each_iteration_ = true;
2695 StringPiece metadata_ = "";
2696 };
2697 ShuffleAndRepeatDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2698 input_dataset, ::tensorflow::Input buffer_size,
2699 ::tensorflow::Input seed, ::tensorflow::Input seed2,
2700 ::tensorflow::Input count, const DataTypeSlice&
2701 output_types, const
2702 gtl::ArraySlice<PartialTensorShape>& output_shapes);
2703 ShuffleAndRepeatDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2704 input_dataset, ::tensorflow::Input buffer_size,
2705 ::tensorflow::Input seed, ::tensorflow::Input seed2,
2706 ::tensorflow::Input count, const DataTypeSlice&
2707 output_types, const
2708 gtl::ArraySlice<PartialTensorShape>& output_shapes,
2709 const ShuffleAndRepeatDataset::Attrs& attrs);
2710 operator ::tensorflow::Output() const { return handle; }
2711 operator ::tensorflow::Input() const { return handle; }
2712 ::tensorflow::Node* node() const { return handle.node(); }
2713
2714 static Attrs ReshuffleEachIteration(bool x) {
2715 return Attrs().ReshuffleEachIteration(x);
2716 }
2717 static Attrs Metadata(StringPiece x) {
2718 return Attrs().Metadata(x);
2719 }
2720
2721 Operation operation;
2722 ::tensorflow::Output handle;
2723};
2724
2725/// TODO: add doc.
2726///
2727/// Args:
2728/// * scope: A Scope object
2729///
2730/// Returns:
2731/// * `Output`: The handle tensor.
2732class ShuffleAndRepeatDatasetV2 {
2733 public:
2734 /// Optional attribute setters for ShuffleAndRepeatDatasetV2
2735 struct Attrs {
2736 /// Defaults to true
2737 TF_MUST_USE_RESULT Attrs ReshuffleEachIteration(bool x) {
2738 Attrs ret = *this;
2739 ret.reshuffle_each_iteration_ = x;
2740 return ret;
2741 }
2742
2743 /// Defaults to ""
2744 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2745 Attrs ret = *this;
2746 ret.metadata_ = x;
2747 return ret;
2748 }
2749
2750 bool reshuffle_each_iteration_ = true;
2751 StringPiece metadata_ = "";
2752 };
2753 ShuffleAndRepeatDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
2754 input_dataset, ::tensorflow::Input buffer_size,
2755 ::tensorflow::Input seed, ::tensorflow::Input seed2,
2756 ::tensorflow::Input count, ::tensorflow::Input
2757 seed_generator, const DataTypeSlice& output_types,
2758 const gtl::ArraySlice<PartialTensorShape>&
2759 output_shapes);
2760 ShuffleAndRepeatDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
2761 input_dataset, ::tensorflow::Input buffer_size,
2762 ::tensorflow::Input seed, ::tensorflow::Input seed2,
2763 ::tensorflow::Input count, ::tensorflow::Input
2764 seed_generator, const DataTypeSlice& output_types,
2765 const gtl::ArraySlice<PartialTensorShape>&
2766 output_shapes, const
2767 ShuffleAndRepeatDatasetV2::Attrs& attrs);
2768 operator ::tensorflow::Output() const { return handle; }
2769 operator ::tensorflow::Input() const { return handle; }
2770 ::tensorflow::Node* node() const { return handle.node(); }
2771
2772 static Attrs ReshuffleEachIteration(bool x) {
2773 return Attrs().ReshuffleEachIteration(x);
2774 }
2775 static Attrs Metadata(StringPiece x) {
2776 return Attrs().Metadata(x);
2777 }
2778
2779 Operation operation;
2780 ::tensorflow::Output handle;
2781};
2782
2783/// Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
2784///
2785/// Args:
2786/// * scope: A Scope object
2787/// * buffer_size: The number of output elements to buffer in an iterator over
2788/// this dataset. Compare with the `min_after_dequeue` attr when creating a
2789/// `RandomShuffleQueue`.
2790/// * seed: A scalar seed for the random number generator. If either `seed` or
2791/// `seed2` is set to be non-zero, the random number generator is seeded
2792/// by the given seed. Otherwise, a random seed is used.
2793/// * seed2: A second scalar seed to avoid seed collision.
2794///
2795/// Optional attributes (see `Attrs`):
2796/// * reshuffle_each_iteration: If true, each iterator over this dataset will be given
2797/// a different pseudorandomly generated seed, based on a sequence seeded by the
2798/// `seed` and `seed2` inputs. If false, each iterator will be given the same
2799/// seed, and repeated iteration over this dataset will yield the exact same
2800/// sequence of results.
2801///
2802/// Returns:
2803/// * `Output`: The handle tensor.
2804class ShuffleDataset {
2805 public:
2806 /// Optional attribute setters for ShuffleDataset
2807 struct Attrs {
2808 /// If true, each iterator over this dataset will be given
2809 /// a different pseudorandomly generated seed, based on a sequence seeded by the
2810 /// `seed` and `seed2` inputs. If false, each iterator will be given the same
2811 /// seed, and repeated iteration over this dataset will yield the exact same
2812 /// sequence of results.
2813 ///
2814 /// Defaults to true
2815 TF_MUST_USE_RESULT Attrs ReshuffleEachIteration(bool x) {
2816 Attrs ret = *this;
2817 ret.reshuffle_each_iteration_ = x;
2818 return ret;
2819 }
2820
2821 /// Defaults to ""
2822 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2823 Attrs ret = *this;
2824 ret.metadata_ = x;
2825 return ret;
2826 }
2827
2828 bool reshuffle_each_iteration_ = true;
2829 StringPiece metadata_ = "";
2830 };
2831 ShuffleDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2832 input_dataset, ::tensorflow::Input buffer_size,
2833 ::tensorflow::Input seed, ::tensorflow::Input seed2, const
2834 DataTypeSlice& output_types, const
2835 gtl::ArraySlice<PartialTensorShape>& output_shapes);
2836 ShuffleDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2837 input_dataset, ::tensorflow::Input buffer_size,
2838 ::tensorflow::Input seed, ::tensorflow::Input seed2, const
2839 DataTypeSlice& output_types, const
2840 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
2841 ShuffleDataset::Attrs& attrs);
2842 operator ::tensorflow::Output() const { return handle; }
2843 operator ::tensorflow::Input() const { return handle; }
2844 ::tensorflow::Node* node() const { return handle.node(); }
2845
2846 static Attrs ReshuffleEachIteration(bool x) {
2847 return Attrs().ReshuffleEachIteration(x);
2848 }
2849 static Attrs Metadata(StringPiece x) {
2850 return Attrs().Metadata(x);
2851 }
2852
2853 Operation operation;
2854 ::tensorflow::Output handle;
2855};
2856
2857/// TODO: add doc.
2858///
2859/// Args:
2860/// * scope: A Scope object
2861///
2862/// Returns:
2863/// * `Output`: The handle tensor.
2864class ShuffleDatasetV2 {
2865 public:
2866 /// Optional attribute setters for ShuffleDatasetV2
2867 struct Attrs {
2868 /// Defaults to ""
2869 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2870 Attrs ret = *this;
2871 ret.metadata_ = x;
2872 return ret;
2873 }
2874
2875 StringPiece metadata_ = "";
2876 };
2877 ShuffleDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
2878 input_dataset, ::tensorflow::Input buffer_size,
2879 ::tensorflow::Input seed_generator, const DataTypeSlice&
2880 output_types, const gtl::ArraySlice<PartialTensorShape>&
2881 output_shapes);
2882 ShuffleDatasetV2(const ::tensorflow::Scope& scope, ::tensorflow::Input
2883 input_dataset, ::tensorflow::Input buffer_size,
2884 ::tensorflow::Input seed_generator, const DataTypeSlice&
2885 output_types, const gtl::ArraySlice<PartialTensorShape>&
2886 output_shapes, const ShuffleDatasetV2::Attrs& attrs);
2887 operator ::tensorflow::Output() const { return handle; }
2888 operator ::tensorflow::Input() const { return handle; }
2889 ::tensorflow::Node* node() const { return handle.node(); }
2890
2891 static Attrs Metadata(StringPiece x) {
2892 return Attrs().Metadata(x);
2893 }
2894
2895 Operation operation;
2896 ::tensorflow::Output handle;
2897};
2898
2899/// TODO: add doc.
2900///
2901/// Args:
2902/// * scope: A Scope object
2903///
2904/// Returns:
2905/// * `Output`: The handle tensor.
2906class ShuffleDatasetV3 {
2907 public:
2908 /// Optional attribute setters for ShuffleDatasetV3
2909 struct Attrs {
2910 /// Defaults to true
2911 TF_MUST_USE_RESULT Attrs ReshuffleEachIteration(bool x) {
2912 Attrs ret = *this;
2913 ret.reshuffle_each_iteration_ = x;
2914 return ret;
2915 }
2916
2917 /// Defaults to ""
2918 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2919 Attrs ret = *this;
2920 ret.metadata_ = x;
2921 return ret;
2922 }
2923
2924 bool reshuffle_each_iteration_ = true;
2925 StringPiece metadata_ = "";
2926 };
2927 ShuffleDatasetV3(const ::tensorflow::Scope& scope, ::tensorflow::Input
2928 input_dataset, ::tensorflow::Input buffer_size,
2929 ::tensorflow::Input seed, ::tensorflow::Input seed2,
2930 ::tensorflow::Input seed_generator, const DataTypeSlice&
2931 output_types, const gtl::ArraySlice<PartialTensorShape>&
2932 output_shapes);
2933 ShuffleDatasetV3(const ::tensorflow::Scope& scope, ::tensorflow::Input
2934 input_dataset, ::tensorflow::Input buffer_size,
2935 ::tensorflow::Input seed, ::tensorflow::Input seed2,
2936 ::tensorflow::Input seed_generator, const DataTypeSlice&
2937 output_types, const gtl::ArraySlice<PartialTensorShape>&
2938 output_shapes, const ShuffleDatasetV3::Attrs& attrs);
2939 operator ::tensorflow::Output() const { return handle; }
2940 operator ::tensorflow::Input() const { return handle; }
2941 ::tensorflow::Node* node() const { return handle.node(); }
2942
2943 static Attrs ReshuffleEachIteration(bool x) {
2944 return Attrs().ReshuffleEachIteration(x);
2945 }
2946 static Attrs Metadata(StringPiece x) {
2947 return Attrs().Metadata(x);
2948 }
2949
2950 Operation operation;
2951 ::tensorflow::Output handle;
2952};
2953
2954/// Creates a dataset that skips `count` elements from the `input_dataset`.
2955///
2956/// Args:
2957/// * scope: A Scope object
2958/// * count: A scalar representing the number of elements from the `input_dataset`
2959/// that should be skipped. If count is -1, skips everything.
2960///
2961/// Returns:
2962/// * `Output`: The handle tensor.
2963class SkipDataset {
2964 public:
2965 /// Optional attribute setters for SkipDataset
2966 struct Attrs {
2967 /// Defaults to ""
2968 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
2969 Attrs ret = *this;
2970 ret.metadata_ = x;
2971 return ret;
2972 }
2973
2974 StringPiece metadata_ = "";
2975 };
2976 SkipDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2977 input_dataset, ::tensorflow::Input count, const DataTypeSlice&
2978 output_types, const gtl::ArraySlice<PartialTensorShape>&
2979 output_shapes);
2980 SkipDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
2981 input_dataset, ::tensorflow::Input count, const DataTypeSlice&
2982 output_types, const gtl::ArraySlice<PartialTensorShape>&
2983 output_shapes, const SkipDataset::Attrs& attrs);
2984 operator ::tensorflow::Output() const { return handle; }
2985 operator ::tensorflow::Input() const { return handle; }
2986 ::tensorflow::Node* node() const { return handle.node(); }
2987
2988 static Attrs Metadata(StringPiece x) {
2989 return Attrs().Metadata(x);
2990 }
2991
2992 Operation operation;
2993 ::tensorflow::Output handle;
2994};
2995
2996/// Creates a dataset that splits a SparseTensor into elements row-wise.
2997///
2998/// Args:
2999/// * scope: A Scope object
3000///
3001/// Returns:
3002/// * `Output`: The handle tensor.
3003class SparseTensorSliceDataset {
3004 public:
3005 SparseTensorSliceDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
3006 indices, ::tensorflow::Input values,
3007 ::tensorflow::Input dense_shape);
3008 operator ::tensorflow::Output() const { return handle; }
3009 operator ::tensorflow::Input() const { return handle; }
3010 ::tensorflow::Node* node() const { return handle.node(); }
3011
3012 Operation operation;
3013 ::tensorflow::Output handle;
3014};
3015
3016/// Creates a dataset that emits the records from one or more TFRecord files.
3017///
3018/// Args:
3019/// * scope: A Scope object
3020/// * filenames: A scalar or vector containing the name(s) of the file(s) to be
3021/// read.
3022/// * compression_type: A scalar containing either (i) the empty string (no
3023/// compression), (ii) "ZLIB", or (iii) "GZIP".
3024/// * buffer_size: A scalar representing the number of bytes to buffer. A value of
3025/// 0 means no buffering will be performed.
3026///
3027/// Returns:
3028/// * `Output`: The handle tensor.
3029class TFRecordDataset {
3030 public:
3031 /// Optional attribute setters for TFRecordDataset
3032 struct Attrs {
3033 /// Defaults to ""
3034 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
3035 Attrs ret = *this;
3036 ret.metadata_ = x;
3037 return ret;
3038 }
3039
3040 StringPiece metadata_ = "";
3041 };
3042 TFRecordDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
3043 filenames, ::tensorflow::Input compression_type,
3044 ::tensorflow::Input buffer_size);
3045 TFRecordDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
3046 filenames, ::tensorflow::Input compression_type,
3047 ::tensorflow::Input buffer_size, const TFRecordDataset::Attrs&
3048 attrs);
3049 operator ::tensorflow::Output() const { return handle; }
3050 operator ::tensorflow::Input() const { return handle; }
3051 ::tensorflow::Node* node() const { return handle.node(); }
3052
3053 static Attrs Metadata(StringPiece x) {
3054 return Attrs().Metadata(x);
3055 }
3056
3057 Operation operation;
3058 ::tensorflow::Output handle;
3059};
3060
3061/// Creates a dataset that contains `count` elements from the `input_dataset`.
3062///
3063/// Args:
3064/// * scope: A Scope object
3065/// * count: A scalar representing the number of elements from the `input_dataset`
3066/// that should be taken. A value of `-1` indicates that all of `input_dataset`
3067/// is taken.
3068///
3069/// Returns:
3070/// * `Output`: The handle tensor.
3071class TakeDataset {
3072 public:
3073 /// Optional attribute setters for TakeDataset
3074 struct Attrs {
3075 /// Defaults to ""
3076 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
3077 Attrs ret = *this;
3078 ret.metadata_ = x;
3079 return ret;
3080 }
3081
3082 StringPiece metadata_ = "";
3083 };
3084 TakeDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
3085 input_dataset, ::tensorflow::Input count, const DataTypeSlice&
3086 output_types, const gtl::ArraySlice<PartialTensorShape>&
3087 output_shapes);
3088 TakeDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
3089 input_dataset, ::tensorflow::Input count, const DataTypeSlice&
3090 output_types, const gtl::ArraySlice<PartialTensorShape>&
3091 output_shapes, const TakeDataset::Attrs& attrs);
3092 operator ::tensorflow::Output() const { return handle; }
3093 operator ::tensorflow::Input() const { return handle; }
3094 ::tensorflow::Node* node() const { return handle.node(); }
3095
3096 static Attrs Metadata(StringPiece x) {
3097 return Attrs().Metadata(x);
3098 }
3099
3100 Operation operation;
3101 ::tensorflow::Output handle;
3102};
3103
3104/// Creates a dataset that emits `components` as a tuple of tensors once.
3105///
3106/// Args:
3107/// * scope: A Scope object
3108///
3109/// Returns:
3110/// * `Output`: The handle tensor.
3111class TensorDataset {
3112 public:
3113 /// Optional attribute setters for TensorDataset
3114 struct Attrs {
3115 /// Defaults to ""
3116 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
3117 Attrs ret = *this;
3118 ret.metadata_ = x;
3119 return ret;
3120 }
3121
3122 StringPiece metadata_ = "";
3123 };
3124 TensorDataset(const ::tensorflow::Scope& scope, ::tensorflow::InputList
3125 components, const gtl::ArraySlice<PartialTensorShape>&
3126 output_shapes);
3127 TensorDataset(const ::tensorflow::Scope& scope, ::tensorflow::InputList
3128 components, const gtl::ArraySlice<PartialTensorShape>&
3129 output_shapes, const TensorDataset::Attrs& attrs);
3130 operator ::tensorflow::Output() const { return handle; }
3131 operator ::tensorflow::Input() const { return handle; }
3132 ::tensorflow::Node* node() const { return handle.node(); }
3133
3134 static Attrs Metadata(StringPiece x) {
3135 return Attrs().Metadata(x);
3136 }
3137
3138 Operation operation;
3139 ::tensorflow::Output handle;
3140};
3141
3142/// Creates a dataset that emits each dim-0 slice of `components` once.
3143///
3144/// Args:
3145/// * scope: A Scope object
3146///
3147/// Returns:
3148/// * `Output`: The handle tensor.
3149class TensorSliceDataset {
3150 public:
3151 /// Optional attribute setters for TensorSliceDataset
3152 struct Attrs {
3153 /// Defaults to false
3154 TF_MUST_USE_RESULT Attrs IsFiles(bool x) {
3155 Attrs ret = *this;
3156 ret.is_files_ = x;
3157 return ret;
3158 }
3159
3160 /// Defaults to ""
3161 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
3162 Attrs ret = *this;
3163 ret.metadata_ = x;
3164 return ret;
3165 }
3166
3167 /// Defaults to false
3168 TF_MUST_USE_RESULT Attrs ReplicateOnSplit(bool x) {
3169 Attrs ret = *this;
3170 ret.replicate_on_split_ = x;
3171 return ret;
3172 }
3173
3174 bool is_files_ = false;
3175 StringPiece metadata_ = "";
3176 bool replicate_on_split_ = false;
3177 };
3178 TensorSliceDataset(const ::tensorflow::Scope& scope, ::tensorflow::InputList
3179 components, const gtl::ArraySlice<PartialTensorShape>&
3180 output_shapes);
3181 TensorSliceDataset(const ::tensorflow::Scope& scope, ::tensorflow::InputList
3182 components, const gtl::ArraySlice<PartialTensorShape>&
3183 output_shapes, const TensorSliceDataset::Attrs& attrs);
3184 operator ::tensorflow::Output() const { return handle; }
3185 operator ::tensorflow::Input() const { return handle; }
3186 ::tensorflow::Node* node() const { return handle.node(); }
3187
3188 static Attrs IsFiles(bool x) {
3189 return Attrs().IsFiles(x);
3190 }
3191 static Attrs Metadata(StringPiece x) {
3192 return Attrs().Metadata(x);
3193 }
3194 static Attrs ReplicateOnSplit(bool x) {
3195 return Attrs().ReplicateOnSplit(x);
3196 }
3197
3198 Operation operation;
3199 ::tensorflow::Output handle;
3200};
3201
3202/// Creates a dataset that emits the lines of one or more text files.
3203///
3204/// Args:
3205/// * scope: A Scope object
3206/// * filenames: A scalar or a vector containing the name(s) of the file(s) to be
3207/// read.
3208/// * compression_type: A scalar containing either (i) the empty string (no
3209/// compression), (ii) "ZLIB", or (iii) "GZIP".
3210/// * buffer_size: A scalar containing the number of bytes to buffer.
3211///
3212/// Returns:
3213/// * `Output`: The handle tensor.
3214class TextLineDataset {
3215 public:
3216 /// Optional attribute setters for TextLineDataset
3217 struct Attrs {
3218 /// Defaults to ""
3219 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
3220 Attrs ret = *this;
3221 ret.metadata_ = x;
3222 return ret;
3223 }
3224
3225 StringPiece metadata_ = "";
3226 };
3227 TextLineDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
3228 filenames, ::tensorflow::Input compression_type,
3229 ::tensorflow::Input buffer_size);
3230 TextLineDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
3231 filenames, ::tensorflow::Input compression_type,
3232 ::tensorflow::Input buffer_size, const TextLineDataset::Attrs&
3233 attrs);
3234 operator ::tensorflow::Output() const { return handle; }
3235 operator ::tensorflow::Input() const { return handle; }
3236 ::tensorflow::Node* node() const { return handle.node(); }
3237
3238 static Attrs Metadata(StringPiece x) {
3239 return Attrs().Metadata(x);
3240 }
3241
3242 Operation operation;
3243 ::tensorflow::Output handle;
3244};
3245
3246/// TODO: add doc.
3247///
3248/// Args:
3249/// * scope: A Scope object
3250///
3251/// Returns:
3252/// * `Output`: The output_handle tensor.
3253class UnwrapDatasetVariant {
3254 public:
3255 UnwrapDatasetVariant(const ::tensorflow::Scope& scope, ::tensorflow::Input
3256 input_handle);
3257 operator ::tensorflow::Output() const { return output_handle; }
3258 operator ::tensorflow::Input() const { return output_handle; }
3259 ::tensorflow::Node* node() const { return output_handle.node(); }
3260
3261 Operation operation;
3262 ::tensorflow::Output output_handle;
3263};
3264
3265/// Combines (nests of) input elements into a dataset of (nests of) windows.
3266///
3267/// A "window" is a finite dataset of flat elements of size `size` (or possibly
3268/// fewer if there are not enough input elements to fill the window and
3269/// `drop_remainder` evaluates to false).
3270///
3271/// The `shift` argument determines the number of input elements by which
3272/// the window moves on each iteration. The first element in the `k`th window
3273/// will be element
3274///
3275/// ```
3276/// 1 + (k-1) * shift
3277/// ```
3278///
3279/// of the input dataset. In particular, the first element of the first window
3280/// will always be the first element of the input dataset.
3281///
3282/// If the `stride` parameter is greater than 1, then each window will skip
3283/// `(stride - 1)` input elements between each element that appears in the
3284/// window. Output windows will still contain `size` elements regardless of
3285/// the value of `stride`.
3286///
3287/// The `stride` argument determines the stride of the input elements, and the
3288/// `shift` argument determines the shift of the window.
3289///
3290/// For example, letting `{...}` to represent a Dataset:
3291///
3292/// - `tf.data.Dataset.range(7).window(2)` produces
3293/// `{{0, 1}, {2, 3}, {4, 5}, {6}}`
3294/// - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
3295/// `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
3296/// - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
3297/// `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`
3298///
3299/// Note that when the `window` transformation is applied to a dataset of
3300/// nested elements, it produces a dataset of nested windows.
3301///
3302/// For example:
3303///
3304/// - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
3305/// produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
3306/// - `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
3307/// produces `{{"a": {0, 1}}, {"a": {2, 3}}}`
3308///
3309/// Args:
3310/// * scope: A Scope object
3311/// * size: An integer scalar, representing the number of elements
3312/// of the input dataset to combine into a window. Must be positive.
3313/// * shift: An integer scalar, representing the number of input elements
3314/// by which the window moves in each iteration. Defaults to `size`.
3315/// Must be positive.
3316/// * stride: An integer scalar, representing the stride of the input elements
3317/// in the sliding window. Must be positive. The default value of 1 means
3318/// "retain every input element".
3319/// * drop_remainder: A Boolean scalar, representing whether the last window should be
3320/// dropped if its size is smaller than `window_size`.
3321///
3322/// Returns:
3323/// * `Output`: The handle tensor.
3324class WindowDataset {
3325 public:
3326 /// Optional attribute setters for WindowDataset
3327 struct Attrs {
3328 /// Defaults to ""
3329 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
3330 Attrs ret = *this;
3331 ret.metadata_ = x;
3332 return ret;
3333 }
3334
3335 StringPiece metadata_ = "";
3336 };
3337 WindowDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
3338 input_dataset, ::tensorflow::Input size, ::tensorflow::Input
3339 shift, ::tensorflow::Input stride, ::tensorflow::Input
3340 drop_remainder, const DataTypeSlice& output_types, const
3341 gtl::ArraySlice<PartialTensorShape>& output_shapes);
3342 WindowDataset(const ::tensorflow::Scope& scope, ::tensorflow::Input
3343 input_dataset, ::tensorflow::Input size, ::tensorflow::Input
3344 shift, ::tensorflow::Input stride, ::tensorflow::Input
3345 drop_remainder, const DataTypeSlice& output_types, const
3346 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
3347 WindowDataset::Attrs& attrs);
3348 operator ::tensorflow::Output() const { return handle; }
3349 operator ::tensorflow::Input() const { return handle; }
3350 ::tensorflow::Node* node() const { return handle.node(); }
3351
3352 static Attrs Metadata(StringPiece x) {
3353 return Attrs().Metadata(x);
3354 }
3355
3356 Operation operation;
3357 ::tensorflow::Output handle;
3358};
3359
3360/// TODO: add doc.
3361///
3362/// Args:
3363/// * scope: A Scope object
3364///
3365/// Returns:
3366/// * `Output`: The handle tensor.
3367class WindowOp {
3368 public:
3369 WindowOp(const ::tensorflow::Scope& scope, ::tensorflow::InputList inputs,
3370 const DataTypeSlice& output_types, const
3371 gtl::ArraySlice<PartialTensorShape>& output_shapes);
3372 operator ::tensorflow::Output() const { return handle; }
3373 operator ::tensorflow::Input() const { return handle; }
3374 ::tensorflow::Node* node() const { return handle.node(); }
3375
3376 Operation operation;
3377 ::tensorflow::Output handle;
3378};
3379
3380/// TODO: add doc.
3381///
3382/// Args:
3383/// * scope: A Scope object
3384///
3385/// Returns:
3386/// * `Output`: The output_handle tensor.
3387class WrapDatasetVariant {
3388 public:
3389 WrapDatasetVariant(const ::tensorflow::Scope& scope, ::tensorflow::Input
3390 input_handle);
3391 operator ::tensorflow::Output() const { return output_handle; }
3392 operator ::tensorflow::Input() const { return output_handle; }
3393 ::tensorflow::Node* node() const { return output_handle.node(); }
3394
3395 Operation operation;
3396 ::tensorflow::Output output_handle;
3397};
3398
3399/// Creates a dataset that zips together `input_datasets`.
3400///
3401/// The elements of the resulting dataset are created by zipping corresponding
3402/// elements from each of the input datasets.
3403///
3404/// The size of the resulting dataset will match the size of the smallest input
3405/// dataset, and no error will be raised if input datasets have different sizes.
3406///
3407/// Args:
3408/// * scope: A Scope object
3409/// * input_datasets: List of `N` variant Tensors representing datasets to be zipped together.
3410///
3411/// Returns:
3412/// * `Output`: The handle tensor.
3413class ZipDataset {
3414 public:
3415 /// Optional attribute setters for ZipDataset
3416 struct Attrs {
3417 /// Defaults to ""
3418 TF_MUST_USE_RESULT Attrs Metadata(StringPiece x) {
3419 Attrs ret = *this;
3420 ret.metadata_ = x;
3421 return ret;
3422 }
3423
3424 StringPiece metadata_ = "";
3425 };
3426 ZipDataset(const ::tensorflow::Scope& scope, ::tensorflow::InputList
3427 input_datasets, const DataTypeSlice& output_types, const
3428 gtl::ArraySlice<PartialTensorShape>& output_shapes);
3429 ZipDataset(const ::tensorflow::Scope& scope, ::tensorflow::InputList
3430 input_datasets, const DataTypeSlice& output_types, const
3431 gtl::ArraySlice<PartialTensorShape>& output_shapes, const
3432 ZipDataset::Attrs& attrs);
3433 operator ::tensorflow::Output() const { return handle; }
3434 operator ::tensorflow::Input() const { return handle; }
3435 ::tensorflow::Node* node() const { return handle.node(); }
3436
3437 static Attrs Metadata(StringPiece x) {
3438 return Attrs().Metadata(x);
3439 }
3440
3441 Operation operation;
3442 ::tensorflow::Output handle;
3443};
3444
3445} // namespace internal
3446} // namespace ops
3447} // namespace tensorflow
3448
3449#endif // TENSORFLOW_CC_OPS_DATASET_OPS_INTERNAL_H_
3450