1// Generated by the protocol buffer compiler. DO NOT EDIT!
2// source: tensorflow/core/framework/step_stats.proto
3
4#include "tensorflow/core/framework/step_stats.pb.h"
5
6#include <algorithm>
7
8#include <google/protobuf/stubs/common.h>
9#include <google/protobuf/io/coded_stream.h>
10#include <google/protobuf/extension_set.h>
11#include <google/protobuf/wire_format_lite.h>
12#include <google/protobuf/descriptor.h>
13#include <google/protobuf/generated_message_reflection.h>
14#include <google/protobuf/reflection_ops.h>
15#include <google/protobuf/wire_format.h>
16// @@protoc_insertion_point(includes)
17#include <google/protobuf/port_def.inc>
18extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_AllocationDescription_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto;
19extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_AllocationRecord_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto;
20extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_AllocatorMemoryUsed_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto;
21extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_DeviceStepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto;
22extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_DeviceStepStats_ThreadNamesEntry_DoNotUse_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto;
23extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_MemoryStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto;
24extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<4> scc_info_NodeExecStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto;
25extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_NodeOutput_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto;
26extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_TensorDescription_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto;
namespace tensorflow {
// Holders for the per-message default instances of this file.
// ExplicitlyConstructed<> defers construction of the wrapped message until the
// matching InitDefaults* SCC initializer in this file runs, sidestepping C++
// static-initialization-order issues across translation units.
class AllocationRecordDefaultTypeInternal {
 public:
  ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<AllocationRecord> _instance;
} _AllocationRecord_default_instance_;
class AllocatorMemoryUsedDefaultTypeInternal {
 public:
  ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<AllocatorMemoryUsed> _instance;
} _AllocatorMemoryUsed_default_instance_;
class NodeOutputDefaultTypeInternal {
 public:
  ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<NodeOutput> _instance;
} _NodeOutput_default_instance_;
class MemoryStatsDefaultTypeInternal {
 public:
  ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<MemoryStats> _instance;
} _MemoryStats_default_instance_;
class NodeExecStatsDefaultTypeInternal {
 public:
  ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<NodeExecStats> _instance;
} _NodeExecStats_default_instance_;
class DeviceStepStats_ThreadNamesEntry_DoNotUseDefaultTypeInternal {
 public:
  ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<DeviceStepStats_ThreadNamesEntry_DoNotUse> _instance;
} _DeviceStepStats_ThreadNamesEntry_DoNotUse_default_instance_;
class DeviceStepStatsDefaultTypeInternal {
 public:
  ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<DeviceStepStats> _instance;
} _DeviceStepStats_default_instance_;
class StepStatsDefaultTypeInternal {
 public:
  ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<StepStats> _instance;
} _StepStats_default_instance_;
}  // namespace tensorflow
// Placement-constructs the AllocationRecord default instance in its
// preallocated storage and schedules its destruction at library shutdown.
static void InitDefaultsscc_info_AllocationRecord_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto() {
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  {
    void* ptr = &::tensorflow::_AllocationRecord_default_instance_;
    new (ptr) ::tensorflow::AllocationRecord();
    ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
  }
  ::tensorflow::AllocationRecord::InitAsDefaultInstance();
}

// SCC (strongly-connected-component) entry for AllocationRecord:
// runs the initializer above once; no dependencies on other SCCs.
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_AllocationRecord_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto =
    {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_AllocationRecord_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto}, {}};
74
// Placement-constructs the AllocatorMemoryUsed default instance and
// schedules its destruction at library shutdown.
static void InitDefaultsscc_info_AllocatorMemoryUsed_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto() {
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  {
    void* ptr = &::tensorflow::_AllocatorMemoryUsed_default_instance_;
    new (ptr) ::tensorflow::AllocatorMemoryUsed();
    ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
  }
  ::tensorflow::AllocatorMemoryUsed::InitAsDefaultInstance();
}

// SCC entry for AllocatorMemoryUsed: one dependency (AllocationRecord,
// referenced by its repeated allocation_records field).
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_AllocatorMemoryUsed_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto =
    {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_AllocatorMemoryUsed_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto}, {
      &scc_info_AllocationRecord_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,}};
89
// Placement-constructs the DeviceStepStats default instance and
// schedules its destruction at library shutdown.
static void InitDefaultsscc_info_DeviceStepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto() {
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  {
    void* ptr = &::tensorflow::_DeviceStepStats_default_instance_;
    new (ptr) ::tensorflow::DeviceStepStats();
    ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
  }
  ::tensorflow::DeviceStepStats::InitAsDefaultInstance();
}

// SCC entry for DeviceStepStats: two dependencies — NodeExecStats
// (node_stats field) and the ThreadNamesEntry map-entry type (thread_names).
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_DeviceStepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto =
    {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 2, InitDefaultsscc_info_DeviceStepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto}, {
      &scc_info_NodeExecStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
      &scc_info_DeviceStepStats_ThreadNamesEntry_DoNotUse_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,}};
105
// Placement-constructs the default instance for the synthetic map-entry
// message backing DeviceStepStats.thread_names. Note: unlike the other
// messages in this file, no OnShutdownDestroyMessage registration is made
// for this map-entry instance.
static void InitDefaultsscc_info_DeviceStepStats_ThreadNamesEntry_DoNotUse_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto() {
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  {
    void* ptr = &::tensorflow::_DeviceStepStats_ThreadNamesEntry_DoNotUse_default_instance_;
    new (ptr) ::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse();
  }
  ::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse::InitAsDefaultInstance();
}

// SCC entry for the thread_names map entry: no dependencies.
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_DeviceStepStats_ThreadNamesEntry_DoNotUse_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto =
    {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_DeviceStepStats_ThreadNamesEntry_DoNotUse_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto}, {}};
118
// Placement-constructs the MemoryStats default instance and schedules
// its destruction at library shutdown.
static void InitDefaultsscc_info_MemoryStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto() {
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  {
    void* ptr = &::tensorflow::_MemoryStats_default_instance_;
    new (ptr) ::tensorflow::MemoryStats();
    ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
  }
  ::tensorflow::MemoryStats::InitAsDefaultInstance();
}

// SCC entry for MemoryStats: no dependencies (scalar/repeated-scalar fields only).
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_MemoryStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto =
    {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_MemoryStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto}, {}};
132
// Placement-constructs the NodeExecStats default instance and schedules
// its destruction at library shutdown.
static void InitDefaultsscc_info_NodeExecStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto() {
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  {
    void* ptr = &::tensorflow::_NodeExecStats_default_instance_;
    new (ptr) ::tensorflow::NodeExecStats();
    ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
  }
  ::tensorflow::NodeExecStats::InitAsDefaultInstance();
}

// SCC entry for NodeExecStats: four dependencies — AllocatorMemoryUsed
// (memory), NodeOutput (output), AllocationDescription from
// allocation_description.proto (referenced_tensor), and MemoryStats
// (memory_stats).
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<4> scc_info_NodeExecStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto =
    {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 4, InitDefaultsscc_info_NodeExecStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto}, {
      &scc_info_AllocatorMemoryUsed_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
      &scc_info_NodeOutput_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
      &scc_info_AllocationDescription_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto.base,
      &scc_info_MemoryStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,}};
150
// Placement-constructs the NodeOutput default instance and schedules
// its destruction at library shutdown.
static void InitDefaultsscc_info_NodeOutput_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto() {
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  {
    void* ptr = &::tensorflow::_NodeOutput_default_instance_;
    new (ptr) ::tensorflow::NodeOutput();
    ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
  }
  ::tensorflow::NodeOutput::InitAsDefaultInstance();
}

// SCC entry for NodeOutput: one dependency — TensorDescription from
// tensor_description.proto (tensor_description field).
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_NodeOutput_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto =
    {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_NodeOutput_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto}, {
      &scc_info_TensorDescription_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto.base,}};
165
// Placement-constructs the StepStats default instance and schedules
// its destruction at library shutdown.
static void InitDefaultsscc_info_StepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto() {
  GOOGLE_PROTOBUF_VERIFY_VERSION;

  {
    void* ptr = &::tensorflow::_StepStats_default_instance_;
    new (ptr) ::tensorflow::StepStats();
    ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
  }
  ::tensorflow::StepStats::InitAsDefaultInstance();
}

// SCC entry for StepStats: one dependency — DeviceStepStats (dev_stats field).
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_StepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto =
    {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_StepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto}, {
      &scc_info_DeviceStepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,}};
180
// File-level reflection storage: one Metadata slot per message type (8 here).
// This .proto declares no enums or services, so those descriptor arrays are null.
static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto[8];
static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto = nullptr;
static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto = nullptr;
184
// Positional offset table consumed via the schemas[] indices below.
// Per message the layout is: _has_bits_ offset, _internal_metadata_ offset,
// _extensions_ offset, _oneof_case_ offset, _weak_field_map_ offset, then one
// offset per field in declaration order. ~0u marks an absent feature.
// Do not reorder: schemas[] indexes into this array by absolute position.
const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
  // AllocationRecord (entries 0-6)
  ~0u,  // no _has_bits_
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocationRecord, _internal_metadata_),
  ~0u,  // no _extensions_
  ~0u,  // no _oneof_case_
  ~0u,  // no _weak_field_map_
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocationRecord, alloc_micros_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocationRecord, alloc_bytes_),
  // AllocatorMemoryUsed (entries 7-17)
  ~0u,  // no _has_bits_
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocatorMemoryUsed, _internal_metadata_),
  ~0u,  // no _extensions_
  ~0u,  // no _oneof_case_
  ~0u,  // no _weak_field_map_
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocatorMemoryUsed, allocator_name_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocatorMemoryUsed, total_bytes_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocatorMemoryUsed, peak_bytes_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocatorMemoryUsed, live_bytes_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocatorMemoryUsed, allocation_records_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::AllocatorMemoryUsed, allocator_bytes_in_use_),
  // NodeOutput (entries 18-24)
  ~0u,  // no _has_bits_
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeOutput, _internal_metadata_),
  ~0u,  // no _extensions_
  ~0u,  // no _oneof_case_
  ~0u,  // no _weak_field_map_
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeOutput, slot_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeOutput, tensor_description_),
  // MemoryStats (entries 25-35)
  ~0u,  // no _has_bits_
  PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryStats, _internal_metadata_),
  ~0u,  // no _extensions_
  ~0u,  // no _oneof_case_
  ~0u,  // no _weak_field_map_
  PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryStats, temp_memory_size_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryStats, persistent_memory_size_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryStats, persistent_tensor_alloc_ids_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryStats, device_temp_memory_size_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryStats, device_persistent_memory_size_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryStats, device_persistent_tensor_alloc_ids_),
  // NodeExecStats (entries 36-57)
  ~0u,  // no _has_bits_
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, _internal_metadata_),
  ~0u,  // no _extensions_
  ~0u,  // no _oneof_case_
  ~0u,  // no _weak_field_map_
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, node_name_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, all_start_micros_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, op_start_rel_micros_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, op_end_rel_micros_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, all_end_rel_micros_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, memory_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, output_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, timeline_label_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, scheduled_micros_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, thread_id_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, referenced_tensor_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, memory_stats_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, all_start_nanos_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, op_start_rel_nanos_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, op_end_rel_nanos_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, all_end_rel_nanos_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::NodeExecStats, scheduled_nanos_),
  // DeviceStepStats.ThreadNamesEntry (entries 58-66); the trailing 0 and 1
  // are has-bit indices for key_ and value_ (map entries track presence).
  PROTOBUF_FIELD_OFFSET(::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse, _has_bits_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse, _internal_metadata_),
  ~0u,  // no _extensions_
  ~0u,  // no _oneof_case_
  ~0u,  // no _weak_field_map_
  PROTOBUF_FIELD_OFFSET(::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse, key_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse, value_),
  0,
  1,
  // DeviceStepStats (entries 67-74)
  ~0u,  // no _has_bits_
  PROTOBUF_FIELD_OFFSET(::tensorflow::DeviceStepStats, _internal_metadata_),
  ~0u,  // no _extensions_
  ~0u,  // no _oneof_case_
  ~0u,  // no _weak_field_map_
  PROTOBUF_FIELD_OFFSET(::tensorflow::DeviceStepStats, device_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::DeviceStepStats, node_stats_),
  PROTOBUF_FIELD_OFFSET(::tensorflow::DeviceStepStats, thread_names_),
  // StepStats (entries 75-80)
  ~0u,  // no _has_bits_
  PROTOBUF_FIELD_OFFSET(::tensorflow::StepStats, _internal_metadata_),
  ~0u,  // no _extensions_
  ~0u,  // no _oneof_case_
  ~0u,  // no _weak_field_map_
  PROTOBUF_FIELD_OFFSET(::tensorflow::StepStats, dev_stats_),
};
// Per-message schema: { starting index into offsets[] above,
//                       index of the has-bit block (-1 = message has none),
//                       object size }.
// Order must match file_default_instances[] and the descriptor string.
static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
  { 0, -1, sizeof(::tensorflow::AllocationRecord)},
  { 7, -1, sizeof(::tensorflow::AllocatorMemoryUsed)},
  { 18, -1, sizeof(::tensorflow::NodeOutput)},
  { 25, -1, sizeof(::tensorflow::MemoryStats)},
  { 36, -1, sizeof(::tensorflow::NodeExecStats)},
  { 58, 65, sizeof(::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse)},
  { 67, -1, sizeof(::tensorflow::DeviceStepStats)},
  { 75, -1, sizeof(::tensorflow::StepStats)},
};
278
// Default-instance pointers, in the same order as schemas[] above.
// The reinterpret_cast is safe here: each holder's first (only) member is the
// ExplicitlyConstructed storage of the corresponding Message subclass.
static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
  reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_AllocationRecord_default_instance_),
  reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_AllocatorMemoryUsed_default_instance_),
  reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_NodeOutput_default_instance_),
  reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_MemoryStats_default_instance_),
  reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_NodeExecStats_default_instance_),
  reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_DeviceStepStats_ThreadNamesEntry_DoNotUse_default_instance_),
  reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_DeviceStepStats_default_instance_),
  reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_StepStats_default_instance_),
};
289
// Serialized FileDescriptorProto for step_stats.proto (1703 bytes; the length
// is passed to the DescriptorTable below). Octal escapes are wire-format
// bytes — do not edit by hand; regenerate with protoc.
const char descriptor_table_protodef_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) =
  "\n*tensorflow/core/framework/step_stats.p"
  "roto\022\ntensorflow\0326tensorflow/core/framew"
  "ork/allocation_description.proto\0322tensor"
  "flow/core/framework/tensor_description.p"
  "roto\"=\n\020AllocationRecord\022\024\n\014alloc_micros"
  "\030\001 \001(\003\022\023\n\013alloc_bytes\030\002 \001(\003\"\304\001\n\023Allocato"
  "rMemoryUsed\022\026\n\016allocator_name\030\001 \001(\t\022\023\n\013t"
  "otal_bytes\030\002 \001(\003\022\022\n\npeak_bytes\030\003 \001(\003\022\022\n\n"
  "live_bytes\030\004 \001(\003\0228\n\022allocation_records\030\006"
  " \003(\0132\034.tensorflow.AllocationRecord\022\036\n\026al"
  "locator_bytes_in_use\030\005 \001(\003\"U\n\nNodeOutput"
  "\022\014\n\004slot\030\001 \001(\005\0229\n\022tensor_description\030\003 \001"
  "(\0132\035.tensorflow.TensorDescription\"\354\001\n\013Me"
  "moryStats\022\030\n\020temp_memory_size\030\001 \001(\003\022\036\n\026p"
  "ersistent_memory_size\030\003 \001(\003\022#\n\033persisten"
  "t_tensor_alloc_ids\030\005 \003(\003\022#\n\027device_temp_"
  "memory_size\030\002 \001(\003B\002\030\001\022)\n\035device_persiste"
  "nt_memory_size\030\004 \001(\003B\002\030\001\022.\n\"device_persi"
  "stent_tensor_alloc_ids\030\006 \003(\003B\002\030\001\"\236\004\n\rNod"
  "eExecStats\022\021\n\tnode_name\030\001 \001(\t\022\030\n\020all_sta"
  "rt_micros\030\002 \001(\003\022\033\n\023op_start_rel_micros\030\003"
  " \001(\003\022\031\n\021op_end_rel_micros\030\004 \001(\003\022\032\n\022all_e"
  "nd_rel_micros\030\005 \001(\003\022/\n\006memory\030\006 \003(\0132\037.te"
  "nsorflow.AllocatorMemoryUsed\022&\n\006output\030\007"
  " \003(\0132\026.tensorflow.NodeOutput\022\026\n\016timeline"
  "_label\030\010 \001(\t\022\030\n\020scheduled_micros\030\t \001(\003\022\021"
  "\n\tthread_id\030\n \001(\r\022<\n\021referenced_tensor\030\013"
  " \003(\0132!.tensorflow.AllocationDescription\022"
  "-\n\014memory_stats\030\014 \001(\0132\027.tensorflow.Memor"
  "yStats\022\027\n\017all_start_nanos\030\r \001(\003\022\032\n\022op_st"
  "art_rel_nanos\030\016 \001(\003\022\030\n\020op_end_rel_nanos\030"
  "\017 \001(\003\022\031\n\021all_end_rel_nanos\030\020 \001(\003\022\027\n\017sche"
  "duled_nanos\030\021 \001(\003\"\310\001\n\017DeviceStepStats\022\016\n"
  "\006device\030\001 \001(\t\022-\n\nnode_stats\030\002 \003(\0132\031.tens"
  "orflow.NodeExecStats\022B\n\014thread_names\030\003 \003"
  "(\0132,.tensorflow.DeviceStepStats.ThreadNa"
  "mesEntry\0322\n\020ThreadNamesEntry\022\013\n\003key\030\001 \001("
  "\r\022\r\n\005value\030\002 \001(\t:\0028\001\";\n\tStepStats\022.\n\tdev"
  "_stats\030\001 \003(\0132\033.tensorflow.DeviceStepStat"
  "sB\203\001\n\030org.tensorflow.frameworkB\017StepStat"
  "sProtosP\001ZQgithub.com/tensorflow/tensorf"
  "low/tensorflow/go/core/framework/step_st"
  "ats_go_proto\370\001\001b\006proto3"
  ;
// Descriptor tables of the two .proto files this file imports; they must be
// registered before this file's descriptors can be cross-linked.
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto_deps[2] = {
  &::descriptor_table_tensorflow_2fcore_2fframework_2fallocation_5fdescription_2eproto,
  &::descriptor_table_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto,
};
// All eight SCC initializers defined in this file, handed to the
// DescriptorTable so default instances are built when descriptors load.
static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto_sccs[8] = {
  &scc_info_AllocationRecord_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
  &scc_info_AllocatorMemoryUsed_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
  &scc_info_DeviceStepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
  &scc_info_DeviceStepStats_ThreadNamesEntry_DoNotUse_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
  &scc_info_MemoryStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
  &scc_info_NodeExecStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
  &scc_info_NodeOutput_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
  &scc_info_StepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
};
// One-time-init guard and flag for descriptor registration.
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto_once;
static bool descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto_initialized = false;
// Master registration record tying everything above together:
// serialized descriptor (1703 bytes), 8 SCCs, 2 file dependencies,
// schemas/offsets, default instances, and 8 metadata slots.
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto = {
  &descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto_initialized, descriptor_table_protodef_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto, "tensorflow/core/framework/step_stats.proto", 1703,
  &descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto_once, descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto_sccs, descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto_deps, 8, 2,
  schemas, file_default_instances, TableStruct_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto::offsets,
  file_level_metadata_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto, 8, file_level_enum_descriptors_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto, file_level_service_descriptors_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto,
};

// Force running AddDescriptors() at dynamic initialization time.
static bool dynamic_init_dummy_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto = ( ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto), true);
360namespace tensorflow {
361
362// ===================================================================
363
// Nothing to wire up: AllocationRecord has no message-typed fields whose
// default pointers would need initialization.
void AllocationRecord::InitAsDefaultInstance() {
}
// Internal accessor helper; empty for this message (no has-bits or oneofs
// need private access here).
class AllocationRecord::_Internal {
 public:
};
369
// Default constructor (heap/stack allocation, no arena); SharedCtor()
// zero-initializes both int64 fields.
AllocationRecord::AllocationRecord()
  : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
  SharedCtor();
  // @@protoc_insertion_point(constructor:tensorflow.AllocationRecord)
}
// Arena constructor: records the owning arena in _internal_metadata_ and
// registers the (no-op for this type) arena destructor.
AllocationRecord::AllocationRecord(::PROTOBUF_NAMESPACE_ID::Arena* arena)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
  _internal_metadata_(arena) {
  SharedCtor();
  RegisterArenaDtor(arena);
  // @@protoc_insertion_point(arena_constructor:tensorflow.AllocationRecord)
}
// Copy constructor: merges unknown fields, then bulk-copies the contiguous
// POD span from alloc_micros_ through alloc_bytes_ in one memcpy. Relies on
// those members being laid out adjacently, as protoc generates them.
AllocationRecord::AllocationRecord(const AllocationRecord& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _internal_metadata_(nullptr) {
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::memcpy(&alloc_micros_, &from.alloc_micros_,
    static_cast<size_t>(reinterpret_cast<char*>(&alloc_bytes_) -
    reinterpret_cast<char*>(&alloc_micros_)) + sizeof(alloc_bytes_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.AllocationRecord)
}
391
// Shared by all constructors: zero the contiguous field range
// [alloc_micros_, alloc_bytes_] with a single memset.
void AllocationRecord::SharedCtor() {
  ::memset(&alloc_micros_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&alloc_bytes_) -
      reinterpret_cast<char*>(&alloc_micros_)) + sizeof(alloc_bytes_));
}
397
// Destructor; all cleanup lives in SharedDtor().
AllocationRecord::~AllocationRecord() {
  // @@protoc_insertion_point(destructor:tensorflow.AllocationRecord)
  SharedDtor();
}
402
// No owned resources to release; only asserts the destructor is not being
// run on an arena-allocated instance (arenas free memory wholesale).
void AllocationRecord::SharedDtor() {
  GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
}
406
// Arena destruction hook; intentionally a no-op since this message owns
// nothing that needs destruction beyond its arena-managed memory.
void AllocationRecord::ArenaDtor(void* object) {
  AllocationRecord* _this = reinterpret_cast< AllocationRecord* >(object);
  (void)_this;
}
// No arena destructor registration needed (ArenaDtor is a no-op above).
void AllocationRecord::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the serialized size computed by ByteSizeLong() for later
// serialization; _cached_size_ handles any needed synchronization.
void AllocationRecord::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Returns the singleton default instance, running this message's SCC
// initializer first so the instance is guaranteed constructed.
const AllocationRecord& AllocationRecord::default_instance() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_AllocationRecord_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  return *internal_default_instance();
}
420
421
// Resets the message to its default state: zeroes the contiguous int64
// field range (same span as SharedCtor) and drops unknown fields.
void AllocationRecord::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.AllocationRecord)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  ::memset(&alloc_micros_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&alloc_bytes_) -
      reinterpret_cast<char*>(&alloc_micros_)) + sizeof(alloc_bytes_));
  _internal_metadata_.Clear();
}
433
434#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Experimental table-free parser (compiled only under
// GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER). Reads tag/value pairs until
// the context reports the end of the message:
//   field 1 (tag byte 8,  varint) -> alloc_micros_
//   field 2 (tag byte 16, varint) -> alloc_bytes_
// Anything else falls to handle_unusual: end-group or zero tags terminate
// the message; other tags are preserved as unknown fields.
// Returns the advanced pointer on success, nullptr on malformed input.
const char* AllocationRecord::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
  while (!ctx->Done(&ptr)) {
    ::PROTOBUF_NAMESPACE_ID::uint32 tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    CHK_(ptr);
    switch (tag >> 3) {
      // int64 alloc_micros = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
          alloc_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 alloc_bytes = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
          alloc_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      default: {
      handle_unusual:
        if ((tag & 7) == 4 || tag == 0) {
          ctx->SetLastTag(tag);
          goto success;
        }
        ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
        CHK_(ptr != nullptr);
        continue;
      }
    }  // switch
  }  // while
success:
  return ptr;
failure:
  ptr = nullptr;
  goto success;
#undef CHK_
}
476#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Classic stream-based parser (used when the experimental parser is off).
// Reads tags from the coded stream; tag byte 8 -> int64 alloc_micros_,
// tag byte 16 -> int64 alloc_bytes_. Tag 0 means end of stream/message;
// unrecognized fields are skipped into the unknown-field set. Returns false
// on malformed input.
bool AllocationRecord::MergePartialFromCodedStream(
    ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
  ::PROTOBUF_NAMESPACE_ID::uint32 tag;
  // @@protoc_insertion_point(parse_start:tensorflow.AllocationRecord)
  for (;;) {
    ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
    tag = p.first;
    if (!p.second) goto handle_unusual;
    switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
      // int64 alloc_micros = 1;
      case 1: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &alloc_micros_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 alloc_bytes = 2;
      case 2: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &alloc_bytes_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      default: {
      handle_unusual:
        if (tag == 0) {
          goto success;
        }
        DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
              input, tag, _internal_metadata_.mutable_unknown_fields()));
        break;
      }
    }
  }
success:
  // @@protoc_insertion_point(parse_success:tensorflow.AllocationRecord)
  return true;
failure:
  // @@protoc_insertion_point(parse_failure:tensorflow.AllocationRecord)
  return false;
#undef DO_
}
532#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
533
// Stream serialization. Proto3 semantics: a scalar field is emitted only
// when nonzero; fields are written in field-number order (1 then 2),
// followed by any preserved unknown fields.
void AllocationRecord::SerializeWithCachedSizes(
    ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
  // @@protoc_insertion_point(serialize_start:tensorflow.AllocationRecord)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 alloc_micros = 1;
  if (this->alloc_micros() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->alloc_micros(), output);
  }

  // int64 alloc_bytes = 2;
  if (this->alloc_bytes() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(2, this->alloc_bytes(), output);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
        _internal_metadata_.unknown_fields(), output);
  }
  // @@protoc_insertion_point(serialize_end:tensorflow.AllocationRecord)
}
556
// Flat-buffer serialization: same field logic as SerializeWithCachedSizes,
// but writes directly into `target` (caller guarantees capacity, sized by a
// prior ByteSizeLong() call) and returns the advanced write pointer.
::PROTOBUF_NAMESPACE_ID::uint8* AllocationRecord::InternalSerializeWithCachedSizesToArray(
    ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.AllocationRecord)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 alloc_micros = 1;
  if (this->alloc_micros() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->alloc_micros(), target);
  }

  // int64 alloc_bytes = 2;
  if (this->alloc_bytes() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(2, this->alloc_bytes(), target);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields(), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.AllocationRecord)
  return target;
}
580
// Computes the serialized size: unknown fields plus, for each nonzero int64
// field, 1 tag byte + its varint size. Caches the result via SetCachedSize()
// for the subsequent serialization pass.
size_t AllocationRecord::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.AllocationRecord)
  size_t total_size = 0;

  if (_internal_metadata_.have_unknown_fields()) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
        _internal_metadata_.unknown_fields());
  }
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // int64 alloc_micros = 1;
  if (this->alloc_micros() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->alloc_micros());
  }

  // int64 alloc_bytes = 2;
  if (this->alloc_bytes() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->alloc_bytes());
  }

  int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
612
// Generalized merge: if `from` is actually an AllocationRecord, dispatch to
// the fast typed MergeFrom; otherwise fall back to reflection-based merging.
void AllocationRecord::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.AllocationRecord)
  GOOGLE_DCHECK_NE(&from, this);
  const AllocationRecord* source =
      ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<AllocationRecord>(
          &from);
  if (source == nullptr) {
  // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.AllocationRecord)
    ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
  } else {
  // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.AllocationRecord)
    MergeFrom(*source);
  }
}
627
// Typed merge: copies each scalar field from `from` only when it is non-zero,
// so default-valued source fields never clobber values already set here.
void AllocationRecord::MergeFrom(const AllocationRecord& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.AllocationRecord)
  GOOGLE_DCHECK_NE(&from, this);
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  if (from.alloc_micros() != 0) {
    set_alloc_micros(from.alloc_micros());
  }
  if (from.alloc_bytes() != 0) {
    set_alloc_bytes(from.alloc_bytes());
  }
}
642
// Generalized copy: Clear then merge (standard protobuf CopyFrom pattern).
void AllocationRecord::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.AllocationRecord)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
649
// Typed copy: Clear then merge; self-copy is a no-op.
void AllocationRecord::CopyFrom(const AllocationRecord& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.AllocationRecord)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
656
// Always true: proto3 messages have no required fields to validate.
bool AllocationRecord::IsInitialized() const {
  return true;
}
660
// O(1) member-wise swap of all fields plus unknown-field metadata.
// Precondition (enforced by callers): both messages are on the same arena.
void AllocationRecord::InternalSwap(AllocationRecord* other) {
  using std::swap;
  _internal_metadata_.Swap(&other->_internal_metadata_);
  swap(alloc_micros_, other->alloc_micros_);
  swap(alloc_bytes_, other->alloc_bytes_);
}
667
// Returns descriptor/reflection metadata via the generated static accessor.
::PROTOBUF_NAMESPACE_ID::Metadata AllocationRecord::GetMetadata() const {
  return GetMetadataStatic();
}
671
672
673// ===================================================================
674
// Nothing to do: this message has no singular sub-message fields that need
// to be pointed at their default instances.
void AllocatorMemoryUsed::InitAsDefaultInstance() {
}
// Generated accessor helper class; empty because no field of this message
// requires privileged internal access.
class AllocatorMemoryUsed::_Internal {
 public:
};
680
// Default (heap) constructor: no arena, shared initialization in SharedCtor.
AllocatorMemoryUsed::AllocatorMemoryUsed()
  : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
  SharedCtor();
  // @@protoc_insertion_point(constructor:tensorflow.AllocatorMemoryUsed)
}
// Arena constructor: metadata and the repeated field are arena-allocated,
// and the message registers a destructor with the arena.
AllocatorMemoryUsed::AllocatorMemoryUsed(::PROTOBUF_NAMESPACE_ID::Arena* arena)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
  _internal_metadata_(arena),
  allocation_records_(arena) {
  SharedCtor();
  RegisterArenaDtor(arena);
  // @@protoc_insertion_point(arena_constructor:tensorflow.AllocatorMemoryUsed)
}
// Copy constructor.  Scalars are bulk-copied with a single memcpy over the
// contiguous POD span [total_bytes_, allocator_bytes_in_use_]; this relies on
// the generated member declaration order and must not be reordered.
AllocatorMemoryUsed::AllocatorMemoryUsed(const AllocatorMemoryUsed& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _internal_metadata_(nullptr),
      allocation_records_(from.allocation_records_) {
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  // Start the string pointing at the shared empty default, then only copy if
  // the source string is non-empty.
  allocator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  if (!from.allocator_name().empty()) {
    allocator_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.allocator_name(),
      GetArenaNoVirtual());
  }
  ::memcpy(&total_bytes_, &from.total_bytes_,
    static_cast<size_t>(reinterpret_cast<char*>(&allocator_bytes_in_use_) -
    reinterpret_cast<char*>(&total_bytes_)) + sizeof(allocator_bytes_in_use_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.AllocatorMemoryUsed)
}
709
// Common constructor body: ensures the file's SCC (strongly connected
// component of message defaults) is initialized, then zero-fills the scalar
// span and points the string at the shared empty default.
void AllocatorMemoryUsed::SharedCtor() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_AllocatorMemoryUsed_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  allocator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  ::memset(&total_bytes_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&allocator_bytes_in_use_) -
      reinterpret_cast<char*>(&total_bytes_)) + sizeof(allocator_bytes_in_use_));
}
717
// Destructor; real cleanup lives in SharedDtor (shared with arena teardown).
AllocatorMemoryUsed::~AllocatorMemoryUsed() {
  // @@protoc_insertion_point(destructor:tensorflow.AllocatorMemoryUsed)
  SharedDtor();
}
722
// Frees owned storage.  Must only run for heap-allocated messages: arena
// memory is reclaimed by the arena itself (hence the DCHECK).
void AllocatorMemoryUsed::SharedDtor() {
  GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
  allocator_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
727
// Arena destruction hook; no-op because no field needs non-trivial cleanup
// when the owning arena is destroyed.
void AllocatorMemoryUsed::ArenaDtor(void* object) {
  AllocatorMemoryUsed* _this = reinterpret_cast< AllocatorMemoryUsed* >(object);
  (void)_this;
}
// No arena destructor registration needed (ArenaDtor above is a no-op).
void AllocatorMemoryUsed::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the serialized size computed by ByteSizeLong (atomic cached-size slot).
void AllocatorMemoryUsed::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Returns the immutable singleton default instance, initializing the file's
// SCC on first use (thread-safe via InitSCC).
const AllocatorMemoryUsed& AllocatorMemoryUsed::default_instance() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_AllocatorMemoryUsed_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  return *internal_default_instance();
}
741
742
// Resets every field to its default: clears the repeated records, empties the
// string (keeping arena semantics), zero-fills the scalar span, and drops
// unknown fields.
void AllocatorMemoryUsed::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.AllocatorMemoryUsed)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  allocation_records_.Clear();
  allocator_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
  // Bulk-zero the contiguous scalar members; depends on declaration order.
  ::memset(&total_bytes_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&allocator_bytes_in_use_) -
      reinterpret_cast<char*>(&total_bytes_)) + sizeof(allocator_bytes_in_use_));
  _internal_metadata_.Clear();
}
756
757#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Experimental table-less parser (enabled by GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER).
// Reads tag/value pairs from `ptr` until the context reports end of input.
// Each case first verifies the full tag byte (field number + wire type);
// a mismatched wire type falls through to handle_unusual, which stores the
// data as an unknown field or terminates on group-end/zero tags.
const char* AllocatorMemoryUsed::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
  while (!ctx->Done(&ptr)) {
    ::PROTOBUF_NAMESPACE_ID::uint32 tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    CHK_(ptr);
    switch (tag >> 3) {
      // string allocator_name = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_allocator_name(), ptr, ctx, "tensorflow.AllocatorMemoryUsed.allocator_name");
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 total_bytes = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
          total_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 peak_bytes = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
          peak_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 live_bytes = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
          live_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 allocator_bytes_in_use = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
          allocator_bytes_in_use_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // repeated .tensorflow.AllocationRecord allocation_records = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) {
          // The -1/+1 dance keeps `ptr` consistent across loop iterations:
          // consecutive records with the same tag byte (50) are parsed in a
          // tight loop without re-entering the outer switch.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(add_allocation_records(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 50);
        } else goto handle_unusual;
        continue;
      default: {
      handle_unusual:
        // Wire-type 4 (end-group) or tag 0 terminate this message's scope.
        if ((tag & 7) == 4 || tag == 0) {
          ctx->SetLastTag(tag);
          goto success;
        }
        ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
        CHK_(ptr != nullptr);
        continue;
      }
    }  // switch
  }  // while
success:
  return ptr;
failure:
  // Failure is signalled to the caller by returning nullptr.
  ptr = nullptr;
  goto success;
#undef CHK_
}
832#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Legacy CodedInputStream parser (used when the experimental parser is off).
// Reads tags until EOF/limit; known fields are merged in, unknown fields are
// preserved via SkipField into the unknown-field set.  Returns false on any
// malformed input (via the DO_ macro's goto failure).
bool AllocatorMemoryUsed::MergePartialFromCodedStream(
    ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
  ::PROTOBUF_NAMESPACE_ID::uint32 tag;
  // @@protoc_insertion_point(parse_start:tensorflow.AllocatorMemoryUsed)
  for (;;) {
    ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
    tag = p.first;
    if (!p.second) goto handle_unusual;
    switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
      // string allocator_name = 1;
      case 1: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
                input, this->mutable_allocator_name()));
          // Proto3 strings must be valid UTF-8; reject the message otherwise.
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
            this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
            ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
            "tensorflow.AllocatorMemoryUsed.allocator_name"));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 total_bytes = 2;
      case 2: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &total_bytes_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 peak_bytes = 3;
      case 3: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &peak_bytes_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 live_bytes = 4;
      case 4: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &live_bytes_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 allocator_bytes_in_use = 5;
      case 5: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &allocator_bytes_in_use_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated .tensorflow.AllocationRecord allocation_records = 6;
      case 6: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
                input, add_allocation_records()));
        } else {
          goto handle_unusual;
        }
        break;
      }

      default: {
      handle_unusual:
        // tag == 0 marks end of stream/limit; anything else unknown is
        // skipped but retained in the unknown-field set.
        if (tag == 0) {
          goto success;
        }
        DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
              input, tag, _internal_metadata_.mutable_unknown_fields()));
        break;
      }
    }
  }
success:
  // @@protoc_insertion_point(parse_success:tensorflow.AllocatorMemoryUsed)
  return true;
failure:
  // @@protoc_insertion_point(parse_failure:tensorflow.AllocatorMemoryUsed)
  return false;
#undef DO_
}
940#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
941
// Streams this message to a CodedOutputStream in ascending field-number
// order.  Fields at their proto3 default are skipped; sub-message sizes come
// from the cached sizes computed by a prior ByteSizeLong call.
void AllocatorMemoryUsed::SerializeWithCachedSizes(
    ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
  // @@protoc_insertion_point(serialize_start:tensorflow.AllocatorMemoryUsed)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // string allocator_name = 1;
  if (this->allocator_name().size() > 0) {
    // Outgoing proto3 strings are checked for valid UTF-8 as well.
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.AllocatorMemoryUsed.allocator_name");
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
      1, this->allocator_name(), output);
  }

  // int64 total_bytes = 2;
  if (this->total_bytes() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(2, this->total_bytes(), output);
  }

  // int64 peak_bytes = 3;
  if (this->peak_bytes() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(3, this->peak_bytes(), output);
  }

  // int64 live_bytes = 4;
  if (this->live_bytes() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(4, this->live_bytes(), output);
  }

  // int64 allocator_bytes_in_use = 5;
  if (this->allocator_bytes_in_use() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(5, this->allocator_bytes_in_use(), output);
  }

  // repeated .tensorflow.AllocationRecord allocation_records = 6;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->allocation_records_size()); i < n; i++) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
      6,
      this->allocation_records(static_cast<int>(i)),
      output);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
        _internal_metadata_.unknown_fields(), output);
  }
  // @@protoc_insertion_point(serialize_end:tensorflow.AllocatorMemoryUsed)
}
993
// Flat-buffer variant of the serializer above: writes into `target` and
// returns the end pointer.  Caller must have reserved ByteSizeLong() bytes.
::PROTOBUF_NAMESPACE_ID::uint8* AllocatorMemoryUsed::InternalSerializeWithCachedSizesToArray(
    ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.AllocatorMemoryUsed)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // string allocator_name = 1;
  if (this->allocator_name().size() > 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.AllocatorMemoryUsed.allocator_name");
    target =
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
        1, this->allocator_name(), target);
  }

  // int64 total_bytes = 2;
  if (this->total_bytes() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(2, this->total_bytes(), target);
  }

  // int64 peak_bytes = 3;
  if (this->peak_bytes() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(3, this->peak_bytes(), target);
  }

  // int64 live_bytes = 4;
  if (this->live_bytes() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(4, this->live_bytes(), target);
  }

  // int64 allocator_bytes_in_use = 5;
  if (this->allocator_bytes_in_use() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(5, this->allocator_bytes_in_use(), target);
  }

  // repeated .tensorflow.AllocationRecord allocation_records = 6;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->allocation_records_size()); i < n; i++) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessageToArray(
        6, this->allocation_records(static_cast<int>(i)), target);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields(), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.AllocatorMemoryUsed)
  return target;
}
1046
// Computes and caches the serialized size of this message.  Each "1 +"
// accounts for the one-byte field tag; repeated records add one tag byte per
// element plus each element's own (recursively cached) size.
size_t AllocatorMemoryUsed::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.AllocatorMemoryUsed)
  size_t total_size = 0;

  if (_internal_metadata_.have_unknown_fields()) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
        _internal_metadata_.unknown_fields());
  }
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.AllocationRecord allocation_records = 6;
  {
    unsigned int count = static_cast<unsigned int>(this->allocation_records_size());
    total_size += 1UL * count;
    for (unsigned int i = 0; i < count; i++) {
      total_size +=
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          this->allocation_records(static_cast<int>(i)));
    }
  }

  // string allocator_name = 1;
  if (this->allocator_name().size() > 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->allocator_name());
  }

  // int64 total_bytes = 2;
  if (this->total_bytes() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->total_bytes());
  }

  // int64 peak_bytes = 3;
  if (this->peak_bytes() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->peak_bytes());
  }

  // int64 live_bytes = 4;
  if (this->live_bytes() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->live_bytes());
  }

  // int64 allocator_bytes_in_use = 5;
  if (this->allocator_bytes_in_use() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->allocator_bytes_in_use());
  }

  int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
1110
// Generalized merge: fast typed path when `from` is an AllocatorMemoryUsed,
// reflection fallback otherwise.
void AllocatorMemoryUsed::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.AllocatorMemoryUsed)
  GOOGLE_DCHECK_NE(&from, this);
  const AllocatorMemoryUsed* source =
      ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<AllocatorMemoryUsed>(
          &from);
  if (source == nullptr) {
  // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.AllocatorMemoryUsed)
    ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
  } else {
  // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.AllocatorMemoryUsed)
    MergeFrom(*source);
  }
}
1125
// Typed merge: appends repeated records, then copies each singular field only
// when the source field is non-default (proto3 merge semantics).
void AllocatorMemoryUsed::MergeFrom(const AllocatorMemoryUsed& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.AllocatorMemoryUsed)
  GOOGLE_DCHECK_NE(&from, this);
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  allocation_records_.MergeFrom(from.allocation_records_);
  if (from.allocator_name().size() > 0) {
    set_allocator_name(from.allocator_name());
  }
  if (from.total_bytes() != 0) {
    set_total_bytes(from.total_bytes());
  }
  if (from.peak_bytes() != 0) {
    set_peak_bytes(from.peak_bytes());
  }
  if (from.live_bytes() != 0) {
    set_live_bytes(from.live_bytes());
  }
  if (from.allocator_bytes_in_use() != 0) {
    set_allocator_bytes_in_use(from.allocator_bytes_in_use());
  }
}
1150
// Generalized copy: Clear then merge (standard protobuf CopyFrom pattern).
void AllocatorMemoryUsed::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.AllocatorMemoryUsed)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
1157
// Typed copy: Clear then merge; self-copy is a no-op.
void AllocatorMemoryUsed::CopyFrom(const AllocatorMemoryUsed& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.AllocatorMemoryUsed)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
1164
// Always true: proto3 messages have no required fields to validate.
bool AllocatorMemoryUsed::IsInitialized() const {
  return true;
}
1168
// O(1) member-wise swap.  Precondition (enforced by callers): both messages
// live on the same arena, so pointer-level swaps are safe.
void AllocatorMemoryUsed::InternalSwap(AllocatorMemoryUsed* other) {
  using std::swap;
  _internal_metadata_.Swap(&other->_internal_metadata_);
  CastToBase(&allocation_records_)->InternalSwap(CastToBase(&other->allocation_records_));
  allocator_name_.Swap(&other->allocator_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
    GetArenaNoVirtual());
  swap(total_bytes_, other->total_bytes_);
  swap(peak_bytes_, other->peak_bytes_);
  swap(live_bytes_, other->live_bytes_);
  swap(allocator_bytes_in_use_, other->allocator_bytes_in_use_);
}
1180
// Returns descriptor/reflection metadata via the generated static accessor.
::PROTOBUF_NAMESPACE_ID::Metadata AllocatorMemoryUsed::GetMetadata() const {
  return GetMetadataStatic();
}
1184
1185
1186// ===================================================================
1187
// Points the default instance's sub-message field at TensorDescription's own
// default instance, so accessors on the default NodeOutput never see null.
void NodeOutput::InitAsDefaultInstance() {
  ::tensorflow::_NodeOutput_default_instance_._instance.get_mutable()->tensor_description_ = const_cast< ::tensorflow::TensorDescription*>(
      ::tensorflow::TensorDescription::internal_default_instance());
}
// Generated accessor helper: grants serialization code raw access to the
// tensor_description_ pointer without a null check.
class NodeOutput::_Internal {
 public:
  static const ::tensorflow::TensorDescription& tensor_description(const NodeOutput* msg);
};
1196
// Unchecked dereference; callers must first verify has_tensor_description().
const ::tensorflow::TensorDescription&
NodeOutput::_Internal::tensor_description(const NodeOutput* msg) {
  return *msg->tensor_description_;
}
// Takes ownership of `tensor_description` WITHOUT arena-ownership checks
// ("unsafe"): the pointer is assumed to be on the same arena as this message
// (or heap-owned when this message is heap-owned).  Any previously owned
// heap object is deleted first; arena-owned objects are left to the arena.
void NodeOutput::unsafe_arena_set_allocated_tensor_description(
    ::tensorflow::TensorDescription* tensor_description) {
  if (GetArenaNoVirtual() == nullptr) {
    delete tensor_description_;
  }
  tensor_description_ = tensor_description;
  // Empty branches are protoc scaffolding for has-bit bookkeeping, which
  // proto3 singular messages track via pointer nullness instead.
  if (tensor_description) {

  } else {

  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.NodeOutput.tensor_description)
}
// Clears the sub-message: deletes it only when heap-owned (no arena), then
// nulls the pointer so has_tensor_description() becomes false.
void NodeOutput::clear_tensor_description() {
  if (GetArenaNoVirtual() == nullptr && tensor_description_ != nullptr) {
    delete tensor_description_;
  }
  tensor_description_ = nullptr;
}
// Default (heap) constructor: no arena, shared initialization in SharedCtor.
NodeOutput::NodeOutput()
  : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
  SharedCtor();
  // @@protoc_insertion_point(constructor:tensorflow.NodeOutput)
}
// Arena constructor: metadata lives on the arena; a destructor is registered
// with the arena for teardown.
NodeOutput::NodeOutput(::PROTOBUF_NAMESPACE_ID::Arena* arena)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
  _internal_metadata_(arena) {
  SharedCtor();
  RegisterArenaDtor(arena);
  // @@protoc_insertion_point(arena_constructor:tensorflow.NodeOutput)
}
// Copy constructor: deep-copies the sub-message when present (null pointer
// encodes "unset" in proto3), then copies the scalar slot.
NodeOutput::NodeOutput(const NodeOutput& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _internal_metadata_(nullptr) {
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  if (from.has_tensor_description()) {
    tensor_description_ = new ::tensorflow::TensorDescription(*from.tensor_description_);
  } else {
    tensor_description_ = nullptr;
  }
  slot_ = from.slot_;
  // @@protoc_insertion_point(copy_constructor:tensorflow.NodeOutput)
}
1244
// Common constructor body: initializes the file SCC, then zero-fills the
// contiguous member span [tensor_description_, slot_] in one memset.
// Relies on the generated member declaration order; do not reorder members.
void NodeOutput::SharedCtor() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_NodeOutput_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  ::memset(&tensor_description_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&slot_) -
      reinterpret_cast<char*>(&tensor_description_)) + sizeof(slot_));
}
1251
// Destructor; real cleanup lives in SharedDtor.
NodeOutput::~NodeOutput() {
  // @@protoc_insertion_point(destructor:tensorflow.NodeOutput)
  SharedDtor();
}
1256
// Frees the owned sub-message.  The default instance shares a pointer with
// TensorDescription's default instance, so it must not delete it.
void NodeOutput::SharedDtor() {
  GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
  if (this != internal_default_instance()) delete tensor_description_;
}
1261
// Arena destruction hook; no-op — arena-owned sub-objects are reclaimed by
// the arena itself.
void NodeOutput::ArenaDtor(void* object) {
  NodeOutput* _this = reinterpret_cast< NodeOutput* >(object);
  (void)_this;
}
// No arena destructor registration needed (ArenaDtor above is a no-op).
void NodeOutput::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the serialized size computed by ByteSizeLong (atomic cached-size slot).
void NodeOutput::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Returns the immutable singleton default instance, initializing the file's
// SCC on first use.
const NodeOutput& NodeOutput::default_instance() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_NodeOutput_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  return *internal_default_instance();
}
1275
1276
// Resets all fields: deletes the sub-message when heap-owned, zeroes the
// scalar, and drops unknown fields.
void NodeOutput::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.NodeOutput)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  if (GetArenaNoVirtual() == nullptr && tensor_description_ != nullptr) {
    delete tensor_description_;
  }
  tensor_description_ = nullptr;
  slot_ = 0;
  _internal_metadata_.Clear();
}
1290
1291#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Experimental table-less parser for NodeOutput.  Dispatches on field number
// (tag >> 3) and checks the exact tag byte for the expected wire type; any
// mismatch or unknown field goes through handle_unusual.  Returns nullptr on
// malformed input.
const char* NodeOutput::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
  while (!ctx->Done(&ptr)) {
    ::PROTOBUF_NAMESPACE_ID::uint32 tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    CHK_(ptr);
    switch (tag >> 3) {
      // int32 slot = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
          slot_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // .tensorflow.TensorDescription tensor_description = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
          ptr = ctx->ParseMessage(mutable_tensor_description(), ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      default: {
      handle_unusual:
        // Wire-type 4 (end-group) or tag 0 terminate this message's scope.
        if ((tag & 7) == 4 || tag == 0) {
          ctx->SetLastTag(tag);
          goto success;
        }
        ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
        CHK_(ptr != nullptr);
        continue;
      }
    }  // switch
  }  // while
success:
  return ptr;
failure:
  ptr = nullptr;
  goto success;
#undef CHK_
}
1333#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Legacy CodedInputStream parser for NodeOutput.  Known fields are merged in;
// unknown fields are preserved via SkipField.  Returns false on malformed
// input (via the DO_ macro's goto failure).
bool NodeOutput::MergePartialFromCodedStream(
    ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
  ::PROTOBUF_NAMESPACE_ID::uint32 tag;
  // @@protoc_insertion_point(parse_start:tensorflow.NodeOutput)
  for (;;) {
    ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
    tag = p.first;
    if (!p.second) goto handle_unusual;
    switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
      // int32 slot = 1;
      case 1: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
                 input, &slot_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // .tensorflow.TensorDescription tensor_description = 3;
      case 3: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
               input, mutable_tensor_description()));
        } else {
          goto handle_unusual;
        }
        break;
      }

      default: {
      handle_unusual:
        // tag == 0 marks end of stream/limit; other unknown tags are skipped
        // but retained in the unknown-field set.
        if (tag == 0) {
          goto success;
        }
        DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
              input, tag, _internal_metadata_.mutable_unknown_fields()));
        break;
      }
    }
  }
success:
  // @@protoc_insertion_point(parse_success:tensorflow.NodeOutput)
  return true;
failure:
  // @@protoc_insertion_point(parse_failure:tensorflow.NodeOutput)
  return false;
#undef DO_
}
1387#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1388
// Streams this message to a CodedOutputStream in ascending field-number
// order; default-valued/unset fields are skipped.
void NodeOutput::SerializeWithCachedSizes(
    ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
  // @@protoc_insertion_point(serialize_start:tensorflow.NodeOutput)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // int32 slot = 1;
  if (this->slot() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(1, this->slot(), output);
  }

  // .tensorflow.TensorDescription tensor_description = 3;
  if (this->has_tensor_description()) {
    // _Internal accessor dereferences without a null check; guarded by the
    // has_ check above.
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
      3, _Internal::tensor_description(this), output);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
        _internal_metadata_.unknown_fields(), output);
  }
  // @@protoc_insertion_point(serialize_end:tensorflow.NodeOutput)
}
1412
// Flat-buffer serializer: writes into `target` and returns the end pointer.
// Caller must have reserved at least ByteSizeLong() bytes.
::PROTOBUF_NAMESPACE_ID::uint8* NodeOutput::InternalSerializeWithCachedSizesToArray(
    ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.NodeOutput)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // int32 slot = 1;
  if (this->slot() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(1, this->slot(), target);
  }

  // .tensorflow.TensorDescription tensor_description = 3;
  if (this->has_tensor_description()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessageToArray(
        3, _Internal::tensor_description(this), target);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields(), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.NodeOutput)
  return target;
}
1438
// Computes and caches the serialized size of this message; "1 +" accounts
// for the one-byte field tag, and unset/default fields contribute nothing.
size_t NodeOutput::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.NodeOutput)
  size_t total_size = 0;

  if (_internal_metadata_.have_unknown_fields()) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
        _internal_metadata_.unknown_fields());
  }
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // .tensorflow.TensorDescription tensor_description = 3;
  if (this->has_tensor_description()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *tensor_description_);
  }

  // int32 slot = 1;
  if (this->slot() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
        this->slot());
  }

  int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
1470
// Generalized merge: downcasts `from` when it is actually a NodeOutput and
// uses the fast typed merge; otherwise falls back to reflection-based merge.
void NodeOutput::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.NodeOutput)
  GOOGLE_DCHECK_NE(&from, this);
  const NodeOutput* source =
      ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<NodeOutput>(
          &from);
  if (source == nullptr) {
  // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.NodeOutput)
    ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
  } else {
  // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.NodeOutput)
    MergeFrom(*source);
  }
}
1485
// Typed merge: fields set in `from` overwrite/merge into this message.
// Submessages are merged recursively; scalars are copied only when non-default.
void NodeOutput::MergeFrom(const NodeOutput& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.NodeOutput)
  GOOGLE_DCHECK_NE(&from, this);
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  if (from.has_tensor_description()) {
    mutable_tensor_description()->::tensorflow::TensorDescription::MergeFrom(from.tensor_description());
  }
  if (from.slot() != 0) {
    set_slot(from.slot());
  }
}
1500
// Generalized copy: clear then merge (standard protobuf CopyFrom contract).
void NodeOutput::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.NodeOutput)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
1507
// Typed copy: self-assignment-safe clear-then-merge.
void NodeOutput::CopyFrom(const NodeOutput& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.NodeOutput)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
1514
// Proto3 message with no required fields: always initialized.
bool NodeOutput::IsInitialized() const {
  return true;
}
1518
// O(1) member-wise swap of metadata, the owned submessage pointer, and the
// scalar field. Both messages must live on the same arena (or none).
void NodeOutput::InternalSwap(NodeOutput* other) {
  using std::swap;
  _internal_metadata_.Swap(&other->_internal_metadata_);
  swap(tensor_description_, other->tensor_description_);
  swap(slot_, other->slot_);
}
1525
// Returns descriptor/reflection metadata for this message type.
::PROTOBUF_NAMESPACE_ID::Metadata NodeOutput::GetMetadata() const {
  return GetMetadataStatic();
}
1529
1530
1531// ===================================================================
1532
// No submessage fields in MemoryStats, so the default instance needs no setup.
void MemoryStats::InitAsDefaultInstance() {
}
// Generated accessor helper class; empty because MemoryStats has no
// submessage fields that need internal access.
class MemoryStats::_Internal {
 public:
};
1538
// Heap (non-arena) default constructor.
MemoryStats::MemoryStats()
  : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
  SharedCtor();
  // @@protoc_insertion_point(constructor:tensorflow.MemoryStats)
}
// Arena constructor: repeated fields are arena-allocated so their storage is
// released with the arena rather than in the destructor.
MemoryStats::MemoryStats(::PROTOBUF_NAMESPACE_ID::Arena* arena)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
  _internal_metadata_(arena),
  persistent_tensor_alloc_ids_(arena),
  device_persistent_tensor_alloc_ids_(arena) {
  SharedCtor();
  RegisterArenaDtor(arena);
  // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryStats)
}
// Copy constructor. The scalar int64 fields are laid out contiguously in the
// class, so they are bulk-copied with a single memcpy over the address range
// [&temp_memory_size_, &device_persistent_memory_size_ + 1).
MemoryStats::MemoryStats(const MemoryStats& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _internal_metadata_(nullptr),
      persistent_tensor_alloc_ids_(from.persistent_tensor_alloc_ids_),
      device_persistent_tensor_alloc_ids_(from.device_persistent_tensor_alloc_ids_) {
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::memcpy(&temp_memory_size_, &from.temp_memory_size_,
    static_cast<size_t>(reinterpret_cast<char*>(&device_persistent_memory_size_) -
    reinterpret_cast<char*>(&temp_memory_size_)) + sizeof(device_persistent_memory_size_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryStats)
}
1564
// Common constructor body: zero the contiguous block of scalar fields
// (same address-range trick as the copy constructor).
void MemoryStats::SharedCtor() {
  ::memset(&temp_memory_size_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&device_persistent_memory_size_) -
      reinterpret_cast<char*>(&temp_memory_size_)) + sizeof(device_persistent_memory_size_));
}
1570
// Destructor; only runs real cleanup for non-arena messages (see SharedDtor).
MemoryStats::~MemoryStats() {
  // @@protoc_insertion_point(destructor:tensorflow.MemoryStats)
  SharedDtor();
}
1575
// Arena-owned instances must never reach here; the arena frees their memory.
void MemoryStats::SharedDtor() {
  GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
}
1579
// Arena destruction hook; nothing to do since no field needs a manual dtor.
void MemoryStats::ArenaDtor(void* object) {
  MemoryStats* _this = reinterpret_cast< MemoryStats* >(object);
  (void)_this;
}
// No arena-destructor registration needed for this message type.
void MemoryStats::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the byte size computed by ByteSizeLong() for later serialization.
void MemoryStats::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Lazily initializes (via InitSCC) and returns the shared immutable default.
const MemoryStats& MemoryStats::default_instance() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_MemoryStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  return *internal_default_instance();
}
1593
1594
// Resets every field to its default: empties both repeated int64 fields,
// zeroes the contiguous scalar block in one memset, and drops unknown fields.
void MemoryStats::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  persistent_tensor_alloc_ids_.Clear();
  device_persistent_tensor_alloc_ids_.Clear();
  ::memset(&temp_memory_size_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&device_persistent_memory_size_) -
      reinterpret_cast<char*>(&temp_memory_size_)) + sizeof(device_persistent_memory_size_));
  _internal_metadata_.Clear();
}
1608
1609#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Table-less fast parser (experimental parser path). Dispatches on field
// number (tag >> 3) and verifies the low byte of the tag (field number +
// wire type) before accepting a field. Repeated int64 fields accept both
// packed (length-delimited) and unpacked (single varint) encodings, as the
// wire-format spec requires. Unknown fields are preserved; END_GROUP or a
// zero tag terminates parsing via SetLastTag.
const char* MemoryStats::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
  while (!ctx->Done(&ptr)) {
    ::PROTOBUF_NAMESPACE_ID::uint32 tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    CHK_(ptr);
    switch (tag >> 3) {
      // int64 temp_memory_size = 1;
      case 1:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
          temp_memory_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 device_temp_memory_size = 2 [deprecated = true];
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
          device_temp_memory_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 persistent_memory_size = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
          persistent_memory_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 device_persistent_memory_size = 4 [deprecated = true];
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
          device_persistent_memory_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // repeated int64 persistent_tensor_alloc_ids = 5;
      case 5:
        // Tag 42 = packed encoding; tag 40 = a single unpacked varint.
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(mutable_persistent_tensor_alloc_ids(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40) {
          add_persistent_tensor_alloc_ids(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr));
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt64Parser(mutable_device_persistent_tensor_alloc_ids(), ptr, ctx);
          CHK_(ptr);
        } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 48) {
          add_device_persistent_tensor_alloc_ids(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr));
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      default: {
      handle_unusual:
        // END_GROUP tag or tag 0 ends this message; anything else is kept
        // as an unknown field for round-tripping.
        if ((tag & 7) == 4 || tag == 0) {
          ctx->SetLastTag(tag);
          goto success;
        }
        ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
        CHK_(ptr != nullptr);
        continue;
      }
    }  // switch
  }  // while
success:
  return ptr;
failure:
  ptr = nullptr;
  goto success;
#undef CHK_
}
1685#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Legacy CodedInputStream parser (non-experimental path). Reads tag-by-tag,
// dispatching on field number; repeated int64 fields accept both packed and
// unpacked wire encodings. Returns false on malformed input; unrecognized
// fields are skipped into the unknown-field set.
bool MemoryStats::MergePartialFromCodedStream(
    ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
  ::PROTOBUF_NAMESPACE_ID::uint32 tag;
  // @@protoc_insertion_point(parse_start:tensorflow.MemoryStats)
  for (;;) {
    ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
    tag = p.first;
    if (!p.second) goto handle_unusual;
    switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
      // int64 temp_memory_size = 1;
      case 1: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &temp_memory_size_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 device_temp_memory_size = 2 [deprecated = true];
      case 2: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &device_temp_memory_size_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 persistent_memory_size = 3;
      case 3: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &persistent_memory_size_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 device_persistent_memory_size = 4 [deprecated = true];
      case 4: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &device_persistent_memory_size_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated int64 persistent_tensor_alloc_ids = 5;
      case 5: {
        // Low byte 42 = packed; 40 = unpacked single element.
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (42 & 0xFF)) {
          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPackedPrimitive<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, this->mutable_persistent_tensor_alloc_ids())));
        } else if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 1, 42u, input, this->mutable_persistent_tensor_alloc_ids())));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
      case 6: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) {
          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPackedPrimitive<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, this->mutable_device_persistent_tensor_alloc_ids())));
        } else if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (48 & 0xFF)) {
          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
                   ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 1, 50u, input, this->mutable_device_persistent_tensor_alloc_ids())));
        } else {
          goto handle_unusual;
        }
        break;
      }

      default: {
      handle_unusual:
        // Tag 0 means end of stream/message; anything else unrecognized is
        // copied into the unknown-field set.
        if (tag == 0) {
          goto success;
        }
        DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
              input, tag, _internal_metadata_.mutable_unknown_fields()));
        break;
      }
    }
  }
success:
  // @@protoc_insertion_point(parse_success:tensorflow.MemoryStats)
  return true;
failure:
  // @@protoc_insertion_point(parse_failure:tensorflow.MemoryStats)
  return false;
#undef DO_
}
1799#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1800
// Streams the message to a CodedOutputStream. Scalars are emitted only when
// non-default; the repeated int64 fields use packed encoding, whose
// length prefix is the byte size cached by ByteSizeLong() — ByteSizeLong()
// must have run on this exact state first.
void MemoryStats::SerializeWithCachedSizes(
    ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
  // @@protoc_insertion_point(serialize_start:tensorflow.MemoryStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 temp_memory_size = 1;
  if (this->temp_memory_size() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->temp_memory_size(), output);
  }

  // int64 device_temp_memory_size = 2 [deprecated = true];
  if (this->device_temp_memory_size() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(2, this->device_temp_memory_size(), output);
  }

  // int64 persistent_memory_size = 3;
  if (this->persistent_memory_size() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(3, this->persistent_memory_size(), output);
  }

  // int64 device_persistent_memory_size = 4 [deprecated = true];
  if (this->device_persistent_memory_size() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(4, this->device_persistent_memory_size(), output);
  }

  // repeated int64 persistent_tensor_alloc_ids = 5;
  // Packed: tag + cached total byte length, then the raw varints.
  if (this->persistent_tensor_alloc_ids_size() > 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTag(5, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
    output->WriteVarint32(_persistent_tensor_alloc_ids_cached_byte_size_.load(
        std::memory_order_relaxed));
  }
  for (int i = 0, n = this->persistent_tensor_alloc_ids_size(); i < n; i++) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64NoTag(
      this->persistent_tensor_alloc_ids(i), output);
  }

  // repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
  if (this->device_persistent_tensor_alloc_ids_size() > 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTag(6, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
    output->WriteVarint32(_device_persistent_tensor_alloc_ids_cached_byte_size_.load(
        std::memory_order_relaxed));
  }
  for (int i = 0, n = this->device_persistent_tensor_alloc_ids_size(); i < n; i++) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64NoTag(
      this->device_persistent_tensor_alloc_ids(i), output);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
        _internal_metadata_.unknown_fields(), output);
  }
  // @@protoc_insertion_point(serialize_end:tensorflow.MemoryStats)
}
1855
// Flat-buffer serialization variant of SerializeWithCachedSizes; same field
// order and packed encoding, writing into `target` and returning the new end
// pointer. Depends on the packed byte sizes cached by ByteSizeLong().
::PROTOBUF_NAMESPACE_ID::uint8* MemoryStats::InternalSerializeWithCachedSizesToArray(
    ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // int64 temp_memory_size = 1;
  if (this->temp_memory_size() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->temp_memory_size(), target);
  }

  // int64 device_temp_memory_size = 2 [deprecated = true];
  if (this->device_temp_memory_size() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(2, this->device_temp_memory_size(), target);
  }

  // int64 persistent_memory_size = 3;
  if (this->persistent_memory_size() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(3, this->persistent_memory_size(), target);
  }

  // int64 device_persistent_memory_size = 4 [deprecated = true];
  if (this->device_persistent_memory_size() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(4, this->device_persistent_memory_size(), target);
  }

  // repeated int64 persistent_tensor_alloc_ids = 5;
  if (this->persistent_tensor_alloc_ids_size() > 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTagToArray(
      5,
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
      target);
    target = ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream::WriteVarint32ToArray(
        _persistent_tensor_alloc_ids_cached_byte_size_.load(std::memory_order_relaxed),
        target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      WriteInt64NoTagToArray(this->persistent_tensor_alloc_ids_, target);
  }

  // repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
  if (this->device_persistent_tensor_alloc_ids_size() > 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTagToArray(
      6,
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
      target);
    target = ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream::WriteVarint32ToArray(
        _device_persistent_tensor_alloc_ids_cached_byte_size_.load(std::memory_order_relaxed),
        target);
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      WriteInt64NoTagToArray(this->device_persistent_tensor_alloc_ids_, target);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields(), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryStats)
  return target;
}
1915
// Computes the serialized size. Side effect: stores the payload byte size of
// each packed repeated field into its atomic cache, which the serializers
// later read for the packed length prefix — so this must run before
// SerializeWithCachedSizes / ...ToArray on the same state.
size_t MemoryStats::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryStats)
  size_t total_size = 0;

  if (_internal_metadata_.have_unknown_fields()) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
        _internal_metadata_.unknown_fields());
  }
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated int64 persistent_tensor_alloc_ids = 5;
  {
    size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      Int64Size(this->persistent_tensor_alloc_ids_);
    // When the field is non-empty, add one tag byte plus the varint-encoded
    // length prefix of the packed payload.
    if (data_size > 0) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
            static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size));
    }
    int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size);
    _persistent_tensor_alloc_ids_cached_byte_size_.store(cached_size,
        std::memory_order_relaxed);
    total_size += data_size;
  }

  // repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
  {
    size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      Int64Size(this->device_persistent_tensor_alloc_ids_);
    if (data_size > 0) {
      total_size += 1 +
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
            static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size));
    }
    int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size);
    _device_persistent_tensor_alloc_ids_cached_byte_size_.store(cached_size,
        std::memory_order_relaxed);
    total_size += data_size;
  }

  // int64 temp_memory_size = 1;
  if (this->temp_memory_size() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->temp_memory_size());
  }

  // int64 device_temp_memory_size = 2 [deprecated = true];
  if (this->device_temp_memory_size() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->device_temp_memory_size());
  }

  // int64 persistent_memory_size = 3;
  if (this->persistent_memory_size() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->persistent_memory_size());
  }

  // int64 device_persistent_memory_size = 4 [deprecated = true];
  if (this->device_persistent_memory_size() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->device_persistent_memory_size());
  }

  int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
1991
// Generalized merge: uses the fast typed path when `from` is a MemoryStats,
// otherwise the reflection-based fallback.
void MemoryStats::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.MemoryStats)
  GOOGLE_DCHECK_NE(&from, this);
  const MemoryStats* source =
      ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<MemoryStats>(
          &from);
  if (source == nullptr) {
  // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.MemoryStats)
    ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
  } else {
  // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.MemoryStats)
    MergeFrom(*source);
  }
}
2006
// Typed merge: repeated fields are appended; scalar fields are overwritten
// only when set to a non-default value in `from`.
void MemoryStats::MergeFrom(const MemoryStats& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryStats)
  GOOGLE_DCHECK_NE(&from, this);
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  persistent_tensor_alloc_ids_.MergeFrom(from.persistent_tensor_alloc_ids_);
  device_persistent_tensor_alloc_ids_.MergeFrom(from.device_persistent_tensor_alloc_ids_);
  if (from.temp_memory_size() != 0) {
    set_temp_memory_size(from.temp_memory_size());
  }
  if (from.device_temp_memory_size() != 0) {
    set_device_temp_memory_size(from.device_temp_memory_size());
  }
  if (from.persistent_memory_size() != 0) {
    set_persistent_memory_size(from.persistent_memory_size());
  }
  if (from.device_persistent_memory_size() != 0) {
    set_device_persistent_memory_size(from.device_persistent_memory_size());
  }
}
2029
// Generalized copy: clear then merge.
void MemoryStats::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.MemoryStats)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
2036
// Typed copy: self-assignment-safe clear-then-merge.
void MemoryStats::CopyFrom(const MemoryStats& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryStats)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
2043
// Proto3 message with no required fields: always initialized.
bool MemoryStats::IsInitialized() const {
  return true;
}
2047
// O(1) member-wise swap; repeated fields swap internal buffers, scalars swap
// by value. Both messages must share the same arena (or none).
void MemoryStats::InternalSwap(MemoryStats* other) {
  using std::swap;
  _internal_metadata_.Swap(&other->_internal_metadata_);
  persistent_tensor_alloc_ids_.InternalSwap(&other->persistent_tensor_alloc_ids_);
  device_persistent_tensor_alloc_ids_.InternalSwap(&other->device_persistent_tensor_alloc_ids_);
  swap(temp_memory_size_, other->temp_memory_size_);
  swap(device_temp_memory_size_, other->device_temp_memory_size_);
  swap(persistent_memory_size_, other->persistent_memory_size_);
  swap(device_persistent_memory_size_, other->device_persistent_memory_size_);
}
2058
// Returns descriptor/reflection metadata for this message type.
::PROTOBUF_NAMESPACE_ID::Metadata MemoryStats::GetMetadata() const {
  return GetMetadataStatic();
}
2062
2063
2064// ===================================================================
2065
// Points the default instance's memory_stats_ at MemoryStats' default
// instance, so accessors on the default never see a null submessage.
void NodeExecStats::InitAsDefaultInstance() {
  ::tensorflow::_NodeExecStats_default_instance_._instance.get_mutable()->memory_stats_ = const_cast< ::tensorflow::MemoryStats*>(
      ::tensorflow::MemoryStats::internal_default_instance());
}
// Generated helper exposing direct (unchecked) access to submessage fields
// for the serializer.
class NodeExecStats::_Internal {
 public:
  static const ::tensorflow::MemoryStats& memory_stats(const NodeExecStats* msg);
};
2074
// Unchecked dereference of memory_stats_; callers must have verified
// has_memory_stats() first.
const ::tensorflow::MemoryStats&
NodeExecStats::_Internal::memory_stats(const NodeExecStats* msg) {
  return *msg->memory_stats_;
}
// Clears the repeated referenced_tensor field (defined out-of-line because
// AllocationDescription is declared in another .proto file).
void NodeExecStats::clear_referenced_tensor() {
  referenced_tensor_.Clear();
}
// Takes ownership of `memory_stats` WITHOUT any arena transfer ("unsafe"):
// the caller guarantees the pointer's arena matches this message's arena.
// The previous value is deleted only for heap (non-arena) messages.
// The empty if/else is generated scaffolding for types that need extra
// bookkeeping here; this one does not.
void NodeExecStats::unsafe_arena_set_allocated_memory_stats(
    ::tensorflow::MemoryStats* memory_stats) {
  if (GetArenaNoVirtual() == nullptr) {
    delete memory_stats_;
  }
  memory_stats_ = memory_stats;
  if (memory_stats) {
    
  } else {
    
  }
  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.NodeExecStats.memory_stats)
}
// Heap (non-arena) default constructor.
NodeExecStats::NodeExecStats()
  : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
  SharedCtor();
  // @@protoc_insertion_point(constructor:tensorflow.NodeExecStats)
}
// Arena constructor: the three repeated message fields allocate on the arena.
NodeExecStats::NodeExecStats(::PROTOBUF_NAMESPACE_ID::Arena* arena)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
  _internal_metadata_(arena),
  memory_(arena),
  output_(arena),
  referenced_tensor_(arena) {
  SharedCtor();
  RegisterArenaDtor(arena);
  // @@protoc_insertion_point(arena_constructor:tensorflow.NodeExecStats)
}
// Copy constructor: deep-copies strings (only when non-empty, to avoid
// touching the shared empty-string default), clones the memory_stats
// submessage when present, and bulk-copies the contiguous scalar fields
// between all_start_micros_ and thread_id_ with one memcpy.
NodeExecStats::NodeExecStats(const NodeExecStats& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _internal_metadata_(nullptr),
      memory_(from.memory_),
      output_(from.output_),
      referenced_tensor_(from.referenced_tensor_) {
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  node_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  if (!from.node_name().empty()) {
    node_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.node_name(),
      GetArenaNoVirtual());
  }
  timeline_label_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  if (!from.timeline_label().empty()) {
    timeline_label_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.timeline_label(),
      GetArenaNoVirtual());
  }
  if (from.has_memory_stats()) {
    memory_stats_ = new ::tensorflow::MemoryStats(*from.memory_stats_);
  } else {
    memory_stats_ = nullptr;
  }
  ::memcpy(&all_start_micros_, &from.all_start_micros_,
    static_cast<size_t>(reinterpret_cast<char*>(&thread_id_) -
    reinterpret_cast<char*>(&all_start_micros_)) + sizeof(thread_id_));
  // @@protoc_insertion_point(copy_constructor:tensorflow.NodeExecStats)
}
2137
// Common constructor body: ensures this type's SCC (and its dependencies) is
// initialized, points string fields at the shared empty default, and zeroes
// the contiguous range from memory_stats_ (pointer) through thread_id_.
void NodeExecStats::SharedCtor() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_NodeExecStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  node_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  timeline_label_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  ::memset(&memory_stats_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&thread_id_) -
      reinterpret_cast<char*>(&memory_stats_)) + sizeof(thread_id_));
}
2146
// Destructor; only runs real cleanup for non-arena messages (see SharedDtor).
NodeExecStats::~NodeExecStats() {
  // @@protoc_insertion_point(destructor:tensorflow.NodeExecStats)
  SharedDtor();
}
2151
// Frees heap-owned fields. The default instance shares memory_stats_ with
// MemoryStats' default, so it must not delete it — hence the self check.
void NodeExecStats::SharedDtor() {
  GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
  node_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  timeline_label_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  if (this != internal_default_instance()) delete memory_stats_;
}
2158
// Arena destruction hook; nothing to do since no field needs a manual dtor.
void NodeExecStats::ArenaDtor(void* object) {
  NodeExecStats* _this = reinterpret_cast< NodeExecStats* >(object);
  (void)_this;
}
// No arena-destructor registration needed for this message type.
void NodeExecStats::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Stores the byte size computed by ByteSizeLong() for later serialization.
void NodeExecStats::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Lazily initializes (via InitSCC) and returns the shared immutable default.
const NodeExecStats& NodeExecStats::default_instance() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_NodeExecStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  return *internal_default_instance();
}
2172
2173
// Resets all fields to defaults: clears repeated fields and strings, deletes
// the owned memory_stats submessage (heap messages only — arena memory is
// reclaimed by the arena), zeroes the contiguous scalar block, and drops
// unknown fields.
void NodeExecStats::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.NodeExecStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  memory_.Clear();
  output_.Clear();
  referenced_tensor_.Clear();
  node_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
  timeline_label_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
  if (GetArenaNoVirtual() == nullptr && memory_stats_ != nullptr) {
    delete memory_stats_;
  }
  memory_stats_ = nullptr;
  ::memset(&all_start_micros_, 0, static_cast<size_t>(
      reinterpret_cast<char*>(&thread_id_) -
      reinterpret_cast<char*>(&all_start_micros_)) + sizeof(thread_id_));
  _internal_metadata_.Clear();
}
2194
2195#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Experimental table-driven-free parser: walks the wire-format buffer
// directly, dispatching on the field number (tag >> 3) and checking the
// expected wire type through the low byte of the tag.  On success returns
// the advanced buffer pointer; on malformed input returns nullptr (via the
// `failure` label below).  Generated code — field cases mirror the .proto.
const char* NodeExecStats::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
// CHK_ aborts parsing (returns nullptr) as soon as any sub-parse fails.
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
  while (!ctx->Done(&ptr)) {
    ::PROTOBUF_NAMESPACE_ID::uint32 tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    CHK_(ptr);
    switch (tag >> 3) {
      // string node_name = 1;
      case 1:
        // Expected tag byte 10 = (field 1 << 3) | wire type 2 (length-delimited).
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
          // Greedy string parse that also enforces UTF-8 validity.
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_node_name(), ptr, ctx, "tensorflow.NodeExecStats.node_name");
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 all_start_micros = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
          all_start_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 op_start_rel_micros = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
          op_start_rel_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 op_end_rel_micros = 4;
      case 4:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
          op_end_rel_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 all_end_rel_micros = 5;
      case 5:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
          all_end_rel_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // repeated .tensorflow.AllocatorMemoryUsed memory = 6;
      case 6:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) {
          // Tight loop over consecutive records with the same tag byte:
          // back up one byte, then re-consume the tag on each iteration.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(add_memory(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 50);
        } else goto handle_unusual;
        continue;
      // repeated .tensorflow.NodeOutput output = 7;
      case 7:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 58)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(add_output(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 58);
        } else goto handle_unusual;
        continue;
      // string timeline_label = 8;
      case 8:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 66)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_timeline_label(), ptr, ctx, "tensorflow.NodeExecStats.timeline_label");
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 scheduled_micros = 9;
      case 9:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 72)) {
          scheduled_micros_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // uint32 thread_id = 10;
      case 10:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 80)) {
          thread_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // repeated .tensorflow.AllocationDescription referenced_tensor = 11;
      case 11:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 90)) {
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(add_referenced_tensor(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 90);
        } else goto handle_unusual;
        continue;
      // .tensorflow.MemoryStats memory_stats = 12;
      case 12:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 98)) {
          // Singular submessage: repeated occurrences merge into the same field.
          ptr = ctx->ParseMessage(mutable_memory_stats(), ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 all_start_nanos = 13;
      case 13:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 104)) {
          all_start_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 op_start_rel_nanos = 14;
      case 14:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 112)) {
          op_start_rel_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 op_end_rel_nanos = 15;
      case 15:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 120)) {
          op_end_rel_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 all_end_rel_nanos = 16;
      case 16:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 128)) {
          all_end_rel_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // int64 scheduled_nanos = 17;
      case 17:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 136)) {
          scheduled_nanos_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      default: {
      handle_unusual:
        // End-group tag or zero tag terminates this (sub)message; anything
        // else is an unknown field and is preserved in the metadata.
        if ((tag & 7) == 4 || tag == 0) {
          ctx->SetLastTag(tag);
          goto success;
        }
        ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
        CHK_(ptr != nullptr);
        continue;
      }
    }  // switch
  }  // while
success:
  return ptr;
failure:
  // nullptr signals a parse error to the caller.
  ptr = nullptr;
  goto success;
#undef CHK_
}
2357#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Legacy (non-experimental) parser: reads tag/value pairs from a
// CodedInputStream and merges them into this message.  Returns true when
// the stream ends cleanly (tag 0 or an end-group handled by the caller),
// false on any read/parse error.  Generated code — one case per field.
bool NodeExecStats::MergePartialFromCodedStream(
    ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
// DO_ bails out to `failure` (return false) if a sub-read fails.
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
  ::PROTOBUF_NAMESPACE_ID::uint32 tag;
  // @@protoc_insertion_point(parse_start:tensorflow.NodeExecStats)
  for (;;) {
    // Fast path for tags below the cutoff; p.second is false for
    // larger/invalid tags and end-of-stream.
    ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(16383u);
    tag = p.first;
    if (!p.second) goto handle_unusual;
    switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
      // string node_name = 1;
      case 1: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
                input, this->mutable_node_name()));
          // proto3 strings must be valid UTF-8; reject otherwise.
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
            this->node_name().data(), static_cast<int>(this->node_name().length()),
            ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
            "tensorflow.NodeExecStats.node_name"));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 all_start_micros = 2;
      case 2: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &all_start_micros_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 op_start_rel_micros = 3;
      case 3: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &op_start_rel_micros_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 op_end_rel_micros = 4;
      case 4: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &op_end_rel_micros_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 all_end_rel_micros = 5;
      case 5: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &all_end_rel_micros_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated .tensorflow.AllocatorMemoryUsed memory = 6;
      case 6: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) {
          // Each occurrence appends a new element to the repeated field.
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
                input, add_memory()));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated .tensorflow.NodeOutput output = 7;
      case 7: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (58 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
                input, add_output()));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // string timeline_label = 8;
      case 8: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (66 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
                input, this->mutable_timeline_label()));
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
            this->timeline_label().data(), static_cast<int>(this->timeline_label().length()),
            ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
            "tensorflow.NodeExecStats.timeline_label"));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 scheduled_micros = 9;
      case 9: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (72 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &scheduled_micros_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // uint32 thread_id = 10;
      case 10: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (80 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::uint32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_UINT32>(
                 input, &thread_id_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated .tensorflow.AllocationDescription referenced_tensor = 11;
      case 11: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (90 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
                input, add_referenced_tensor()));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // .tensorflow.MemoryStats memory_stats = 12;
      case 12: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (98 & 0xFF)) {
          // Singular submessage: repeated occurrences merge into one field.
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
                input, mutable_memory_stats()));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 all_start_nanos = 13;
      case 13: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (104 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &all_start_nanos_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 op_start_rel_nanos = 14;
      case 14: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (112 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &op_start_rel_nanos_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 op_end_rel_nanos = 15;
      case 15: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (120 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &op_end_rel_nanos_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 all_end_rel_nanos = 16;
      case 16: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (128 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &all_end_rel_nanos_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // int64 scheduled_nanos = 17;
      case 17: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (136 & 0xFF)) {

          DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
                 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
                 input, &scheduled_nanos_)));
        } else {
          goto handle_unusual;
        }
        break;
      }

      default: {
      handle_unusual:
        // Tag 0 means end of stream; any other unexpected tag is skipped
        // and preserved in the unknown-field set.
        if (tag == 0) {
          goto success;
        }
        DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
              input, tag, _internal_metadata_.mutable_unknown_fields()));
        break;
      }
    }
  }
success:
  // @@protoc_insertion_point(parse_success:tensorflow.NodeExecStats)
  return true;
failure:
  // @@protoc_insertion_point(parse_failure:tensorflow.NodeExecStats)
  return false;
#undef DO_
}
2604#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2605
// Stream serializer: writes every non-default field to `output` in field
// number order, then any preserved unknown fields.  Proto3 semantics:
// scalars equal to 0 and empty strings are skipped entirely.  Relies on
// sizes cached by a prior ByteSizeLong() call (generated-code contract).
void NodeExecStats::SerializeWithCachedSizes(
    ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
  // @@protoc_insertion_point(serialize_start:tensorflow.NodeExecStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // string node_name = 1;
  if (this->node_name().size() > 0) {
    // UTF-8 is verified on serialize as well as parse (proto3 strings).
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->node_name().data(), static_cast<int>(this->node_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.NodeExecStats.node_name");
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
      1, this->node_name(), output);
  }

  // int64 all_start_micros = 2;
  if (this->all_start_micros() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(2, this->all_start_micros(), output);
  }

  // int64 op_start_rel_micros = 3;
  if (this->op_start_rel_micros() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(3, this->op_start_rel_micros(), output);
  }

  // int64 op_end_rel_micros = 4;
  if (this->op_end_rel_micros() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(4, this->op_end_rel_micros(), output);
  }

  // int64 all_end_rel_micros = 5;
  if (this->all_end_rel_micros() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(5, this->all_end_rel_micros(), output);
  }

  // repeated .tensorflow.AllocatorMemoryUsed memory = 6;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->memory_size()); i < n; i++) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
      6,
      this->memory(static_cast<int>(i)),
      output);
  }

  // repeated .tensorflow.NodeOutput output = 7;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->output_size()); i < n; i++) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
      7,
      this->output(static_cast<int>(i)),
      output);
  }

  // string timeline_label = 8;
  if (this->timeline_label().size() > 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->timeline_label().data(), static_cast<int>(this->timeline_label().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.NodeExecStats.timeline_label");
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
      8, this->timeline_label(), output);
  }

  // int64 scheduled_micros = 9;
  if (this->scheduled_micros() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(9, this->scheduled_micros(), output);
  }

  // uint32 thread_id = 10;
  if (this->thread_id() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteUInt32(10, this->thread_id(), output);
  }

  // repeated .tensorflow.AllocationDescription referenced_tensor = 11;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->referenced_tensor_size()); i < n; i++) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
      11,
      this->referenced_tensor(static_cast<int>(i)),
      output);
  }

  // .tensorflow.MemoryStats memory_stats = 12;
  if (this->has_memory_stats()) {
    // _Internal::memory_stats dereferences the raw pointer member directly.
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
      12, _Internal::memory_stats(this), output);
  }

  // int64 all_start_nanos = 13;
  if (this->all_start_nanos() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(13, this->all_start_nanos(), output);
  }

  // int64 op_start_rel_nanos = 14;
  if (this->op_start_rel_nanos() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(14, this->op_start_rel_nanos(), output);
  }

  // int64 op_end_rel_nanos = 15;
  if (this->op_end_rel_nanos() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(15, this->op_end_rel_nanos(), output);
  }

  // int64 all_end_rel_nanos = 16;
  if (this->all_end_rel_nanos() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(16, this->all_end_rel_nanos(), output);
  }

  // int64 scheduled_nanos = 17;
  if (this->scheduled_nanos() != 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(17, this->scheduled_nanos(), output);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
        _internal_metadata_.unknown_fields(), output);
  }
  // @@protoc_insertion_point(serialize_end:tensorflow.NodeExecStats)
}
2726
// Flat-buffer serializer: identical field order and skip-default logic to
// SerializeWithCachedSizes, but writes into a caller-provided byte array
// and returns the pointer one past the last written byte.  The caller must
// have sized the buffer via ByteSizeLong() (generated-code contract).
::PROTOBUF_NAMESPACE_ID::uint8* NodeExecStats::InternalSerializeWithCachedSizesToArray(
    ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.NodeExecStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // string node_name = 1;
  if (this->node_name().size() > 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->node_name().data(), static_cast<int>(this->node_name().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.NodeExecStats.node_name");
    target =
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
        1, this->node_name(), target);
  }

  // int64 all_start_micros = 2;
  if (this->all_start_micros() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(2, this->all_start_micros(), target);
  }

  // int64 op_start_rel_micros = 3;
  if (this->op_start_rel_micros() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(3, this->op_start_rel_micros(), target);
  }

  // int64 op_end_rel_micros = 4;
  if (this->op_end_rel_micros() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(4, this->op_end_rel_micros(), target);
  }

  // int64 all_end_rel_micros = 5;
  if (this->all_end_rel_micros() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(5, this->all_end_rel_micros(), target);
  }

  // repeated .tensorflow.AllocatorMemoryUsed memory = 6;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->memory_size()); i < n; i++) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessageToArray(
        6, this->memory(static_cast<int>(i)), target);
  }

  // repeated .tensorflow.NodeOutput output = 7;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->output_size()); i < n; i++) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessageToArray(
        7, this->output(static_cast<int>(i)), target);
  }

  // string timeline_label = 8;
  if (this->timeline_label().size() > 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->timeline_label().data(), static_cast<int>(this->timeline_label().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.NodeExecStats.timeline_label");
    target =
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
        8, this->timeline_label(), target);
  }

  // int64 scheduled_micros = 9;
  if (this->scheduled_micros() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(9, this->scheduled_micros(), target);
  }

  // uint32 thread_id = 10;
  if (this->thread_id() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteUInt32ToArray(10, this->thread_id(), target);
  }

  // repeated .tensorflow.AllocationDescription referenced_tensor = 11;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->referenced_tensor_size()); i < n; i++) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessageToArray(
        11, this->referenced_tensor(static_cast<int>(i)), target);
  }

  // .tensorflow.MemoryStats memory_stats = 12;
  if (this->has_memory_stats()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessageToArray(
        12, _Internal::memory_stats(this), target);
  }

  // int64 all_start_nanos = 13;
  if (this->all_start_nanos() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(13, this->all_start_nanos(), target);
  }

  // int64 op_start_rel_nanos = 14;
  if (this->op_start_rel_nanos() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(14, this->op_start_rel_nanos(), target);
  }

  // int64 op_end_rel_nanos = 15;
  if (this->op_end_rel_nanos() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(15, this->op_end_rel_nanos(), target);
  }

  // int64 all_end_rel_nanos = 16;
  if (this->all_end_rel_nanos() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(16, this->all_end_rel_nanos(), target);
  }

  // int64 scheduled_nanos = 17;
  if (this->scheduled_nanos() != 0) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(17, this->scheduled_nanos(), target);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields(), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.NodeExecStats)
  return target;
}
2848
// Computes the exact serialized size of this message (unknown fields
// included) and caches it for the serializers above.  Per-field tag
// overhead is 1 byte for field numbers 1-15 and 2 bytes for 16-17 — hence
// the `+ 2` for all_end_rel_nanos/scheduled_nanos below.
size_t NodeExecStats::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.NodeExecStats)
  size_t total_size = 0;

  if (_internal_metadata_.have_unknown_fields()) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
        _internal_metadata_.unknown_fields());
  }
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.AllocatorMemoryUsed memory = 6;
  {
    unsigned int count = static_cast<unsigned int>(this->memory_size());
    // 1 tag byte per element, plus each element's length-prefixed payload.
    total_size += 1UL * count;
    for (unsigned int i = 0; i < count; i++) {
      total_size +=
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          this->memory(static_cast<int>(i)));
    }
  }

  // repeated .tensorflow.NodeOutput output = 7;
  {
    unsigned int count = static_cast<unsigned int>(this->output_size());
    total_size += 1UL * count;
    for (unsigned int i = 0; i < count; i++) {
      total_size +=
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          this->output(static_cast<int>(i)));
    }
  }

  // repeated .tensorflow.AllocationDescription referenced_tensor = 11;
  {
    unsigned int count = static_cast<unsigned int>(this->referenced_tensor_size());
    total_size += 1UL * count;
    for (unsigned int i = 0; i < count; i++) {
      total_size +=
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          this->referenced_tensor(static_cast<int>(i)));
    }
  }

  // string node_name = 1;
  if (this->node_name().size() > 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->node_name());
  }

  // string timeline_label = 8;
  if (this->timeline_label().size() > 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->timeline_label());
  }

  // .tensorflow.MemoryStats memory_stats = 12;
  if (this->has_memory_stats()) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
        *memory_stats_);
  }

  // int64 all_start_micros = 2;
  if (this->all_start_micros() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->all_start_micros());
  }

  // int64 op_start_rel_micros = 3;
  if (this->op_start_rel_micros() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->op_start_rel_micros());
  }

  // int64 op_end_rel_micros = 4;
  if (this->op_end_rel_micros() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->op_end_rel_micros());
  }

  // int64 all_end_rel_micros = 5;
  if (this->all_end_rel_micros() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->all_end_rel_micros());
  }

  // int64 scheduled_micros = 9;
  if (this->scheduled_micros() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->scheduled_micros());
  }

  // int64 all_start_nanos = 13;
  if (this->all_start_nanos() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->all_start_nanos());
  }

  // int64 op_start_rel_nanos = 14;
  if (this->op_start_rel_nanos() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->op_start_rel_nanos());
  }

  // int64 op_end_rel_nanos = 15;
  if (this->op_end_rel_nanos() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->op_end_rel_nanos());
  }

  // int64 all_end_rel_nanos = 16;  (field number >= 16 -> 2-byte tag)
  if (this->all_end_rel_nanos() != 0) {
    total_size += 2 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->all_end_rel_nanos());
  }

  // int64 scheduled_nanos = 17;  (field number >= 16 -> 2-byte tag)
  if (this->scheduled_nanos() != 0) {
    total_size += 2 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
        this->scheduled_nanos());
  }

  // uint32 thread_id = 10;
  if (this->thread_id() != 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::UInt32Size(
        this->thread_id());
  }

  // Cache the size so SerializeWithCachedSizes* can rely on it.
  int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
2997
// Generalized merge: if `from` is actually a NodeExecStats, use the fast
// type-specific MergeFrom; otherwise fall back to reflection-based merging.
void NodeExecStats::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.NodeExecStats)
  GOOGLE_DCHECK_NE(&from, this);
  const NodeExecStats* source =
      ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<NodeExecStats>(
          &from);
  if (source == nullptr) {
  // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.NodeExecStats)
    // Different concrete type: merge field-by-field via reflection.
    ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
  } else {
  // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.NodeExecStats)
    MergeFrom(*source);
  }
}
3012
// Type-specific merge (proto3 semantics): repeated fields are appended,
// strings overwrite when non-empty, the singular submessage is merged
// recursively, and scalars overwrite only when `from`'s value is non-zero
// (the proto3 default), so defaults in `from` never clobber set values here.
void NodeExecStats::MergeFrom(const NodeExecStats& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.NodeExecStats)
  GOOGLE_DCHECK_NE(&from, this);
  // Carry over unknown fields too.
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // Repeated fields: concatenate.
  memory_.MergeFrom(from.memory_);
  output_.MergeFrom(from.output_);
  referenced_tensor_.MergeFrom(from.referenced_tensor_);
  if (from.node_name().size() > 0) {
    set_node_name(from.node_name());
  }
  if (from.timeline_label().size() > 0) {
    set_timeline_label(from.timeline_label());
  }
  if (from.has_memory_stats()) {
    // Recursive merge rather than overwrite for the submessage.
    mutable_memory_stats()->::tensorflow::MemoryStats::MergeFrom(from.memory_stats());
  }
  if (from.all_start_micros() != 0) {
    set_all_start_micros(from.all_start_micros());
  }
  if (from.op_start_rel_micros() != 0) {
    set_op_start_rel_micros(from.op_start_rel_micros());
  }
  if (from.op_end_rel_micros() != 0) {
    set_op_end_rel_micros(from.op_end_rel_micros());
  }
  if (from.all_end_rel_micros() != 0) {
    set_all_end_rel_micros(from.all_end_rel_micros());
  }
  if (from.scheduled_micros() != 0) {
    set_scheduled_micros(from.scheduled_micros());
  }
  if (from.all_start_nanos() != 0) {
    set_all_start_nanos(from.all_start_nanos());
  }
  if (from.op_start_rel_nanos() != 0) {
    set_op_start_rel_nanos(from.op_start_rel_nanos());
  }
  if (from.op_end_rel_nanos() != 0) {
    set_op_end_rel_nanos(from.op_end_rel_nanos());
  }
  if (from.all_end_rel_nanos() != 0) {
    set_all_end_rel_nanos(from.all_end_rel_nanos());
  }
  if (from.scheduled_nanos() != 0) {
    set_scheduled_nanos(from.scheduled_nanos());
  }
  if (from.thread_id() != 0) {
    set_thread_id(from.thread_id());
  }
}
3066
// Generalized copy: clear this message, then merge from `from`.
// Self-copy is a no-op.
void NodeExecStats::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.NodeExecStats)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
3073
// Type-specific copy: Clear + fast MergeFrom.  Self-copy is a no-op.
void NodeExecStats::CopyFrom(const NodeExecStats& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.NodeExecStats)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}
3080
// Proto3 messages have no required fields, so they are always initialized.
bool NodeExecStats::IsInitialized() const {
  return true;
}
3084
// O(1) swap of all field storage with `other`.  NOTE(review): generated
// InternalSwap assumes both messages live on the same arena (or both on
// the heap); arena-aware callers go through the public Swap() wrapper.
void NodeExecStats::InternalSwap(NodeExecStats* other) {
  using std::swap;
  _internal_metadata_.Swap(&other->_internal_metadata_);
  // Repeated fields swap their underlying RepeatedPtrField storage.
  CastToBase(&memory_)->InternalSwap(CastToBase(&other->memory_));
  CastToBase(&output_)->InternalSwap(CastToBase(&other->output_));
  CastToBase(&referenced_tensor_)->InternalSwap(CastToBase(&other->referenced_tensor_));
  // ArenaStringPtr swap needs the shared empty-string sentinel and arena.
  node_name_.Swap(&other->node_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
    GetArenaNoVirtual());
  timeline_label_.Swap(&other->timeline_label_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
    GetArenaNoVirtual());
  // Pointer and scalar members swap by value.
  swap(memory_stats_, other->memory_stats_);
  swap(all_start_micros_, other->all_start_micros_);
  swap(op_start_rel_micros_, other->op_start_rel_micros_);
  swap(op_end_rel_micros_, other->op_end_rel_micros_);
  swap(all_end_rel_micros_, other->all_end_rel_micros_);
  swap(scheduled_micros_, other->scheduled_micros_);
  swap(all_start_nanos_, other->all_start_nanos_);
  swap(op_start_rel_nanos_, other->op_start_rel_nanos_);
  swap(op_end_rel_nanos_, other->op_end_rel_nanos_);
  swap(all_end_rel_nanos_, other->all_end_rel_nanos_);
  swap(scheduled_nanos_, other->scheduled_nanos_);
  swap(thread_id_, other->thread_id_);
}
3108
3109::PROTOBUF_NAMESPACE_ID::Metadata NodeExecStats::GetMetadata() const {
3110 return GetMetadataStatic();
3111}
3112
3113
3114// ===================================================================
3115
// Synthetic map-entry message for the thread_names map field
// (map<uint32, string>). Not part of the public API — used only by the
// MapField machinery; hence the DoNotUse suffix.
DeviceStepStats_ThreadNamesEntry_DoNotUse::DeviceStepStats_ThreadNamesEntry_DoNotUse() {}
DeviceStepStats_ThreadNamesEntry_DoNotUse::DeviceStepStats_ThreadNamesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
    : SuperType(arena) {}
// Type-safe merge for same-type map entries.
void DeviceStepStats_ThreadNamesEntry_DoNotUse::MergeFrom(const DeviceStepStats_ThreadNamesEntry_DoNotUse& other) {
  MergeFromInternal(other);
}
::PROTOBUF_NAMESPACE_ID::Metadata DeviceStepStats_ThreadNamesEntry_DoNotUse::GetMetadata() const {
  return GetMetadataStatic();
}
// Generalized merge falls back to the reflection-based base implementation.
void DeviceStepStats_ThreadNamesEntry_DoNotUse::MergeFrom(
    const ::PROTOBUF_NAMESPACE_ID::Message& other) {
  ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom(other);
}
3129
3130
3131// ===================================================================
3132
// No singular message fields require default-instance wiring; intentionally empty.
void DeviceStepStats::InitAsDefaultInstance() {
}
// Helper class for generated internals; empty for this message.
class DeviceStepStats::_Internal {
 public:
};
3138
// Default constructor: heap-allocated (no arena).
DeviceStepStats::DeviceStepStats()
  : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
  SharedCtor();
  // @@protoc_insertion_point(constructor:tensorflow.DeviceStepStats)
}
// Arena constructor: repeated/map fields allocate from |arena|.
DeviceStepStats::DeviceStepStats(::PROTOBUF_NAMESPACE_ID::Arena* arena)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
  _internal_metadata_(arena),
  node_stats_(arena),
  thread_names_(arena) {
  SharedCtor();
  RegisterArenaDtor(arena);
  // @@protoc_insertion_point(arena_constructor:tensorflow.DeviceStepStats)
}
// Copy constructor: deep-copies node_stats, thread_names, and device.
DeviceStepStats::DeviceStepStats(const DeviceStepStats& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _internal_metadata_(nullptr),
      node_stats_(from.node_stats_) {
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  thread_names_.MergeFrom(from.thread_names_);
  // ArenaStringPtr must be initialized to the shared empty-string sentinel
  // before Set() may be called on it.
  device_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
  if (!from.device().empty()) {
    device_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.device(),
      GetArenaNoVirtual());
  }
  // @@protoc_insertion_point(copy_constructor:tensorflow.DeviceStepStats)
}
3166
// Shared initialization for all constructors: ensures this message's SCC
// (strongly-connected component of dependent defaults) is initialized, then
// sets the string field to the shared empty-string sentinel.
void DeviceStepStats::SharedCtor() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_DeviceStepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  device_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}

DeviceStepStats::~DeviceStepStats() {
  // @@protoc_insertion_point(destructor:tensorflow.DeviceStepStats)
  SharedDtor();
}

// Frees owned storage; must never run for arena-allocated instances
// (arena owns the memory), hence the DCHECK.
void DeviceStepStats::SharedDtor() {
  GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
  device_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}

// Arena destruction hook; nothing extra to destroy for this message.
void DeviceStepStats::ArenaDtor(void* object) {
  DeviceStepStats* _this = reinterpret_cast< DeviceStepStats* >(object);
  (void)_this;
}
void DeviceStepStats::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Caches the last computed byte size for SerializeWithCachedSizes.
void DeviceStepStats::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Returns the immutable default instance, initializing its SCC on first use.
const DeviceStepStats& DeviceStepStats::default_instance() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_DeviceStepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  return *internal_default_instance();
}
3195
3196
// Resets all fields to their defaults and drops any unknown fields.
void DeviceStepStats::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.DeviceStepStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  node_stats_.Clear();
  thread_names_.Clear();
  device_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
  _internal_metadata_.Clear();
}
3208
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Table-free hand-rolled parser (experimental parser path). Reads wire-format
// bytes from [ptr, end) under |ctx|; returns the new position, or nullptr on
// malformed input. Unknown fields are preserved in _internal_metadata_.
const char* DeviceStepStats::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
  while (!ctx->Done(&ptr)) {
    ::PROTOBUF_NAMESPACE_ID::uint32 tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    CHK_(ptr);
    switch (tag >> 3) {
      // string device = 1;
      case 1:
        // Expected tag byte 10 = field 1, wire type 2 (length-delimited).
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_device(), ptr, ctx, "tensorflow.DeviceStepStats.device");
          CHK_(ptr);
        } else goto handle_unusual;
        continue;
      // repeated .tensorflow.NodeExecStats node_stats = 2;
      case 2:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
          // Tight loop: keep consuming consecutive elements while the next
          // byte repeats the same tag (18).
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(add_node_stats(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 18);
        } else goto handle_unusual;
        continue;
      // map<uint32, string> thread_names = 3;
      case 3:
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
          ptr -= 1;
          do {
            ptr += 1;
            // Map entries parse as embedded messages directly into the MapField.
            ptr = ctx->ParseMessage(&thread_names_, ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 26);
        } else goto handle_unusual;
        continue;
      default: {
      handle_unusual:
        // End-group tag or tag 0 terminates this message; anything else is an
        // unknown field and is preserved.
        if ((tag & 7) == 4 || tag == 0) {
          ctx->SetLastTag(tag);
          goto success;
        }
        ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
        CHK_(ptr != nullptr);
        continue;
      }
    }  // switch
  }  // while
success:
  return ptr;
failure:
  ptr = nullptr;
  goto success;
#undef CHK_
}
#else  // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Legacy CodedInputStream parser. Returns false on malformed input; partially
// parsed fields may already have been merged into this message.
bool DeviceStepStats::MergePartialFromCodedStream(
    ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
  ::PROTOBUF_NAMESPACE_ID::uint32 tag;
  // @@protoc_insertion_point(parse_start:tensorflow.DeviceStepStats)
  for (;;) {
    ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
    tag = p.first;
    if (!p.second) goto handle_unusual;
    switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
      // string device = 1;
      case 1: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
                input, this->mutable_device()));
          // proto3 strings must be valid UTF-8; reject otherwise.
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
            this->device().data(), static_cast<int>(this->device().length()),
            ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
            "tensorflow.DeviceStepStats.device"));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // repeated .tensorflow.NodeExecStats node_stats = 2;
      case 2: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
                input, add_node_stats()));
        } else {
          goto handle_unusual;
        }
        break;
      }

      // map<uint32, string> thread_names = 3;
      case 3: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
          // Parse one map entry (uint32 key, string value) straight into the
          // MapField via the synthetic entry message's Parser.
          DeviceStepStats_ThreadNamesEntry_DoNotUse::Parser< ::PROTOBUF_NAMESPACE_ID::internal::MapField<
              DeviceStepStats_ThreadNamesEntry_DoNotUse,
              ::PROTOBUF_NAMESPACE_ID::uint32, std::string,
              ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_UINT32,
              ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
              0 >,
            ::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string > > parser(&thread_names_);
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessageNoVirtual(
              input, &parser));
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
            parser.value().data(), static_cast<int>(parser.value().length()),
            ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
            "tensorflow.DeviceStepStats.ThreadNamesEntry.value"));
        } else {
          goto handle_unusual;
        }
        break;
      }

      default: {
      handle_unusual:
        if (tag == 0) {
          goto success;
        }
        // Unknown field: skip it on the wire but keep a copy in unknown fields.
        DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
              input, tag, _internal_metadata_.mutable_unknown_fields()));
        break;
      }
    }
  }
success:
  // @@protoc_insertion_point(parse_success:tensorflow.DeviceStepStats)
  return true;
failure:
  // @@protoc_insertion_point(parse_failure:tensorflow.DeviceStepStats)
  return false;
#undef DO_
}
#endif  // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
3347
// Writes this message to |output| in wire format, using byte sizes previously
// cached by ByteSizeLong(). Fields are emitted in field-number order; map
// entries are sorted by key when deterministic serialization is requested.
void DeviceStepStats::SerializeWithCachedSizes(
    ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
  // @@protoc_insertion_point(serialize_start:tensorflow.DeviceStepStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // string device = 1;
  if (this->device().size() > 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->device().data(), static_cast<int>(this->device().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.DeviceStepStats.device");
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
      1, this->device(), output);
  }

  // repeated .tensorflow.NodeExecStats node_stats = 2;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->node_stats_size()); i < n; i++) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
      2,
      this->node_stats(static_cast<int>(i)),
      output);
  }

  // map<uint32, string> thread_names = 3;
  if (!this->thread_names().empty()) {
    typedef ::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string >::const_pointer
        ConstPtr;
    typedef ::PROTOBUF_NAMESPACE_ID::internal::SortItem< ::PROTOBUF_NAMESPACE_ID::uint32, ConstPtr > SortItem;
    typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByFirstField<SortItem> Less;
    // UTF-8 validation for map string values (SERIALIZE mode logs, not fails).
    struct Utf8Check {
      static void Check(ConstPtr p) {
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
          p->second.data(), static_cast<int>(p->second.length()),
          ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
          "tensorflow.DeviceStepStats.ThreadNamesEntry.value");
      }
    };

    // Deterministic mode: sort entries by key before writing so equal maps
    // always produce identical bytes.
    if (output->IsSerializationDeterministic() &&
        this->thread_names().size() > 1) {
      ::std::unique_ptr<SortItem[]> items(
          new SortItem[this->thread_names().size()]);
      typedef ::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string >::size_type size_type;
      size_type n = 0;
      for (::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string >::const_iterator
          it = this->thread_names().begin();
          it != this->thread_names().end(); ++it, ++n) {
        items[static_cast<ptrdiff_t>(n)] = SortItem(&*it);
      }
      ::std::sort(&items[0], &items[static_cast<ptrdiff_t>(n)], Less());
      for (size_type i = 0; i < n; i++) {
        DeviceStepStats_ThreadNamesEntry_DoNotUse::Funcs::SerializeToCodedStream(3, items[static_cast<ptrdiff_t>(i)].second->first, items[static_cast<ptrdiff_t>(i)].second->second, output);
        Utf8Check::Check(&(*items[static_cast<ptrdiff_t>(i)].second));
      }
    } else {
      // Non-deterministic mode: write entries in map iteration order.
      for (::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string >::const_iterator
          it = this->thread_names().begin();
          it != this->thread_names().end(); ++it) {
        DeviceStepStats_ThreadNamesEntry_DoNotUse::Funcs::SerializeToCodedStream(3, it->first, it->second, output);
        Utf8Check::Check(&(*it));
      }
    }
  }

  if (_internal_metadata_.have_unknown_fields()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
        _internal_metadata_.unknown_fields(), output);
  }
  // @@protoc_insertion_point(serialize_end:tensorflow.DeviceStepStats)
}
3420
// Serializes this message directly into the flat byte array at |target|
// (caller guarantees capacity via ByteSizeLong) and returns the end pointer.
::PROTOBUF_NAMESPACE_ID::uint8* DeviceStepStats::InternalSerializeWithCachedSizesToArray(
    ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.DeviceStepStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // string device = 1;
  if (this->device().size() > 0) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
      this->device().data(), static_cast<int>(this->device().length()),
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
      "tensorflow.DeviceStepStats.device");
    target =
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
        1, this->device(), target);
  }

  // repeated .tensorflow.NodeExecStats node_stats = 2;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->node_stats_size()); i < n; i++) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessageToArray(
        2, this->node_stats(static_cast<int>(i)), target);
  }

  // map<uint32, string> thread_names = 3;
  if (!this->thread_names().empty()) {
    typedef ::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string >::const_pointer
        ConstPtr;
    typedef ::PROTOBUF_NAMESPACE_ID::internal::SortItem< ::PROTOBUF_NAMESPACE_ID::uint32, ConstPtr > SortItem;
    typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByFirstField<SortItem> Less;
    struct Utf8Check {
      static void Check(ConstPtr p) {
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
          p->second.data(), static_cast<int>(p->second.length()),
          ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
          "tensorflow.DeviceStepStats.ThreadNamesEntry.value");
      }
    };

    // NOTE: generated code deliberately compiles out the deterministic-sort
    // branch here (`false &&`) for the to-array path; the dead code is kept
    // for structural symmetry with SerializeWithCachedSizes above. Map
    // entries are therefore written in map iteration order. DO NOT "fix".
    if (false &&
        this->thread_names().size() > 1) {
      ::std::unique_ptr<SortItem[]> items(
          new SortItem[this->thread_names().size()]);
      typedef ::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string >::size_type size_type;
      size_type n = 0;
      for (::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string >::const_iterator
          it = this->thread_names().begin();
          it != this->thread_names().end(); ++it, ++n) {
        items[static_cast<ptrdiff_t>(n)] = SortItem(&*it);
      }
      ::std::sort(&items[0], &items[static_cast<ptrdiff_t>(n)], Less());
      for (size_type i = 0; i < n; i++) {
        target = DeviceStepStats_ThreadNamesEntry_DoNotUse::Funcs::SerializeToArray(3, items[static_cast<ptrdiff_t>(i)].second->first, items[static_cast<ptrdiff_t>(i)].second->second, target);
        Utf8Check::Check(&(*items[static_cast<ptrdiff_t>(i)].second));
      }
    } else {
      for (::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string >::const_iterator
          it = this->thread_names().begin();
          it != this->thread_names().end(); ++it) {
        target = DeviceStepStats_ThreadNamesEntry_DoNotUse::Funcs::SerializeToArray(3, it->first, it->second, target);
        Utf8Check::Check(&(*it));
      }
    }
  }

  if (_internal_metadata_.have_unknown_fields()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields(), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.DeviceStepStats)
  return target;
}
3494
// Computes the exact serialized size in bytes and caches it for the
// subsequent SerializeWithCachedSizes call.
size_t DeviceStepStats::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.DeviceStepStats)
  size_t total_size = 0;

  if (_internal_metadata_.have_unknown_fields()) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
        _internal_metadata_.unknown_fields());
  }
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.NodeExecStats node_stats = 2;
  {
    unsigned int count = static_cast<unsigned int>(this->node_stats_size());
    // 1 byte of tag per element (field 2 fits in a single-byte tag).
    total_size += 1UL * count;
    for (unsigned int i = 0; i < count; i++) {
      total_size +=
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          this->node_stats(static_cast<int>(i)));
    }
  }

  // map<uint32, string> thread_names = 3;
  // 1 byte of tag per entry plus each entry's own encoded size.
  total_size += 1 *
      ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->thread_names_size());
  for (::PROTOBUF_NAMESPACE_ID::Map< ::PROTOBUF_NAMESPACE_ID::uint32, std::string >::const_iterator
      it = this->thread_names().begin();
      it != this->thread_names().end(); ++it) {
    total_size += DeviceStepStats_ThreadNamesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
  }

  // string device = 1;
  if (this->device().size() > 0) {
    total_size += 1 +
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
        this->device());
  }

  int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
3539
// Generalized merge: fast path when |from| is actually a DeviceStepStats,
// otherwise falls back to reflection-based merging.
void DeviceStepStats::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.DeviceStepStats)
  GOOGLE_DCHECK_NE(&from, this);
  const DeviceStepStats* source =
      ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<DeviceStepStats>(
          &from);
  if (source == nullptr) {
  // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.DeviceStepStats)
    ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
  } else {
  // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.DeviceStepStats)
    MergeFrom(*source);
  }
}

// Type-safe merge: appends repeated fields, merges map entries, and
// overwrites the scalar/string field only when set in |from|.
void DeviceStepStats::MergeFrom(const DeviceStepStats& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.DeviceStepStats)
  GOOGLE_DCHECK_NE(&from, this);
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  node_stats_.MergeFrom(from.node_stats_);
  thread_names_.MergeFrom(from.thread_names_);
  // proto3: a string is "set" iff non-empty.
  if (from.device().size() > 0) {
    set_device(from.device());
  }
}
3568
// Generalized CopyFrom: clear, then merge from an arbitrary Message.
void DeviceStepStats::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.DeviceStepStats)
  // Self-copy guard: Clear() would wipe the source first.
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

// Type-safe CopyFrom for same-type copies.
void DeviceStepStats::CopyFrom(const DeviceStepStats& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.DeviceStepStats)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

// proto3 messages have no required fields, so always initialized.
bool DeviceStepStats::IsInitialized() const {
  return true;
}

// O(1) field-wise swap with |other| (pointer/metadata swaps only).
void DeviceStepStats::InternalSwap(DeviceStepStats* other) {
  using std::swap;
  _internal_metadata_.Swap(&other->_internal_metadata_);
  CastToBase(&node_stats_)->InternalSwap(CastToBase(&other->node_stats_));
  thread_names_.Swap(&other->thread_names_);
  device_.Swap(&other->device_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
    GetArenaNoVirtual());
}

// Returns descriptor/reflection metadata via the generated static accessor.
::PROTOBUF_NAMESPACE_ID::Metadata DeviceStepStats::GetMetadata() const {
  return GetMetadataStatic();
}
3599
3600
3601// ===================================================================
3602
// No singular message fields require default-instance wiring; intentionally empty.
void StepStats::InitAsDefaultInstance() {
}
// Helper class for generated internals; empty for this message.
class StepStats::_Internal {
 public:
};

// Default constructor: heap-allocated (no arena).
StepStats::StepStats()
  : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
  SharedCtor();
  // @@protoc_insertion_point(constructor:tensorflow.StepStats)
}
// Arena constructor: the repeated field allocates from |arena|.
StepStats::StepStats(::PROTOBUF_NAMESPACE_ID::Arena* arena)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
  _internal_metadata_(arena),
  dev_stats_(arena) {
  SharedCtor();
  RegisterArenaDtor(arena);
  // @@protoc_insertion_point(arena_constructor:tensorflow.StepStats)
}
// Copy constructor: deep-copies dev_stats and unknown fields.
StepStats::StepStats(const StepStats& from)
  : ::PROTOBUF_NAMESPACE_ID::Message(),
      _internal_metadata_(nullptr),
      dev_stats_(from.dev_stats_) {
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  // @@protoc_insertion_point(copy_constructor:tensorflow.StepStats)
}

// Shared initialization: ensure this message's SCC of defaults is initialized.
void StepStats::SharedCtor() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_StepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
}

StepStats::~StepStats() {
  // @@protoc_insertion_point(destructor:tensorflow.StepStats)
  SharedDtor();
}

// Must never run for arena-allocated instances; the arena owns the memory.
void StepStats::SharedDtor() {
  GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
}

// Arena destruction hook; nothing extra to destroy for this message.
void StepStats::ArenaDtor(void* object) {
  StepStats* _this = reinterpret_cast< StepStats* >(object);
  (void)_this;
}
void StepStats::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
}
// Caches the last computed byte size for SerializeWithCachedSizes.
void StepStats::SetCachedSize(int size) const {
  _cached_size_.Set(size);
}
// Returns the immutable default instance, initializing its SCC on first use.
const StepStats& StepStats::default_instance() {
  ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_StepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base);
  return *internal_default_instance();
}
3656
3657
// Resets all fields to their defaults and drops any unknown fields.
void StepStats::Clear() {
// @@protoc_insertion_point(message_clear_start:tensorflow.StepStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  dev_stats_.Clear();
  _internal_metadata_.Clear();
}
3667
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Experimental-parser path: reads wire-format bytes under |ctx|; returns the
// new position, or nullptr on malformed input. Unknown fields are preserved.
const char* StepStats::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
  ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
  while (!ctx->Done(&ptr)) {
    ::PROTOBUF_NAMESPACE_ID::uint32 tag;
    ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
    CHK_(ptr);
    switch (tag >> 3) {
      // repeated .tensorflow.DeviceStepStats dev_stats = 1;
      case 1:
        // Expected tag byte 10 = field 1, wire type 2 (length-delimited).
        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
          // Tight loop over consecutive elements sharing the same tag byte.
          ptr -= 1;
          do {
            ptr += 1;
            ptr = ctx->ParseMessage(add_dev_stats(), ptr);
            CHK_(ptr);
            if (!ctx->DataAvailable(ptr)) break;
          } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 10);
        } else goto handle_unusual;
        continue;
      default: {
      handle_unusual:
        // End-group tag or tag 0 terminates the message; otherwise preserve
        // the unknown field.
        if ((tag & 7) == 4 || tag == 0) {
          ctx->SetLastTag(tag);
          goto success;
        }
        ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
        CHK_(ptr != nullptr);
        continue;
      }
    }  // switch
  }  // while
success:
  return ptr;
failure:
  ptr = nullptr;
  goto success;
#undef CHK_
}
#else  // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
// Legacy CodedInputStream parser. Returns false on malformed input; fields
// already parsed may have been merged into this message.
bool StepStats::MergePartialFromCodedStream(
    ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
  ::PROTOBUF_NAMESPACE_ID::uint32 tag;
  // @@protoc_insertion_point(parse_start:tensorflow.StepStats)
  for (;;) {
    ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
    tag = p.first;
    if (!p.second) goto handle_unusual;
    switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
      // repeated .tensorflow.DeviceStepStats dev_stats = 1;
      case 1: {
        if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
          DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
                input, add_dev_stats()));
        } else {
          goto handle_unusual;
        }
        break;
      }

      default: {
      handle_unusual:
        if (tag == 0) {
          goto success;
        }
        // Unknown field: skip on the wire but keep a copy in unknown fields.
        DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
              input, tag, _internal_metadata_.mutable_unknown_fields()));
        break;
      }
    }
  }
success:
  // @@protoc_insertion_point(parse_success:tensorflow.StepStats)
  return true;
failure:
  // @@protoc_insertion_point(parse_failure:tensorflow.StepStats)
  return false;
#undef DO_
}
#endif  // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
3750
// Writes this message to |output| in wire format, using byte sizes previously
// cached by ByteSizeLong().
void StepStats::SerializeWithCachedSizes(
    ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
  // @@protoc_insertion_point(serialize_start:tensorflow.StepStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated .tensorflow.DeviceStepStats dev_stats = 1;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->dev_stats_size()); i < n; i++) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
      1,
      this->dev_stats(static_cast<int>(i)),
      output);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
        _internal_metadata_.unknown_fields(), output);
  }
  // @@protoc_insertion_point(serialize_end:tensorflow.StepStats)
}

// Serializes directly into the flat byte array at |target| (caller guarantees
// capacity via ByteSizeLong) and returns the end pointer.
::PROTOBUF_NAMESPACE_ID::uint8* StepStats::InternalSerializeWithCachedSizesToArray(
    ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
  // @@protoc_insertion_point(serialize_to_array_start:tensorflow.StepStats)
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  // repeated .tensorflow.DeviceStepStats dev_stats = 1;
  for (unsigned int i = 0,
      n = static_cast<unsigned int>(this->dev_stats_size()); i < n; i++) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
      InternalWriteMessageToArray(
        1, this->dev_stats(static_cast<int>(i)), target);
  }

  if (_internal_metadata_.have_unknown_fields()) {
    target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
        _internal_metadata_.unknown_fields(), target);
  }
  // @@protoc_insertion_point(serialize_to_array_end:tensorflow.StepStats)
  return target;
}
3794
// Computes the exact serialized size in bytes and caches it for the
// subsequent SerializeWithCachedSizes call.
size_t StepStats::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:tensorflow.StepStats)
  size_t total_size = 0;

  if (_internal_metadata_.have_unknown_fields()) {
    total_size +=
      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
        _internal_metadata_.unknown_fields());
  }
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  // Prevent compiler warnings about cached_has_bits being unused
  (void) cached_has_bits;

  // repeated .tensorflow.DeviceStepStats dev_stats = 1;
  {
    unsigned int count = static_cast<unsigned int>(this->dev_stats_size());
    // 1 byte of tag per element (field 1 fits in a single-byte tag).
    total_size += 1UL * count;
    for (unsigned int i = 0; i < count; i++) {
      total_size +=
        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
          this->dev_stats(static_cast<int>(i)));
    }
  }

  int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
  SetCachedSize(cached_size);
  return total_size;
}
3823
// Generalized merge: fast path when |from| is actually a StepStats,
// otherwise falls back to reflection-based merging.
void StepStats::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.StepStats)
  GOOGLE_DCHECK_NE(&from, this);
  const StepStats* source =
      ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<StepStats>(
          &from);
  if (source == nullptr) {
  // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.StepStats)
    ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
  } else {
  // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.StepStats)
    MergeFrom(*source);
  }
}

// Type-safe merge: appends |from|'s dev_stats elements to ours.
void StepStats::MergeFrom(const StepStats& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.StepStats)
  GOOGLE_DCHECK_NE(&from, this);
  _internal_metadata_.MergeFrom(from._internal_metadata_);
  ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
  (void) cached_has_bits;

  dev_stats_.MergeFrom(from.dev_stats_);
}
3848
// Generalized CopyFrom: clear, then merge from an arbitrary Message.
void StepStats::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.StepStats)
  // Self-copy guard: Clear() would wipe the source first.
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

// Type-safe CopyFrom for same-type copies.
void StepStats::CopyFrom(const StepStats& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.StepStats)
  if (&from == this) return;
  Clear();
  MergeFrom(from);
}

// proto3 messages have no required fields, so always initialized.
bool StepStats::IsInitialized() const {
  return true;
}

// O(1) field-wise swap with |other| (pointer/metadata swaps only).
void StepStats::InternalSwap(StepStats* other) {
  using std::swap;
  _internal_metadata_.Swap(&other->_internal_metadata_);
  CastToBase(&dev_stats_)->InternalSwap(CastToBase(&other->dev_stats_));
}

// Returns descriptor/reflection metadata via the generated static accessor.
::PROTOBUF_NAMESPACE_ID::Metadata StepStats::GetMetadata() const {
  return GetMetadataStatic();
}
3876
3877
3878// @@protoc_insertion_point(namespace_scope)
3879} // namespace tensorflow
3880PROTOBUF_NAMESPACE_OPEN
// Explicit Arena::CreateMaybeMessage specializations: allocate each generated
// message type on |arena| (or the heap when arena == nullptr). One
// specialization per message defined in step_stats.proto.
template<> PROTOBUF_NOINLINE ::tensorflow::AllocationRecord* Arena::CreateMaybeMessage< ::tensorflow::AllocationRecord >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::AllocationRecord >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::AllocatorMemoryUsed* Arena::CreateMaybeMessage< ::tensorflow::AllocatorMemoryUsed >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::AllocatorMemoryUsed >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::NodeOutput* Arena::CreateMaybeMessage< ::tensorflow::NodeOutput >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::NodeOutput >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::MemoryStats* Arena::CreateMaybeMessage< ::tensorflow::MemoryStats >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::MemoryStats >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::NodeExecStats* Arena::CreateMaybeMessage< ::tensorflow::NodeExecStats >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::NodeExecStats >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse* Arena::CreateMaybeMessage< ::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::DeviceStepStats_ThreadNamesEntry_DoNotUse >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::DeviceStepStats* Arena::CreateMaybeMessage< ::tensorflow::DeviceStepStats >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::DeviceStepStats >(arena);
}
template<> PROTOBUF_NOINLINE ::tensorflow::StepStats* Arena::CreateMaybeMessage< ::tensorflow::StepStats >(Arena* arena) {
  return Arena::CreateMessageInternal< ::tensorflow::StepStats >(arena);
}
3905PROTOBUF_NAMESPACE_CLOSE
3906
3907// @@protoc_insertion_point(global_scope)
3908#include <google/protobuf/port_undef.inc>
3909