1// Generated by the protocol buffer compiler. DO NOT EDIT!
2// source: tensorflow/core/framework/log_memory.proto
3
4#include "tensorflow/core/framework/log_memory.pb.h"
5
6#include <algorithm>
7
8#include <google/protobuf/stubs/common.h>
9#include <google/protobuf/io/coded_stream.h>
10#include <google/protobuf/extension_set.h>
11#include <google/protobuf/wire_format_lite.h>
12#include <google/protobuf/descriptor.h>
13#include <google/protobuf/generated_message_reflection.h>
14#include <google/protobuf/reflection_ops.h>
15#include <google/protobuf/wire_format.h>
16// @@protoc_insertion_point(includes)
17#include <google/protobuf/port_def.inc>
18extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_TensorDescription_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto;
19namespace tensorflow {
20class MemoryLogStepDefaultTypeInternal {
21 public:
22 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<MemoryLogStep> _instance;
23} _MemoryLogStep_default_instance_;
24class MemoryLogTensorAllocationDefaultTypeInternal {
25 public:
26 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<MemoryLogTensorAllocation> _instance;
27} _MemoryLogTensorAllocation_default_instance_;
28class MemoryLogTensorDeallocationDefaultTypeInternal {
29 public:
30 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<MemoryLogTensorDeallocation> _instance;
31} _MemoryLogTensorDeallocation_default_instance_;
32class MemoryLogTensorOutputDefaultTypeInternal {
33 public:
34 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<MemoryLogTensorOutput> _instance;
35} _MemoryLogTensorOutput_default_instance_;
36class MemoryLogRawAllocationDefaultTypeInternal {
37 public:
38 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<MemoryLogRawAllocation> _instance;
39} _MemoryLogRawAllocation_default_instance_;
40class MemoryLogRawDeallocationDefaultTypeInternal {
41 public:
42 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<MemoryLogRawDeallocation> _instance;
43} _MemoryLogRawDeallocation_default_instance_;
44} // namespace tensorflow
45static void InitDefaultsscc_info_MemoryLogRawAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto() {
46 GOOGLE_PROTOBUF_VERIFY_VERSION;
47
48 {
49 void* ptr = &::tensorflow::_MemoryLogRawAllocation_default_instance_;
50 new (ptr) ::tensorflow::MemoryLogRawAllocation();
51 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
52 }
53 ::tensorflow::MemoryLogRawAllocation::InitAsDefaultInstance();
54}
55
56::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_MemoryLogRawAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto =
57 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_MemoryLogRawAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto}, {}};
58
59static void InitDefaultsscc_info_MemoryLogRawDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto() {
60 GOOGLE_PROTOBUF_VERIFY_VERSION;
61
62 {
63 void* ptr = &::tensorflow::_MemoryLogRawDeallocation_default_instance_;
64 new (ptr) ::tensorflow::MemoryLogRawDeallocation();
65 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
66 }
67 ::tensorflow::MemoryLogRawDeallocation::InitAsDefaultInstance();
68}
69
70::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_MemoryLogRawDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto =
71 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_MemoryLogRawDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto}, {}};
72
73static void InitDefaultsscc_info_MemoryLogStep_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto() {
74 GOOGLE_PROTOBUF_VERIFY_VERSION;
75
76 {
77 void* ptr = &::tensorflow::_MemoryLogStep_default_instance_;
78 new (ptr) ::tensorflow::MemoryLogStep();
79 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
80 }
81 ::tensorflow::MemoryLogStep::InitAsDefaultInstance();
82}
83
84::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_MemoryLogStep_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto =
85 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_MemoryLogStep_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto}, {}};
86
87static void InitDefaultsscc_info_MemoryLogTensorAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto() {
88 GOOGLE_PROTOBUF_VERIFY_VERSION;
89
90 {
91 void* ptr = &::tensorflow::_MemoryLogTensorAllocation_default_instance_;
92 new (ptr) ::tensorflow::MemoryLogTensorAllocation();
93 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
94 }
95 ::tensorflow::MemoryLogTensorAllocation::InitAsDefaultInstance();
96}
97
98::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_MemoryLogTensorAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto =
99 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_MemoryLogTensorAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto}, {
100 &scc_info_TensorDescription_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto.base,}};
101
102static void InitDefaultsscc_info_MemoryLogTensorDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto() {
103 GOOGLE_PROTOBUF_VERIFY_VERSION;
104
105 {
106 void* ptr = &::tensorflow::_MemoryLogTensorDeallocation_default_instance_;
107 new (ptr) ::tensorflow::MemoryLogTensorDeallocation();
108 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
109 }
110 ::tensorflow::MemoryLogTensorDeallocation::InitAsDefaultInstance();
111}
112
113::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_MemoryLogTensorDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto =
114 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_MemoryLogTensorDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto}, {}};
115
116static void InitDefaultsscc_info_MemoryLogTensorOutput_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto() {
117 GOOGLE_PROTOBUF_VERIFY_VERSION;
118
119 {
120 void* ptr = &::tensorflow::_MemoryLogTensorOutput_default_instance_;
121 new (ptr) ::tensorflow::MemoryLogTensorOutput();
122 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
123 }
124 ::tensorflow::MemoryLogTensorOutput::InitAsDefaultInstance();
125}
126
127::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_MemoryLogTensorOutput_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto =
128 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_MemoryLogTensorOutput_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto}, {
129 &scc_info_TensorDescription_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto.base,}};
130
131static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto[6];
132static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto = nullptr;
133static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto = nullptr;
134
135const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
136 ~0u, // no _has_bits_
137 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogStep, _internal_metadata_),
138 ~0u, // no _extensions_
139 ~0u, // no _oneof_case_
140 ~0u, // no _weak_field_map_
141 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogStep, step_id_),
142 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogStep, handle_),
143 ~0u, // no _has_bits_
144 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorAllocation, _internal_metadata_),
145 ~0u, // no _extensions_
146 ~0u, // no _oneof_case_
147 ~0u, // no _weak_field_map_
148 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorAllocation, step_id_),
149 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorAllocation, kernel_name_),
150 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorAllocation, tensor_),
151 ~0u, // no _has_bits_
152 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorDeallocation, _internal_metadata_),
153 ~0u, // no _extensions_
154 ~0u, // no _oneof_case_
155 ~0u, // no _weak_field_map_
156 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorDeallocation, allocation_id_),
157 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorDeallocation, allocator_name_),
158 ~0u, // no _has_bits_
159 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorOutput, _internal_metadata_),
160 ~0u, // no _extensions_
161 ~0u, // no _oneof_case_
162 ~0u, // no _weak_field_map_
163 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorOutput, step_id_),
164 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorOutput, kernel_name_),
165 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorOutput, index_),
166 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogTensorOutput, tensor_),
167 ~0u, // no _has_bits_
168 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawAllocation, _internal_metadata_),
169 ~0u, // no _extensions_
170 ~0u, // no _oneof_case_
171 ~0u, // no _weak_field_map_
172 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawAllocation, step_id_),
173 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawAllocation, operation_),
174 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawAllocation, num_bytes_),
175 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawAllocation, ptr_),
176 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawAllocation, allocation_id_),
177 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawAllocation, allocator_name_),
178 ~0u, // no _has_bits_
179 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawDeallocation, _internal_metadata_),
180 ~0u, // no _extensions_
181 ~0u, // no _oneof_case_
182 ~0u, // no _weak_field_map_
183 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawDeallocation, step_id_),
184 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawDeallocation, operation_),
185 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawDeallocation, allocation_id_),
186 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawDeallocation, allocator_name_),
187 PROTOBUF_FIELD_OFFSET(::tensorflow::MemoryLogRawDeallocation, deferred_),
188};
189static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
190 { 0, -1, sizeof(::tensorflow::MemoryLogStep)},
191 { 7, -1, sizeof(::tensorflow::MemoryLogTensorAllocation)},
192 { 15, -1, sizeof(::tensorflow::MemoryLogTensorDeallocation)},
193 { 22, -1, sizeof(::tensorflow::MemoryLogTensorOutput)},
194 { 31, -1, sizeof(::tensorflow::MemoryLogRawAllocation)},
195 { 42, -1, sizeof(::tensorflow::MemoryLogRawDeallocation)},
196};
197
198static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
199 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_MemoryLogStep_default_instance_),
200 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_MemoryLogTensorAllocation_default_instance_),
201 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_MemoryLogTensorDeallocation_default_instance_),
202 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_MemoryLogTensorOutput_default_instance_),
203 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_MemoryLogRawAllocation_default_instance_),
204 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_MemoryLogRawDeallocation_default_instance_),
205};
206
207const char descriptor_table_protodef_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) =
208 "\n*tensorflow/core/framework/log_memory.p"
209 "roto\022\ntensorflow\0322tensorflow/core/framew"
210 "ork/tensor_description.proto\"0\n\rMemoryLo"
211 "gStep\022\017\n\007step_id\030\001 \001(\003\022\016\n\006handle\030\002 \001(\t\"p"
212 "\n\031MemoryLogTensorAllocation\022\017\n\007step_id\030\001"
213 " \001(\003\022\023\n\013kernel_name\030\002 \001(\t\022-\n\006tensor\030\003 \001("
214 "\0132\035.tensorflow.TensorDescription\"L\n\033Memo"
215 "ryLogTensorDeallocation\022\025\n\rallocation_id"
216 "\030\001 \001(\003\022\026\n\016allocator_name\030\002 \001(\t\"{\n\025Memory"
217 "LogTensorOutput\022\017\n\007step_id\030\001 \001(\003\022\023\n\013kern"
218 "el_name\030\002 \001(\t\022\r\n\005index\030\003 \001(\005\022-\n\006tensor\030\004"
219 " \001(\0132\035.tensorflow.TensorDescription\"\213\001\n\026"
220 "MemoryLogRawAllocation\022\017\n\007step_id\030\001 \001(\003\022"
221 "\021\n\toperation\030\002 \001(\t\022\021\n\tnum_bytes\030\003 \001(\003\022\013\n"
222 "\003ptr\030\004 \001(\004\022\025\n\rallocation_id\030\005 \001(\003\022\026\n\016all"
223 "ocator_name\030\006 \001(\t\"\177\n\030MemoryLogRawDealloc"
224 "ation\022\017\n\007step_id\030\001 \001(\003\022\021\n\toperation\030\002 \001("
225 "\t\022\025\n\rallocation_id\030\003 \001(\003\022\026\n\016allocator_na"
226 "me\030\004 \001(\t\022\020\n\010deferred\030\005 \001(\010B\203\001\n\030org.tenso"
227 "rflow.frameworkB\017LogMemoryProtosP\001ZQgith"
228 "ub.com/tensorflow/tensorflow/tensorflow/"
229 "go/core/framework/log_memory_go_proto\370\001\001"
230 "b\006proto3"
231 ;
232static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto_deps[1] = {
233 &::descriptor_table_tensorflow_2fcore_2fframework_2ftensor_5fdescription_2eproto,
234};
235static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto_sccs[6] = {
236 &scc_info_MemoryLogRawAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base,
237 &scc_info_MemoryLogRawDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base,
238 &scc_info_MemoryLogStep_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base,
239 &scc_info_MemoryLogTensorAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base,
240 &scc_info_MemoryLogTensorDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base,
241 &scc_info_MemoryLogTensorOutput_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base,
242};
243static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto_once;
244static bool descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto_initialized = false;
245const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto = {
246 &descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto_initialized, descriptor_table_protodef_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto, "tensorflow/core/framework/log_memory.proto", 888,
247 &descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto_once, descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto_sccs, descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto_deps, 6, 1,
248 schemas, file_default_instances, TableStruct_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto::offsets,
249 file_level_metadata_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto, 6, file_level_enum_descriptors_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto, file_level_service_descriptors_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto,
250};
251
252// Force running AddDescriptors() at dynamic initialization time.
253static bool dynamic_init_dummy_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto = ( ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto), true);
254namespace tensorflow {
255
256// ===================================================================
257
258void MemoryLogStep::InitAsDefaultInstance() {
259}
260class MemoryLogStep::_Internal {
261 public:
262};
263
264MemoryLogStep::MemoryLogStep()
265 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
266 SharedCtor();
267 // @@protoc_insertion_point(constructor:tensorflow.MemoryLogStep)
268}
269MemoryLogStep::MemoryLogStep(::PROTOBUF_NAMESPACE_ID::Arena* arena)
270 : ::PROTOBUF_NAMESPACE_ID::Message(),
271 _internal_metadata_(arena) {
272 SharedCtor();
273 RegisterArenaDtor(arena);
274 // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogStep)
275}
276MemoryLogStep::MemoryLogStep(const MemoryLogStep& from)
277 : ::PROTOBUF_NAMESPACE_ID::Message(),
278 _internal_metadata_(nullptr) {
279 _internal_metadata_.MergeFrom(from._internal_metadata_);
280 handle_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
281 if (!from.handle().empty()) {
282 handle_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.handle(),
283 GetArenaNoVirtual());
284 }
285 step_id_ = from.step_id_;
286 // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogStep)
287}
288
289void MemoryLogStep::SharedCtor() {
290 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_MemoryLogStep_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
291 handle_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
292 step_id_ = PROTOBUF_LONGLONG(0);
293}
294
295MemoryLogStep::~MemoryLogStep() {
296 // @@protoc_insertion_point(destructor:tensorflow.MemoryLogStep)
297 SharedDtor();
298}
299
300void MemoryLogStep::SharedDtor() {
301 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
302 handle_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
303}
304
305void MemoryLogStep::ArenaDtor(void* object) {
306 MemoryLogStep* _this = reinterpret_cast< MemoryLogStep* >(object);
307 (void)_this;
308}
309void MemoryLogStep::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
310}
311void MemoryLogStep::SetCachedSize(int size) const {
312 _cached_size_.Set(size);
313}
314const MemoryLogStep& MemoryLogStep::default_instance() {
315 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_MemoryLogStep_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
316 return *internal_default_instance();
317}
318
319
320void MemoryLogStep::Clear() {
321// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogStep)
322 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
323 // Prevent compiler warnings about cached_has_bits being unused
324 (void) cached_has_bits;
325
326 handle_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
327 step_id_ = PROTOBUF_LONGLONG(0);
328 _internal_metadata_.Clear();
329}
330
331#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
332const char* MemoryLogStep::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
333#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
334 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
335 while (!ctx->Done(&ptr)) {
336 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
337 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
338 CHK_(ptr);
339 switch (tag >> 3) {
340 // int64 step_id = 1;
341 case 1:
342 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
343 step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
344 CHK_(ptr);
345 } else goto handle_unusual;
346 continue;
347 // string handle = 2;
348 case 2:
349 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
350 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_handle(), ptr, ctx, "tensorflow.MemoryLogStep.handle");
351 CHK_(ptr);
352 } else goto handle_unusual;
353 continue;
354 default: {
355 handle_unusual:
356 if ((tag & 7) == 4 || tag == 0) {
357 ctx->SetLastTag(tag);
358 goto success;
359 }
360 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
361 CHK_(ptr != nullptr);
362 continue;
363 }
364 } // switch
365 } // while
366success:
367 return ptr;
368failure:
369 ptr = nullptr;
370 goto success;
371#undef CHK_
372}
373#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
374bool MemoryLogStep::MergePartialFromCodedStream(
375 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
376#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
377 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
378 // @@protoc_insertion_point(parse_start:tensorflow.MemoryLogStep)
379 for (;;) {
380 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
381 tag = p.first;
382 if (!p.second) goto handle_unusual;
383 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
384 // int64 step_id = 1;
385 case 1: {
386 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
387
388 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
389 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
390 input, &step_id_)));
391 } else {
392 goto handle_unusual;
393 }
394 break;
395 }
396
397 // string handle = 2;
398 case 2: {
399 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
400 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
401 input, this->mutable_handle()));
402 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
403 this->handle().data(), static_cast<int>(this->handle().length()),
404 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
405 "tensorflow.MemoryLogStep.handle"));
406 } else {
407 goto handle_unusual;
408 }
409 break;
410 }
411
412 default: {
413 handle_unusual:
414 if (tag == 0) {
415 goto success;
416 }
417 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
418 input, tag, _internal_metadata_.mutable_unknown_fields()));
419 break;
420 }
421 }
422 }
423success:
424 // @@protoc_insertion_point(parse_success:tensorflow.MemoryLogStep)
425 return true;
426failure:
427 // @@protoc_insertion_point(parse_failure:tensorflow.MemoryLogStep)
428 return false;
429#undef DO_
430}
431#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
432
433void MemoryLogStep::SerializeWithCachedSizes(
434 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
435 // @@protoc_insertion_point(serialize_start:tensorflow.MemoryLogStep)
436 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
437 (void) cached_has_bits;
438
439 // int64 step_id = 1;
440 if (this->step_id() != 0) {
441 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->step_id(), output);
442 }
443
444 // string handle = 2;
445 if (this->handle().size() > 0) {
446 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
447 this->handle().data(), static_cast<int>(this->handle().length()),
448 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
449 "tensorflow.MemoryLogStep.handle");
450 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
451 2, this->handle(), output);
452 }
453
454 if (_internal_metadata_.have_unknown_fields()) {
455 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
456 _internal_metadata_.unknown_fields(), output);
457 }
458 // @@protoc_insertion_point(serialize_end:tensorflow.MemoryLogStep)
459}
460
461::PROTOBUF_NAMESPACE_ID::uint8* MemoryLogStep::InternalSerializeWithCachedSizesToArray(
462 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
463 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogStep)
464 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
465 (void) cached_has_bits;
466
467 // int64 step_id = 1;
468 if (this->step_id() != 0) {
469 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->step_id(), target);
470 }
471
472 // string handle = 2;
473 if (this->handle().size() > 0) {
474 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
475 this->handle().data(), static_cast<int>(this->handle().length()),
476 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
477 "tensorflow.MemoryLogStep.handle");
478 target =
479 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
480 2, this->handle(), target);
481 }
482
483 if (_internal_metadata_.have_unknown_fields()) {
484 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
485 _internal_metadata_.unknown_fields(), target);
486 }
487 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogStep)
488 return target;
489}
490
491size_t MemoryLogStep::ByteSizeLong() const {
492// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogStep)
493 size_t total_size = 0;
494
495 if (_internal_metadata_.have_unknown_fields()) {
496 total_size +=
497 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
498 _internal_metadata_.unknown_fields());
499 }
500 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
501 // Prevent compiler warnings about cached_has_bits being unused
502 (void) cached_has_bits;
503
504 // string handle = 2;
505 if (this->handle().size() > 0) {
506 total_size += 1 +
507 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
508 this->handle());
509 }
510
511 // int64 step_id = 1;
512 if (this->step_id() != 0) {
513 total_size += 1 +
514 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
515 this->step_id());
516 }
517
518 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
519 SetCachedSize(cached_size);
520 return total_size;
521}
522
523void MemoryLogStep::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
524// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.MemoryLogStep)
525 GOOGLE_DCHECK_NE(&from, this);
526 const MemoryLogStep* source =
527 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<MemoryLogStep>(
528 &from);
529 if (source == nullptr) {
530 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.MemoryLogStep)
531 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
532 } else {
533 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.MemoryLogStep)
534 MergeFrom(*source);
535 }
536}
537
538void MemoryLogStep::MergeFrom(const MemoryLogStep& from) {
539// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogStep)
540 GOOGLE_DCHECK_NE(&from, this);
541 _internal_metadata_.MergeFrom(from._internal_metadata_);
542 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
543 (void) cached_has_bits;
544
545 if (from.handle().size() > 0) {
546 set_handle(from.handle());
547 }
548 if (from.step_id() != 0) {
549 set_step_id(from.step_id());
550 }
551}
552
553void MemoryLogStep::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
554// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.MemoryLogStep)
555 if (&from == this) return;
556 Clear();
557 MergeFrom(from);
558}
559
560void MemoryLogStep::CopyFrom(const MemoryLogStep& from) {
561// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogStep)
562 if (&from == this) return;
563 Clear();
564 MergeFrom(from);
565}
566
567bool MemoryLogStep::IsInitialized() const {
568 return true;
569}
570
571void MemoryLogStep::InternalSwap(MemoryLogStep* other) {
572 using std::swap;
573 _internal_metadata_.Swap(&other->_internal_metadata_);
574 handle_.Swap(&other->handle_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
575 GetArenaNoVirtual());
576 swap(step_id_, other->step_id_);
577}
578
579::PROTOBUF_NAMESPACE_ID::Metadata MemoryLogStep::GetMetadata() const {
580 return GetMetadataStatic();
581}
582
583
584// ===================================================================
585
586void MemoryLogTensorAllocation::InitAsDefaultInstance() {
587 ::tensorflow::_MemoryLogTensorAllocation_default_instance_._instance.get_mutable()->tensor_ = const_cast< ::tensorflow::TensorDescription*>(
588 ::tensorflow::TensorDescription::internal_default_instance());
589}
590class MemoryLogTensorAllocation::_Internal {
591 public:
592 static const ::tensorflow::TensorDescription& tensor(const MemoryLogTensorAllocation* msg);
593};
594
595const ::tensorflow::TensorDescription&
596MemoryLogTensorAllocation::_Internal::tensor(const MemoryLogTensorAllocation* msg) {
597 return *msg->tensor_;
598}
599void MemoryLogTensorAllocation::unsafe_arena_set_allocated_tensor(
600 ::tensorflow::TensorDescription* tensor) {
601 if (GetArenaNoVirtual() == nullptr) {
602 delete tensor_;
603 }
604 tensor_ = tensor;
605 if (tensor) {
606
607 } else {
608
609 }
610 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MemoryLogTensorAllocation.tensor)
611}
612void MemoryLogTensorAllocation::clear_tensor() {
613 if (GetArenaNoVirtual() == nullptr && tensor_ != nullptr) {
614 delete tensor_;
615 }
616 tensor_ = nullptr;
617}
618MemoryLogTensorAllocation::MemoryLogTensorAllocation()
619 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
620 SharedCtor();
621 // @@protoc_insertion_point(constructor:tensorflow.MemoryLogTensorAllocation)
622}
623MemoryLogTensorAllocation::MemoryLogTensorAllocation(::PROTOBUF_NAMESPACE_ID::Arena* arena)
624 : ::PROTOBUF_NAMESPACE_ID::Message(),
625 _internal_metadata_(arena) {
626 SharedCtor();
627 RegisterArenaDtor(arena);
628 // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogTensorAllocation)
629}
630MemoryLogTensorAllocation::MemoryLogTensorAllocation(const MemoryLogTensorAllocation& from)
631 : ::PROTOBUF_NAMESPACE_ID::Message(),
632 _internal_metadata_(nullptr) {
633 _internal_metadata_.MergeFrom(from._internal_metadata_);
634 kernel_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
635 if (!from.kernel_name().empty()) {
636 kernel_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.kernel_name(),
637 GetArenaNoVirtual());
638 }
639 if (from.has_tensor()) {
640 tensor_ = new ::tensorflow::TensorDescription(*from.tensor_);
641 } else {
642 tensor_ = nullptr;
643 }
644 step_id_ = from.step_id_;
645 // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogTensorAllocation)
646}
647
648void MemoryLogTensorAllocation::SharedCtor() {
649 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_MemoryLogTensorAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
650 kernel_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
651 ::memset(&tensor_, 0, static_cast<size_t>(
652 reinterpret_cast<char*>(&step_id_) -
653 reinterpret_cast<char*>(&tensor_)) + sizeof(step_id_));
654}
655
656MemoryLogTensorAllocation::~MemoryLogTensorAllocation() {
657 // @@protoc_insertion_point(destructor:tensorflow.MemoryLogTensorAllocation)
658 SharedDtor();
659}
660
661void MemoryLogTensorAllocation::SharedDtor() {
662 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
663 kernel_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
664 if (this != internal_default_instance()) delete tensor_;
665}
666
667void MemoryLogTensorAllocation::ArenaDtor(void* object) {
668 MemoryLogTensorAllocation* _this = reinterpret_cast< MemoryLogTensorAllocation* >(object);
669 (void)_this;
670}
671void MemoryLogTensorAllocation::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
672}
673void MemoryLogTensorAllocation::SetCachedSize(int size) const {
674 _cached_size_.Set(size);
675}
676const MemoryLogTensorAllocation& MemoryLogTensorAllocation::default_instance() {
677 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_MemoryLogTensorAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
678 return *internal_default_instance();
679}
680
681
682void MemoryLogTensorAllocation::Clear() {
683// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogTensorAllocation)
684 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
685 // Prevent compiler warnings about cached_has_bits being unused
686 (void) cached_has_bits;
687
688 kernel_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
689 if (GetArenaNoVirtual() == nullptr && tensor_ != nullptr) {
690 delete tensor_;
691 }
692 tensor_ = nullptr;
693 step_id_ = PROTOBUF_LONGLONG(0);
694 _internal_metadata_.Clear();
695}
696
697#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
698const char* MemoryLogTensorAllocation::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
699#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
700 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
701 while (!ctx->Done(&ptr)) {
702 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
703 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
704 CHK_(ptr);
705 switch (tag >> 3) {
706 // int64 step_id = 1;
707 case 1:
708 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
709 step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
710 CHK_(ptr);
711 } else goto handle_unusual;
712 continue;
713 // string kernel_name = 2;
714 case 2:
715 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
716 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_kernel_name(), ptr, ctx, "tensorflow.MemoryLogTensorAllocation.kernel_name");
717 CHK_(ptr);
718 } else goto handle_unusual;
719 continue;
720 // .tensorflow.TensorDescription tensor = 3;
721 case 3:
722 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
723 ptr = ctx->ParseMessage(mutable_tensor(), ptr);
724 CHK_(ptr);
725 } else goto handle_unusual;
726 continue;
727 default: {
728 handle_unusual:
729 if ((tag & 7) == 4 || tag == 0) {
730 ctx->SetLastTag(tag);
731 goto success;
732 }
733 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
734 CHK_(ptr != nullptr);
735 continue;
736 }
737 } // switch
738 } // while
739success:
740 return ptr;
741failure:
742 ptr = nullptr;
743 goto success;
744#undef CHK_
745}
746#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
747bool MemoryLogTensorAllocation::MergePartialFromCodedStream(
748 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
749#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
750 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
751 // @@protoc_insertion_point(parse_start:tensorflow.MemoryLogTensorAllocation)
752 for (;;) {
753 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
754 tag = p.first;
755 if (!p.second) goto handle_unusual;
756 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
757 // int64 step_id = 1;
758 case 1: {
759 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
760
761 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
762 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
763 input, &step_id_)));
764 } else {
765 goto handle_unusual;
766 }
767 break;
768 }
769
770 // string kernel_name = 2;
771 case 2: {
772 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
773 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
774 input, this->mutable_kernel_name()));
775 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
776 this->kernel_name().data(), static_cast<int>(this->kernel_name().length()),
777 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
778 "tensorflow.MemoryLogTensorAllocation.kernel_name"));
779 } else {
780 goto handle_unusual;
781 }
782 break;
783 }
784
785 // .tensorflow.TensorDescription tensor = 3;
786 case 3: {
787 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
788 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
789 input, mutable_tensor()));
790 } else {
791 goto handle_unusual;
792 }
793 break;
794 }
795
796 default: {
797 handle_unusual:
798 if (tag == 0) {
799 goto success;
800 }
801 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
802 input, tag, _internal_metadata_.mutable_unknown_fields()));
803 break;
804 }
805 }
806 }
807success:
808 // @@protoc_insertion_point(parse_success:tensorflow.MemoryLogTensorAllocation)
809 return true;
810failure:
811 // @@protoc_insertion_point(parse_failure:tensorflow.MemoryLogTensorAllocation)
812 return false;
813#undef DO_
814}
815#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
816
817void MemoryLogTensorAllocation::SerializeWithCachedSizes(
818 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
819 // @@protoc_insertion_point(serialize_start:tensorflow.MemoryLogTensorAllocation)
820 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
821 (void) cached_has_bits;
822
823 // int64 step_id = 1;
824 if (this->step_id() != 0) {
825 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->step_id(), output);
826 }
827
828 // string kernel_name = 2;
829 if (this->kernel_name().size() > 0) {
830 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
831 this->kernel_name().data(), static_cast<int>(this->kernel_name().length()),
832 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
833 "tensorflow.MemoryLogTensorAllocation.kernel_name");
834 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
835 2, this->kernel_name(), output);
836 }
837
838 // .tensorflow.TensorDescription tensor = 3;
839 if (this->has_tensor()) {
840 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
841 3, _Internal::tensor(this), output);
842 }
843
844 if (_internal_metadata_.have_unknown_fields()) {
845 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
846 _internal_metadata_.unknown_fields(), output);
847 }
848 // @@protoc_insertion_point(serialize_end:tensorflow.MemoryLogTensorAllocation)
849}
850
851::PROTOBUF_NAMESPACE_ID::uint8* MemoryLogTensorAllocation::InternalSerializeWithCachedSizesToArray(
852 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
853 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogTensorAllocation)
854 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
855 (void) cached_has_bits;
856
857 // int64 step_id = 1;
858 if (this->step_id() != 0) {
859 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->step_id(), target);
860 }
861
862 // string kernel_name = 2;
863 if (this->kernel_name().size() > 0) {
864 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
865 this->kernel_name().data(), static_cast<int>(this->kernel_name().length()),
866 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
867 "tensorflow.MemoryLogTensorAllocation.kernel_name");
868 target =
869 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
870 2, this->kernel_name(), target);
871 }
872
873 // .tensorflow.TensorDescription tensor = 3;
874 if (this->has_tensor()) {
875 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
876 InternalWriteMessageToArray(
877 3, _Internal::tensor(this), target);
878 }
879
880 if (_internal_metadata_.have_unknown_fields()) {
881 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
882 _internal_metadata_.unknown_fields(), target);
883 }
884 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogTensorAllocation)
885 return target;
886}
887
888size_t MemoryLogTensorAllocation::ByteSizeLong() const {
889// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogTensorAllocation)
890 size_t total_size = 0;
891
892 if (_internal_metadata_.have_unknown_fields()) {
893 total_size +=
894 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
895 _internal_metadata_.unknown_fields());
896 }
897 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
898 // Prevent compiler warnings about cached_has_bits being unused
899 (void) cached_has_bits;
900
901 // string kernel_name = 2;
902 if (this->kernel_name().size() > 0) {
903 total_size += 1 +
904 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
905 this->kernel_name());
906 }
907
908 // .tensorflow.TensorDescription tensor = 3;
909 if (this->has_tensor()) {
910 total_size += 1 +
911 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
912 *tensor_);
913 }
914
915 // int64 step_id = 1;
916 if (this->step_id() != 0) {
917 total_size += 1 +
918 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
919 this->step_id());
920 }
921
922 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
923 SetCachedSize(cached_size);
924 return total_size;
925}
926
927void MemoryLogTensorAllocation::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
928// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.MemoryLogTensorAllocation)
929 GOOGLE_DCHECK_NE(&from, this);
930 const MemoryLogTensorAllocation* source =
931 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<MemoryLogTensorAllocation>(
932 &from);
933 if (source == nullptr) {
934 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.MemoryLogTensorAllocation)
935 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
936 } else {
937 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.MemoryLogTensorAllocation)
938 MergeFrom(*source);
939 }
940}
941
942void MemoryLogTensorAllocation::MergeFrom(const MemoryLogTensorAllocation& from) {
943// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogTensorAllocation)
944 GOOGLE_DCHECK_NE(&from, this);
945 _internal_metadata_.MergeFrom(from._internal_metadata_);
946 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
947 (void) cached_has_bits;
948
949 if (from.kernel_name().size() > 0) {
950 set_kernel_name(from.kernel_name());
951 }
952 if (from.has_tensor()) {
953 mutable_tensor()->::tensorflow::TensorDescription::MergeFrom(from.tensor());
954 }
955 if (from.step_id() != 0) {
956 set_step_id(from.step_id());
957 }
958}
959
960void MemoryLogTensorAllocation::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
961// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.MemoryLogTensorAllocation)
962 if (&from == this) return;
963 Clear();
964 MergeFrom(from);
965}
966
967void MemoryLogTensorAllocation::CopyFrom(const MemoryLogTensorAllocation& from) {
968// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogTensorAllocation)
969 if (&from == this) return;
970 Clear();
971 MergeFrom(from);
972}
973
974bool MemoryLogTensorAllocation::IsInitialized() const {
975 return true;
976}
977
978void MemoryLogTensorAllocation::InternalSwap(MemoryLogTensorAllocation* other) {
979 using std::swap;
980 _internal_metadata_.Swap(&other->_internal_metadata_);
981 kernel_name_.Swap(&other->kernel_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
982 GetArenaNoVirtual());
983 swap(tensor_, other->tensor_);
984 swap(step_id_, other->step_id_);
985}
986
987::PROTOBUF_NAMESPACE_ID::Metadata MemoryLogTensorAllocation::GetMetadata() const {
988 return GetMetadataStatic();
989}
990
991
992// ===================================================================
993
994void MemoryLogTensorDeallocation::InitAsDefaultInstance() {
995}
996class MemoryLogTensorDeallocation::_Internal {
997 public:
998};
999
1000MemoryLogTensorDeallocation::MemoryLogTensorDeallocation()
1001 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
1002 SharedCtor();
1003 // @@protoc_insertion_point(constructor:tensorflow.MemoryLogTensorDeallocation)
1004}
1005MemoryLogTensorDeallocation::MemoryLogTensorDeallocation(::PROTOBUF_NAMESPACE_ID::Arena* arena)
1006 : ::PROTOBUF_NAMESPACE_ID::Message(),
1007 _internal_metadata_(arena) {
1008 SharedCtor();
1009 RegisterArenaDtor(arena);
1010 // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogTensorDeallocation)
1011}
1012MemoryLogTensorDeallocation::MemoryLogTensorDeallocation(const MemoryLogTensorDeallocation& from)
1013 : ::PROTOBUF_NAMESPACE_ID::Message(),
1014 _internal_metadata_(nullptr) {
1015 _internal_metadata_.MergeFrom(from._internal_metadata_);
1016 allocator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1017 if (!from.allocator_name().empty()) {
1018 allocator_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.allocator_name(),
1019 GetArenaNoVirtual());
1020 }
1021 allocation_id_ = from.allocation_id_;
1022 // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogTensorDeallocation)
1023}
1024
1025void MemoryLogTensorDeallocation::SharedCtor() {
1026 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_MemoryLogTensorDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
1027 allocator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1028 allocation_id_ = PROTOBUF_LONGLONG(0);
1029}
1030
1031MemoryLogTensorDeallocation::~MemoryLogTensorDeallocation() {
1032 // @@protoc_insertion_point(destructor:tensorflow.MemoryLogTensorDeallocation)
1033 SharedDtor();
1034}
1035
1036void MemoryLogTensorDeallocation::SharedDtor() {
1037 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
1038 allocator_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1039}
1040
1041void MemoryLogTensorDeallocation::ArenaDtor(void* object) {
1042 MemoryLogTensorDeallocation* _this = reinterpret_cast< MemoryLogTensorDeallocation* >(object);
1043 (void)_this;
1044}
1045void MemoryLogTensorDeallocation::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
1046}
1047void MemoryLogTensorDeallocation::SetCachedSize(int size) const {
1048 _cached_size_.Set(size);
1049}
1050const MemoryLogTensorDeallocation& MemoryLogTensorDeallocation::default_instance() {
1051 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_MemoryLogTensorDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
1052 return *internal_default_instance();
1053}
1054
1055
1056void MemoryLogTensorDeallocation::Clear() {
1057// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogTensorDeallocation)
1058 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1059 // Prevent compiler warnings about cached_has_bits being unused
1060 (void) cached_has_bits;
1061
1062 allocator_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
1063 allocation_id_ = PROTOBUF_LONGLONG(0);
1064 _internal_metadata_.Clear();
1065}
1066
1067#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1068const char* MemoryLogTensorDeallocation::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
1069#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1070 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
1071 while (!ctx->Done(&ptr)) {
1072 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1073 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
1074 CHK_(ptr);
1075 switch (tag >> 3) {
1076 // int64 allocation_id = 1;
1077 case 1:
1078 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
1079 allocation_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1080 CHK_(ptr);
1081 } else goto handle_unusual;
1082 continue;
1083 // string allocator_name = 2;
1084 case 2:
1085 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
1086 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_allocator_name(), ptr, ctx, "tensorflow.MemoryLogTensorDeallocation.allocator_name");
1087 CHK_(ptr);
1088 } else goto handle_unusual;
1089 continue;
1090 default: {
1091 handle_unusual:
1092 if ((tag & 7) == 4 || tag == 0) {
1093 ctx->SetLastTag(tag);
1094 goto success;
1095 }
1096 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
1097 CHK_(ptr != nullptr);
1098 continue;
1099 }
1100 } // switch
1101 } // while
1102success:
1103 return ptr;
1104failure:
1105 ptr = nullptr;
1106 goto success;
1107#undef CHK_
1108}
1109#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1110bool MemoryLogTensorDeallocation::MergePartialFromCodedStream(
1111 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
1112#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
1113 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1114 // @@protoc_insertion_point(parse_start:tensorflow.MemoryLogTensorDeallocation)
1115 for (;;) {
1116 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
1117 tag = p.first;
1118 if (!p.second) goto handle_unusual;
1119 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
1120 // int64 allocation_id = 1;
1121 case 1: {
1122 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
1123
1124 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1125 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
1126 input, &allocation_id_)));
1127 } else {
1128 goto handle_unusual;
1129 }
1130 break;
1131 }
1132
1133 // string allocator_name = 2;
1134 case 2: {
1135 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
1136 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
1137 input, this->mutable_allocator_name()));
1138 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1139 this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
1140 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
1141 "tensorflow.MemoryLogTensorDeallocation.allocator_name"));
1142 } else {
1143 goto handle_unusual;
1144 }
1145 break;
1146 }
1147
1148 default: {
1149 handle_unusual:
1150 if (tag == 0) {
1151 goto success;
1152 }
1153 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
1154 input, tag, _internal_metadata_.mutable_unknown_fields()));
1155 break;
1156 }
1157 }
1158 }
1159success:
1160 // @@protoc_insertion_point(parse_success:tensorflow.MemoryLogTensorDeallocation)
1161 return true;
1162failure:
1163 // @@protoc_insertion_point(parse_failure:tensorflow.MemoryLogTensorDeallocation)
1164 return false;
1165#undef DO_
1166}
1167#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1168
1169void MemoryLogTensorDeallocation::SerializeWithCachedSizes(
1170 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
1171 // @@protoc_insertion_point(serialize_start:tensorflow.MemoryLogTensorDeallocation)
1172 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1173 (void) cached_has_bits;
1174
1175 // int64 allocation_id = 1;
1176 if (this->allocation_id() != 0) {
1177 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->allocation_id(), output);
1178 }
1179
1180 // string allocator_name = 2;
1181 if (this->allocator_name().size() > 0) {
1182 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1183 this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
1184 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
1185 "tensorflow.MemoryLogTensorDeallocation.allocator_name");
1186 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
1187 2, this->allocator_name(), output);
1188 }
1189
1190 if (_internal_metadata_.have_unknown_fields()) {
1191 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
1192 _internal_metadata_.unknown_fields(), output);
1193 }
1194 // @@protoc_insertion_point(serialize_end:tensorflow.MemoryLogTensorDeallocation)
1195}
1196
1197::PROTOBUF_NAMESPACE_ID::uint8* MemoryLogTensorDeallocation::InternalSerializeWithCachedSizesToArray(
1198 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
1199 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogTensorDeallocation)
1200 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1201 (void) cached_has_bits;
1202
1203 // int64 allocation_id = 1;
1204 if (this->allocation_id() != 0) {
1205 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->allocation_id(), target);
1206 }
1207
1208 // string allocator_name = 2;
1209 if (this->allocator_name().size() > 0) {
1210 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1211 this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
1212 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
1213 "tensorflow.MemoryLogTensorDeallocation.allocator_name");
1214 target =
1215 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
1216 2, this->allocator_name(), target);
1217 }
1218
1219 if (_internal_metadata_.have_unknown_fields()) {
1220 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
1221 _internal_metadata_.unknown_fields(), target);
1222 }
1223 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogTensorDeallocation)
1224 return target;
1225}
1226
1227size_t MemoryLogTensorDeallocation::ByteSizeLong() const {
1228// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogTensorDeallocation)
1229 size_t total_size = 0;
1230
1231 if (_internal_metadata_.have_unknown_fields()) {
1232 total_size +=
1233 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
1234 _internal_metadata_.unknown_fields());
1235 }
1236 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1237 // Prevent compiler warnings about cached_has_bits being unused
1238 (void) cached_has_bits;
1239
1240 // string allocator_name = 2;
1241 if (this->allocator_name().size() > 0) {
1242 total_size += 1 +
1243 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
1244 this->allocator_name());
1245 }
1246
1247 // int64 allocation_id = 1;
1248 if (this->allocation_id() != 0) {
1249 total_size += 1 +
1250 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
1251 this->allocation_id());
1252 }
1253
1254 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
1255 SetCachedSize(cached_size);
1256 return total_size;
1257}
1258
1259void MemoryLogTensorDeallocation::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
1260// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.MemoryLogTensorDeallocation)
1261 GOOGLE_DCHECK_NE(&from, this);
1262 const MemoryLogTensorDeallocation* source =
1263 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<MemoryLogTensorDeallocation>(
1264 &from);
1265 if (source == nullptr) {
1266 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.MemoryLogTensorDeallocation)
1267 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
1268 } else {
1269 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.MemoryLogTensorDeallocation)
1270 MergeFrom(*source);
1271 }
1272}
1273
1274void MemoryLogTensorDeallocation::MergeFrom(const MemoryLogTensorDeallocation& from) {
1275// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogTensorDeallocation)
1276 GOOGLE_DCHECK_NE(&from, this);
1277 _internal_metadata_.MergeFrom(from._internal_metadata_);
1278 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1279 (void) cached_has_bits;
1280
1281 if (from.allocator_name().size() > 0) {
1282 set_allocator_name(from.allocator_name());
1283 }
1284 if (from.allocation_id() != 0) {
1285 set_allocation_id(from.allocation_id());
1286 }
1287}
1288
1289void MemoryLogTensorDeallocation::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
1290// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.MemoryLogTensorDeallocation)
1291 if (&from == this) return;
1292 Clear();
1293 MergeFrom(from);
1294}
1295
1296void MemoryLogTensorDeallocation::CopyFrom(const MemoryLogTensorDeallocation& from) {
1297// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogTensorDeallocation)
1298 if (&from == this) return;
1299 Clear();
1300 MergeFrom(from);
1301}
1302
1303bool MemoryLogTensorDeallocation::IsInitialized() const {
1304 return true;
1305}
1306
1307void MemoryLogTensorDeallocation::InternalSwap(MemoryLogTensorDeallocation* other) {
1308 using std::swap;
1309 _internal_metadata_.Swap(&other->_internal_metadata_);
1310 allocator_name_.Swap(&other->allocator_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
1311 GetArenaNoVirtual());
1312 swap(allocation_id_, other->allocation_id_);
1313}
1314
1315::PROTOBUF_NAMESPACE_ID::Metadata MemoryLogTensorDeallocation::GetMetadata() const {
1316 return GetMetadataStatic();
1317}
1318
1319
1320// ===================================================================
1321
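// Generated implementation of tensorflow.MemoryLogTensorOutput. As the field
// comments below show, the message carries int64 step_id = 1,
// string kernel_name = 2, int32 index = 3 and
// .tensorflow.TensorDescription tensor = 4; the tensor sub-message of the
// default instance is wired to the TensorDescription default instance in
// InitAsDefaultInstance().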
1322void MemoryLogTensorOutput::InitAsDefaultInstance() {
1323 ::tensorflow::_MemoryLogTensorOutput_default_instance_._instance.get_mutable()->tensor_ = const_cast< ::tensorflow::TensorDescription*>(
1324 ::tensorflow::TensorDescription::internal_default_instance());
1325}
1326class MemoryLogTensorOutput::_Internal {
1327 public:
1328 static const ::tensorflow::TensorDescription& tensor(const MemoryLogTensorOutput* msg);
1329};
1330
1331const ::tensorflow::TensorDescription&
1332MemoryLogTensorOutput::_Internal::tensor(const MemoryLogTensorOutput* msg) {
1333 return *msg->tensor_;
1334}
1335void MemoryLogTensorOutput::unsafe_arena_set_allocated_tensor(
1336 ::tensorflow::TensorDescription* tensor) {
1337 if (GetArenaNoVirtual() == nullptr) {
1338 delete tensor_;
1339 }
1340 tensor_ = tensor;
1346 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.MemoryLogTensorOutput.tensor)
1347}
1348void MemoryLogTensorOutput::clear_tensor() {
1349 if (GetArenaNoVirtual() == nullptr && tensor_ != nullptr) {
1350 delete tensor_;
1351 }
1352 tensor_ = nullptr;
1353}
1354MemoryLogTensorOutput::MemoryLogTensorOutput()
1355 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
1356 SharedCtor();
1357 // @@protoc_insertion_point(constructor:tensorflow.MemoryLogTensorOutput)
1358}
1359MemoryLogTensorOutput::MemoryLogTensorOutput(::PROTOBUF_NAMESPACE_ID::Arena* arena)
1360 : ::PROTOBUF_NAMESPACE_ID::Message(),
1361 _internal_metadata_(arena) {
1362 SharedCtor();
1363 RegisterArenaDtor(arena);
1364 // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogTensorOutput)
1365}
1366MemoryLogTensorOutput::MemoryLogTensorOutput(const MemoryLogTensorOutput& from)
1367 : ::PROTOBUF_NAMESPACE_ID::Message(),
1368 _internal_metadata_(nullptr) {
1369 _internal_metadata_.MergeFrom(from._internal_metadata_);
1370 kernel_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1371 if (!from.kernel_name().empty()) {
1372 kernel_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.kernel_name(),
1373 GetArenaNoVirtual());
1374 }
1375 if (from.has_tensor()) {
1376 tensor_ = new ::tensorflow::TensorDescription(*from.tensor_);
1377 } else {
1378 tensor_ = nullptr;
1379 }
1380 ::memcpy(&step_id_, &from.step_id_,
1381 static_cast<size_t>(reinterpret_cast<char*>(&index_) -
1382 reinterpret_cast<char*>(&step_id_)) + sizeof(index_));
1383 // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogTensorOutput)
1384}
1385
1386void MemoryLogTensorOutput::SharedCtor() {
1387 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_MemoryLogTensorOutput_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
1388 kernel_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1389 ::memset(&tensor_, 0, static_cast<size_t>(
1390 reinterpret_cast<char*>(&index_) -
1391 reinterpret_cast<char*>(&tensor_)) + sizeof(index_));
1392}
1393
1394MemoryLogTensorOutput::~MemoryLogTensorOutput() {
1395 // @@protoc_insertion_point(destructor:tensorflow.MemoryLogTensorOutput)
1396 SharedDtor();
1397}
1398
1399void MemoryLogTensorOutput::SharedDtor() {
1400 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
1401 kernel_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1402 if (this != internal_default_instance()) delete tensor_;
1403}
1404
1405void MemoryLogTensorOutput::ArenaDtor(void* object) {
1406 MemoryLogTensorOutput* _this = reinterpret_cast< MemoryLogTensorOutput* >(object);
1407 (void)_this;
1408}
1409void MemoryLogTensorOutput::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
1410}
1411void MemoryLogTensorOutput::SetCachedSize(int size) const {
1412 _cached_size_.Set(size);
1413}
1414const MemoryLogTensorOutput& MemoryLogTensorOutput::default_instance() {
1415 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_MemoryLogTensorOutput_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
1416 return *internal_default_instance();
1417}
1418
1419
1420void MemoryLogTensorOutput::Clear() {
1421// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogTensorOutput)
1422 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1423 // Prevent compiler warnings about cached_has_bits being unused
1424 (void) cached_has_bits;
1425
1426 kernel_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
1427 if (GetArenaNoVirtual() == nullptr && tensor_ != nullptr) {
1428 delete tensor_;
1429 }
1430 tensor_ = nullptr;
1431 ::memset(&step_id_, 0, static_cast<size_t>(
1432 reinterpret_cast<char*>(&index_) -
1433 reinterpret_cast<char*>(&step_id_)) + sizeof(index_));
1434 _internal_metadata_.Clear();
1435}
1436
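// Two parser implementations follow: _InternalParse, used when
// GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER is set, and the classic
// MergePartialFromCodedStream otherwise. Both dispatch on the wire-format
// tag, (field_number << 3) | wire_type, which is why step_id (field 1,
// varint) matches tag byte 8, kernel_name (field 2, length-delimited)
// matches 18, index (field 3, varint) matches 24 and tensor (field 4,
// length-delimited) matches 34.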
1437#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1438const char* MemoryLogTensorOutput::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
1439#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1440 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
1441 while (!ctx->Done(&ptr)) {
1442 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1443 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
1444 CHK_(ptr);
1445 switch (tag >> 3) {
1446 // int64 step_id = 1;
1447 case 1:
1448 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
1449 step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1450 CHK_(ptr);
1451 } else goto handle_unusual;
1452 continue;
1453 // string kernel_name = 2;
1454 case 2:
1455 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
1456 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_kernel_name(), ptr, ctx, "tensorflow.MemoryLogTensorOutput.kernel_name");
1457 CHK_(ptr);
1458 } else goto handle_unusual;
1459 continue;
1460 // int32 index = 3;
1461 case 3:
1462 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
1463 index_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1464 CHK_(ptr);
1465 } else goto handle_unusual;
1466 continue;
1467 // .tensorflow.TensorDescription tensor = 4;
1468 case 4:
1469 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) {
1470 ptr = ctx->ParseMessage(mutable_tensor(), ptr);
1471 CHK_(ptr);
1472 } else goto handle_unusual;
1473 continue;
1474 default: {
1475 handle_unusual:
1476 if ((tag & 7) == 4 || tag == 0) {
1477 ctx->SetLastTag(tag);
1478 goto success;
1479 }
1480 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
1481 CHK_(ptr != nullptr);
1482 continue;
1483 }
1484 } // switch
1485 } // while
1486success:
1487 return ptr;
1488failure:
1489 ptr = nullptr;
1490 goto success;
1491#undef CHK_
1492}
1493#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1494bool MemoryLogTensorOutput::MergePartialFromCodedStream(
1495 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
1496#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
1497 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1498 // @@protoc_insertion_point(parse_start:tensorflow.MemoryLogTensorOutput)
1499 for (;;) {
1500 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
1501 tag = p.first;
1502 if (!p.second) goto handle_unusual;
1503 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
1504 // int64 step_id = 1;
1505 case 1: {
1506 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
1507
1508 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1509 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
1510 input, &step_id_)));
1511 } else {
1512 goto handle_unusual;
1513 }
1514 break;
1515 }
1516
1517 // string kernel_name = 2;
1518 case 2: {
1519 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
1520 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
1521 input, this->mutable_kernel_name()));
1522 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1523 this->kernel_name().data(), static_cast<int>(this->kernel_name().length()),
1524 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
1525 "tensorflow.MemoryLogTensorOutput.kernel_name"));
1526 } else {
1527 goto handle_unusual;
1528 }
1529 break;
1530 }
1531
1532 // int32 index = 3;
1533 case 3: {
1534 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
1535
1536 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1537 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
1538 input, &index_)));
1539 } else {
1540 goto handle_unusual;
1541 }
1542 break;
1543 }
1544
1545 // .tensorflow.TensorDescription tensor = 4;
1546 case 4: {
1547 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) {
1548 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
1549 input, mutable_tensor()));
1550 } else {
1551 goto handle_unusual;
1552 }
1553 break;
1554 }
1555
1556 default: {
1557 handle_unusual:
1558 if (tag == 0) {
1559 goto success;
1560 }
1561 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
1562 input, tag, _internal_metadata_.mutable_unknown_fields()));
1563 break;
1564 }
1565 }
1566 }
1567success:
1568 // @@protoc_insertion_point(parse_success:tensorflow.MemoryLogTensorOutput)
1569 return true;
1570failure:
1571 // @@protoc_insertion_point(parse_failure:tensorflow.MemoryLogTensorOutput)
1572 return false;
1573#undef DO_
1574}
1575#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1576
1577void MemoryLogTensorOutput::SerializeWithCachedSizes(
1578 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
1579 // @@protoc_insertion_point(serialize_start:tensorflow.MemoryLogTensorOutput)
1580 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1581 (void) cached_has_bits;
1582
1583 // int64 step_id = 1;
1584 if (this->step_id() != 0) {
1585 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->step_id(), output);
1586 }
1587
1588 // string kernel_name = 2;
1589 if (this->kernel_name().size() > 0) {
1590 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1591 this->kernel_name().data(), static_cast<int>(this->kernel_name().length()),
1592 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
1593 "tensorflow.MemoryLogTensorOutput.kernel_name");
1594 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
1595 2, this->kernel_name(), output);
1596 }
1597
1598 // int32 index = 3;
1599 if (this->index() != 0) {
1600 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(3, this->index(), output);
1601 }
1602
1603 // .tensorflow.TensorDescription tensor = 4;
1604 if (this->has_tensor()) {
1605 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
1606 4, _Internal::tensor(this), output);
1607 }
1608
1609 if (_internal_metadata_.have_unknown_fields()) {
1610 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
1611 _internal_metadata_.unknown_fields(), output);
1612 }
1613 // @@protoc_insertion_point(serialize_end:tensorflow.MemoryLogTensorOutput)
1614}
1615
1616::PROTOBUF_NAMESPACE_ID::uint8* MemoryLogTensorOutput::InternalSerializeWithCachedSizesToArray(
1617 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
1618 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogTensorOutput)
1619 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1620 (void) cached_has_bits;
1621
1622 // int64 step_id = 1;
1623 if (this->step_id() != 0) {
1624 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->step_id(), target);
1625 }
1626
1627 // string kernel_name = 2;
1628 if (this->kernel_name().size() > 0) {
1629 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1630 this->kernel_name().data(), static_cast<int>(this->kernel_name().length()),
1631 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
1632 "tensorflow.MemoryLogTensorOutput.kernel_name");
1633 target =
1634 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
1635 2, this->kernel_name(), target);
1636 }
1637
1638 // int32 index = 3;
1639 if (this->index() != 0) {
1640 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(3, this->index(), target);
1641 }
1642
1643 // .tensorflow.TensorDescription tensor = 4;
1644 if (this->has_tensor()) {
1645 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1646 InternalWriteMessageToArray(
1647 4, _Internal::tensor(this), target);
1648 }
1649
1650 if (_internal_metadata_.have_unknown_fields()) {
1651 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
1652 _internal_metadata_.unknown_fields(), target);
1653 }
1654 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogTensorOutput)
1655 return target;
1656}
1657
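// Size computation mirrors serialization: every non-default field costs one
// tag byte plus its payload. For example, a step_id of 300 costs
// 1 (tag 0x08) + 2 (varint 0xAC 0x02) = 3 bytes, and the tensor sub-message
// costs one tag byte plus a varint length prefix plus its own ByteSizeLong().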
1658size_t MemoryLogTensorOutput::ByteSizeLong() const {
1659// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogTensorOutput)
1660 size_t total_size = 0;
1661
1662 if (_internal_metadata_.have_unknown_fields()) {
1663 total_size +=
1664 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
1665 _internal_metadata_.unknown_fields());
1666 }
1667 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1668 // Prevent compiler warnings about cached_has_bits being unused
1669 (void) cached_has_bits;
1670
1671 // string kernel_name = 2;
1672 if (this->kernel_name().size() > 0) {
1673 total_size += 1 +
1674 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
1675 this->kernel_name());
1676 }
1677
1678 // .tensorflow.TensorDescription tensor = 4;
1679 if (this->has_tensor()) {
1680 total_size += 1 +
1681 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1682 *tensor_);
1683 }
1684
1685 // int64 step_id = 1;
1686 if (this->step_id() != 0) {
1687 total_size += 1 +
1688 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
1689 this->step_id());
1690 }
1691
1692 // int32 index = 3;
1693 if (this->index() != 0) {
1694 total_size += 1 +
1695 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
1696 this->index());
1697 }
1698
1699 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
1700 SetCachedSize(cached_size);
1701 return total_size;
1702}
1703
1704void MemoryLogTensorOutput::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
1705// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.MemoryLogTensorOutput)
1706 GOOGLE_DCHECK_NE(&from, this);
1707 const MemoryLogTensorOutput* source =
1708 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<MemoryLogTensorOutput>(
1709 &from);
1710 if (source == nullptr) {
1711 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.MemoryLogTensorOutput)
1712 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
1713 } else {
1714 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.MemoryLogTensorOutput)
1715 MergeFrom(*source);
1716 }
1717}
1718
1719void MemoryLogTensorOutput::MergeFrom(const MemoryLogTensorOutput& from) {
1720// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogTensorOutput)
1721 GOOGLE_DCHECK_NE(&from, this);
1722 _internal_metadata_.MergeFrom(from._internal_metadata_);
1723 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1724 (void) cached_has_bits;
1725
1726 if (from.kernel_name().size() > 0) {
1727 set_kernel_name(from.kernel_name());
1728 }
1729 if (from.has_tensor()) {
1730 mutable_tensor()->::tensorflow::TensorDescription::MergeFrom(from.tensor());
1731 }
1732 if (from.step_id() != 0) {
1733 set_step_id(from.step_id());
1734 }
1735 if (from.index() != 0) {
1736 set_index(from.index());
1737 }
1738}
1739
1740void MemoryLogTensorOutput::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
1741// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.MemoryLogTensorOutput)
1742 if (&from == this) return;
1743 Clear();
1744 MergeFrom(from);
1745}
1746
1747void MemoryLogTensorOutput::CopyFrom(const MemoryLogTensorOutput& from) {
1748// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogTensorOutput)
1749 if (&from == this) return;
1750 Clear();
1751 MergeFrom(from);
1752}
1753
1754bool MemoryLogTensorOutput::IsInitialized() const {
1755 return true;
1756}
1757
1758void MemoryLogTensorOutput::InternalSwap(MemoryLogTensorOutput* other) {
1759 using std::swap;
1760 _internal_metadata_.Swap(&other->_internal_metadata_);
1761 kernel_name_.Swap(&other->kernel_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
1762 GetArenaNoVirtual());
1763 swap(tensor_, other->tensor_);
1764 swap(step_id_, other->step_id_);
1765 swap(index_, other->index_);
1766}
1767
1768::PROTOBUF_NAMESPACE_ID::Metadata MemoryLogTensorOutput::GetMetadata() const {
1769 return GetMetadataStatic();
1770}
1771
1772
1773// ===================================================================
1774
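// Generated implementation of tensorflow.MemoryLogRawAllocation, a record of
// a single raw allocator call: int64 step_id = 1, string operation = 2,
// int64 num_bytes = 3, uint64 ptr = 4, int64 allocation_id = 5 and
// string allocator_name = 6 (see the per-field comments in the parser and
// serializer below). There are no sub-messages, so InitAsDefaultInstance()
// is empty.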
1775void MemoryLogRawAllocation::InitAsDefaultInstance() {
1776}
1777class MemoryLogRawAllocation::_Internal {
1778 public:
1779};
1780
1781MemoryLogRawAllocation::MemoryLogRawAllocation()
1782 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
1783 SharedCtor();
1784 // @@protoc_insertion_point(constructor:tensorflow.MemoryLogRawAllocation)
1785}
1786MemoryLogRawAllocation::MemoryLogRawAllocation(::PROTOBUF_NAMESPACE_ID::Arena* arena)
1787 : ::PROTOBUF_NAMESPACE_ID::Message(),
1788 _internal_metadata_(arena) {
1789 SharedCtor();
1790 RegisterArenaDtor(arena);
1791 // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogRawAllocation)
1792}
1793MemoryLogRawAllocation::MemoryLogRawAllocation(const MemoryLogRawAllocation& from)
1794 : ::PROTOBUF_NAMESPACE_ID::Message(),
1795 _internal_metadata_(nullptr) {
1796 _internal_metadata_.MergeFrom(from._internal_metadata_);
1797 operation_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1798 if (!from.operation().empty()) {
1799 operation_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.operation(),
1800 GetArenaNoVirtual());
1801 }
1802 allocator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1803 if (!from.allocator_name().empty()) {
1804 allocator_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.allocator_name(),
1805 GetArenaNoVirtual());
1806 }
1807 ::memcpy(&step_id_, &from.step_id_,
1808 static_cast<size_t>(reinterpret_cast<char*>(&allocation_id_) -
1809 reinterpret_cast<char*>(&step_id_)) + sizeof(allocation_id_));
1810 // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogRawAllocation)
1811}
1812
1813void MemoryLogRawAllocation::SharedCtor() {
1814 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_MemoryLogRawAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
1815 operation_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1816 allocator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1817 ::memset(&step_id_, 0, static_cast<size_t>(
1818 reinterpret_cast<char*>(&allocation_id_) -
1819 reinterpret_cast<char*>(&step_id_)) + sizeof(allocation_id_));
1820}
1821
1822MemoryLogRawAllocation::~MemoryLogRawAllocation() {
1823 // @@protoc_insertion_point(destructor:tensorflow.MemoryLogRawAllocation)
1824 SharedDtor();
1825}
1826
1827void MemoryLogRawAllocation::SharedDtor() {
1828 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
1829 operation_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1830 allocator_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1831}
1832
1833void MemoryLogRawAllocation::ArenaDtor(void* object) {
1834 MemoryLogRawAllocation* _this = reinterpret_cast< MemoryLogRawAllocation* >(object);
1835 (void)_this;
1836}
1837void MemoryLogRawAllocation::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
1838}
1839void MemoryLogRawAllocation::SetCachedSize(int size) const {
1840 _cached_size_.Set(size);
1841}
1842const MemoryLogRawAllocation& MemoryLogRawAllocation::default_instance() {
1843 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_MemoryLogRawAllocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
1844 return *internal_default_instance();
1845}
1846
1847
1848void MemoryLogRawAllocation::Clear() {
1849// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogRawAllocation)
1850 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1851 // Prevent compiler warnings about cached_has_bits being unused
1852 (void) cached_has_bits;
1853
1854 operation_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
1855 allocator_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
1856 ::memset(&step_id_, 0, static_cast<size_t>(
1857 reinterpret_cast<char*>(&allocation_id_) -
1858 reinterpret_cast<char*>(&step_id_)) + sizeof(allocation_id_));
1859 _internal_metadata_.Clear();
1860}
1861
1862#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1863const char* MemoryLogRawAllocation::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
1864#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1865 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
1866 while (!ctx->Done(&ptr)) {
1867 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1868 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
1869 CHK_(ptr);
1870 switch (tag >> 3) {
1871 // int64 step_id = 1;
1872 case 1:
1873 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
1874 step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1875 CHK_(ptr);
1876 } else goto handle_unusual;
1877 continue;
1878 // string operation = 2;
1879 case 2:
1880 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
1881 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_operation(), ptr, ctx, "tensorflow.MemoryLogRawAllocation.operation");
1882 CHK_(ptr);
1883 } else goto handle_unusual;
1884 continue;
1885 // int64 num_bytes = 3;
1886 case 3:
1887 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
1888 num_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1889 CHK_(ptr);
1890 } else goto handle_unusual;
1891 continue;
1892 // uint64 ptr = 4;
1893 case 4:
1894 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
1895 ptr_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1896 CHK_(ptr);
1897 } else goto handle_unusual;
1898 continue;
1899 // int64 allocation_id = 5;
1900 case 5:
1901 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
1902 allocation_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1903 CHK_(ptr);
1904 } else goto handle_unusual;
1905 continue;
1906 // string allocator_name = 6;
1907 case 6:
1908 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) {
1909 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_allocator_name(), ptr, ctx, "tensorflow.MemoryLogRawAllocation.allocator_name");
1910 CHK_(ptr);
1911 } else goto handle_unusual;
1912 continue;
1913 default: {
1914 handle_unusual:
1915 if ((tag & 7) == 4 || tag == 0) {
1916 ctx->SetLastTag(tag);
1917 goto success;
1918 }
1919 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
1920 CHK_(ptr != nullptr);
1921 continue;
1922 }
1923 } // switch
1924 } // while
1925success:
1926 return ptr;
1927failure:
1928 ptr = nullptr;
1929 goto success;
1930#undef CHK_
1931}
1932#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1933bool MemoryLogRawAllocation::MergePartialFromCodedStream(
1934 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
1935#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
1936 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1937 // @@protoc_insertion_point(parse_start:tensorflow.MemoryLogRawAllocation)
1938 for (;;) {
1939 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
1940 tag = p.first;
1941 if (!p.second) goto handle_unusual;
1942 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
1943 // int64 step_id = 1;
1944 case 1: {
1945 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
1946
1947 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1948 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
1949 input, &step_id_)));
1950 } else {
1951 goto handle_unusual;
1952 }
1953 break;
1954 }
1955
1956 // string operation = 2;
1957 case 2: {
1958 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
1959 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
1960 input, this->mutable_operation()));
1961 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1962 this->operation().data(), static_cast<int>(this->operation().length()),
1963 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
1964 "tensorflow.MemoryLogRawAllocation.operation"));
1965 } else {
1966 goto handle_unusual;
1967 }
1968 break;
1969 }
1970
1971 // int64 num_bytes = 3;
1972 case 3: {
1973 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
1974
1975 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1976 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
1977 input, &num_bytes_)));
1978 } else {
1979 goto handle_unusual;
1980 }
1981 break;
1982 }
1983
1984 // uint64 ptr = 4;
1985 case 4: {
1986 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {
1987
1988 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1989 ::PROTOBUF_NAMESPACE_ID::uint64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_UINT64>(
1990 input, &ptr_)));
1991 } else {
1992 goto handle_unusual;
1993 }
1994 break;
1995 }
1996
1997 // int64 allocation_id = 5;
1998 case 5: {
1999 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
2000
2001 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2002 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
2003 input, &allocation_id_)));
2004 } else {
2005 goto handle_unusual;
2006 }
2007 break;
2008 }
2009
2010 // string allocator_name = 6;
2011 case 6: {
2012 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) {
2013 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
2014 input, this->mutable_allocator_name()));
2015 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2016 this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
2017 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
2018 "tensorflow.MemoryLogRawAllocation.allocator_name"));
2019 } else {
2020 goto handle_unusual;
2021 }
2022 break;
2023 }
2024
2025 default: {
2026 handle_unusual:
2027 if (tag == 0) {
2028 goto success;
2029 }
2030 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
2031 input, tag, _internal_metadata_.mutable_unknown_fields()));
2032 break;
2033 }
2034 }
2035 }
2036success:
2037 // @@protoc_insertion_point(parse_success:tensorflow.MemoryLogRawAllocation)
2038 return true;
2039failure:
2040 // @@protoc_insertion_point(parse_failure:tensorflow.MemoryLogRawAllocation)
2041 return false;
2042#undef DO_
2043}
2044#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2045
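// Serialization follows proto3 rules: scalar fields equal to zero and empty
// strings are skipped entirely, string payloads are verified to be valid
// UTF-8, and any unknown fields preserved during parsing are re-emitted at
// the end of the output.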
2046void MemoryLogRawAllocation::SerializeWithCachedSizes(
2047 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
2048 // @@protoc_insertion_point(serialize_start:tensorflow.MemoryLogRawAllocation)
2049 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2050 (void) cached_has_bits;
2051
2052 // int64 step_id = 1;
2053 if (this->step_id() != 0) {
2054 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->step_id(), output);
2055 }
2056
2057 // string operation = 2;
2058 if (this->operation().size() > 0) {
2059 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2060 this->operation().data(), static_cast<int>(this->operation().length()),
2061 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2062 "tensorflow.MemoryLogRawAllocation.operation");
2063 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
2064 2, this->operation(), output);
2065 }
2066
2067 // int64 num_bytes = 3;
2068 if (this->num_bytes() != 0) {
2069 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(3, this->num_bytes(), output);
2070 }
2071
2072 // uint64 ptr = 4;
2073 if (this->ptr() != 0) {
2074 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteUInt64(4, this->ptr(), output);
2075 }
2076
2077 // int64 allocation_id = 5;
2078 if (this->allocation_id() != 0) {
2079 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(5, this->allocation_id(), output);
2080 }
2081
2082 // string allocator_name = 6;
2083 if (this->allocator_name().size() > 0) {
2084 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2085 this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
2086 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2087 "tensorflow.MemoryLogRawAllocation.allocator_name");
2088 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
2089 6, this->allocator_name(), output);
2090 }
2091
2092 if (_internal_metadata_.have_unknown_fields()) {
2093 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
2094 _internal_metadata_.unknown_fields(), output);
2095 }
2096 // @@protoc_insertion_point(serialize_end:tensorflow.MemoryLogRawAllocation)
2097}
2098
2099::PROTOBUF_NAMESPACE_ID::uint8* MemoryLogRawAllocation::InternalSerializeWithCachedSizesToArray(
2100 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
2101 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogRawAllocation)
2102 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2103 (void) cached_has_bits;
2104
2105 // int64 step_id = 1;
2106 if (this->step_id() != 0) {
2107 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->step_id(), target);
2108 }
2109
2110 // string operation = 2;
2111 if (this->operation().size() > 0) {
2112 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2113 this->operation().data(), static_cast<int>(this->operation().length()),
2114 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2115 "tensorflow.MemoryLogRawAllocation.operation");
2116 target =
2117 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
2118 2, this->operation(), target);
2119 }
2120
2121 // int64 num_bytes = 3;
2122 if (this->num_bytes() != 0) {
2123 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(3, this->num_bytes(), target);
2124 }
2125
2126 // uint64 ptr = 4;
2127 if (this->ptr() != 0) {
2128 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteUInt64ToArray(4, this->ptr(), target);
2129 }
2130
2131 // int64 allocation_id = 5;
2132 if (this->allocation_id() != 0) {
2133 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(5, this->allocation_id(), target);
2134 }
2135
2136 // string allocator_name = 6;
2137 if (this->allocator_name().size() > 0) {
2138 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2139 this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
2140 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2141 "tensorflow.MemoryLogRawAllocation.allocator_name");
2142 target =
2143 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
2144 6, this->allocator_name(), target);
2145 }
2146
2147 if (_internal_metadata_.have_unknown_fields()) {
2148 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
2149 _internal_metadata_.unknown_fields(), target);
2150 }
2151 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogRawAllocation)
2152 return target;
2153}
2154
2155size_t MemoryLogRawAllocation::ByteSizeLong() const {
2156// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogRawAllocation)
2157 size_t total_size = 0;
2158
2159 if (_internal_metadata_.have_unknown_fields()) {
2160 total_size +=
2161 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
2162 _internal_metadata_.unknown_fields());
2163 }
2164 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2165 // Prevent compiler warnings about cached_has_bits being unused
2166 (void) cached_has_bits;
2167
2168 // string operation = 2;
2169 if (this->operation().size() > 0) {
2170 total_size += 1 +
2171 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
2172 this->operation());
2173 }
2174
2175 // string allocator_name = 6;
2176 if (this->allocator_name().size() > 0) {
2177 total_size += 1 +
2178 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
2179 this->allocator_name());
2180 }
2181
2182 // int64 step_id = 1;
2183 if (this->step_id() != 0) {
2184 total_size += 1 +
2185 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
2186 this->step_id());
2187 }
2188
2189 // int64 num_bytes = 3;
2190 if (this->num_bytes() != 0) {
2191 total_size += 1 +
2192 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
2193 this->num_bytes());
2194 }
2195
2196 // uint64 ptr = 4;
2197 if (this->ptr() != 0) {
2198 total_size += 1 +
2199 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::UInt64Size(
2200 this->ptr());
2201 }
2202
2203 // int64 allocation_id = 5;
2204 if (this->allocation_id() != 0) {
2205 total_size += 1 +
2206 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
2207 this->allocation_id());
2208 }
2209
2210 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
2211 SetCachedSize(cached_size);
2212 return total_size;
2213}
2214
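// MergeFrom() copies only the fields that are set in `from` (non-empty
// strings, non-zero scalars), leaving the remaining fields of `this`
// untouched; CopyFrom() is simply Clear() followed by MergeFrom().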
2215void MemoryLogRawAllocation::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
2216// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.MemoryLogRawAllocation)
2217 GOOGLE_DCHECK_NE(&from, this);
2218 const MemoryLogRawAllocation* source =
2219 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<MemoryLogRawAllocation>(
2220 &from);
2221 if (source == nullptr) {
2222 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.MemoryLogRawAllocation)
2223 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
2224 } else {
2225 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.MemoryLogRawAllocation)
2226 MergeFrom(*source);
2227 }
2228}
2229
2230void MemoryLogRawAllocation::MergeFrom(const MemoryLogRawAllocation& from) {
2231// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogRawAllocation)
2232 GOOGLE_DCHECK_NE(&from, this);
2233 _internal_metadata_.MergeFrom(from._internal_metadata_);
2234 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2235 (void) cached_has_bits;
2236
2237 if (from.operation().size() > 0) {
2238 set_operation(from.operation());
2239 }
2240 if (from.allocator_name().size() > 0) {
2241 set_allocator_name(from.allocator_name());
2242 }
2243 if (from.step_id() != 0) {
2244 set_step_id(from.step_id());
2245 }
2246 if (from.num_bytes() != 0) {
2247 set_num_bytes(from.num_bytes());
2248 }
2249 if (from.ptr() != 0) {
2250 set_ptr(from.ptr());
2251 }
2252 if (from.allocation_id() != 0) {
2253 set_allocation_id(from.allocation_id());
2254 }
2255}
2256
2257void MemoryLogRawAllocation::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
2258// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.MemoryLogRawAllocation)
2259 if (&from == this) return;
2260 Clear();
2261 MergeFrom(from);
2262}
2263
2264void MemoryLogRawAllocation::CopyFrom(const MemoryLogRawAllocation& from) {
2265// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogRawAllocation)
2266 if (&from == this) return;
2267 Clear();
2268 MergeFrom(from);
2269}
2270
2271bool MemoryLogRawAllocation::IsInitialized() const {
2272 return true;
2273}
2274
2275void MemoryLogRawAllocation::InternalSwap(MemoryLogRawAllocation* other) {
2276 using std::swap;
2277 _internal_metadata_.Swap(&other->_internal_metadata_);
2278 operation_.Swap(&other->operation_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
2279 GetArenaNoVirtual());
2280 allocator_name_.Swap(&other->allocator_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
2281 GetArenaNoVirtual());
2282 swap(step_id_, other->step_id_);
2283 swap(num_bytes_, other->num_bytes_);
2284 swap(ptr_, other->ptr_);
2285 swap(allocation_id_, other->allocation_id_);
2286}
2287
2288::PROTOBUF_NAMESPACE_ID::Metadata MemoryLogRawAllocation::GetMetadata() const {
2289 return GetMetadataStatic();
2290}
2291
2292
2293// ===================================================================
2294
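// Generated implementation of tensorflow.MemoryLogRawDeallocation:
// int64 step_id = 1, string operation = 2, int64 allocation_id = 3,
// string allocator_name = 4 and bool deferred = 5, matching the per-field
// comments in the parser and serializer below.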
2295void MemoryLogRawDeallocation::InitAsDefaultInstance() {
2296}
2297class MemoryLogRawDeallocation::_Internal {
2298 public:
2299};
2300
2301MemoryLogRawDeallocation::MemoryLogRawDeallocation()
2302 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
2303 SharedCtor();
2304 // @@protoc_insertion_point(constructor:tensorflow.MemoryLogRawDeallocation)
2305}
2306MemoryLogRawDeallocation::MemoryLogRawDeallocation(::PROTOBUF_NAMESPACE_ID::Arena* arena)
2307 : ::PROTOBUF_NAMESPACE_ID::Message(),
2308 _internal_metadata_(arena) {
2309 SharedCtor();
2310 RegisterArenaDtor(arena);
2311 // @@protoc_insertion_point(arena_constructor:tensorflow.MemoryLogRawDeallocation)
2312}
2313MemoryLogRawDeallocation::MemoryLogRawDeallocation(const MemoryLogRawDeallocation& from)
2314 : ::PROTOBUF_NAMESPACE_ID::Message(),
2315 _internal_metadata_(nullptr) {
2316 _internal_metadata_.MergeFrom(from._internal_metadata_);
2317 operation_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2318 if (!from.operation().empty()) {
2319 operation_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.operation(),
2320 GetArenaNoVirtual());
2321 }
2322 allocator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2323 if (!from.allocator_name().empty()) {
2324 allocator_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.allocator_name(),
2325 GetArenaNoVirtual());
2326 }
2327 ::memcpy(&step_id_, &from.step_id_,
2328 static_cast<size_t>(reinterpret_cast<char*>(&deferred_) -
2329 reinterpret_cast<char*>(&step_id_)) + sizeof(deferred_));
2330 // @@protoc_insertion_point(copy_constructor:tensorflow.MemoryLogRawDeallocation)
2331}
2332
2333void MemoryLogRawDeallocation::SharedCtor() {
2334 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_MemoryLogRawDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
2335 operation_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2336 allocator_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2337 ::memset(&step_id_, 0, static_cast<size_t>(
2338 reinterpret_cast<char*>(&deferred_) -
2339 reinterpret_cast<char*>(&step_id_)) + sizeof(deferred_));
2340}
2341
2342MemoryLogRawDeallocation::~MemoryLogRawDeallocation() {
2343 // @@protoc_insertion_point(destructor:tensorflow.MemoryLogRawDeallocation)
2344 SharedDtor();
2345}
2346
2347void MemoryLogRawDeallocation::SharedDtor() {
2348 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
2349 operation_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2350 allocator_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2351}
2352
2353void MemoryLogRawDeallocation::ArenaDtor(void* object) {
2354 MemoryLogRawDeallocation* _this = reinterpret_cast< MemoryLogRawDeallocation* >(object);
2355 (void)_this;
2356}
2357void MemoryLogRawDeallocation::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
2358}
2359void MemoryLogRawDeallocation::SetCachedSize(int size) const {
2360 _cached_size_.Set(size);
2361}
2362const MemoryLogRawDeallocation& MemoryLogRawDeallocation::default_instance() {
2363 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_MemoryLogRawDeallocation_tensorflow_2fcore_2fframework_2flog_5fmemory_2eproto.base);
2364 return *internal_default_instance();
2365}
2366
2367
2368void MemoryLogRawDeallocation::Clear() {
2369// @@protoc_insertion_point(message_clear_start:tensorflow.MemoryLogRawDeallocation)
2370 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2371 // Prevent compiler warnings about cached_has_bits being unused
2372 (void) cached_has_bits;
2373
2374 operation_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
2375 allocator_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
2376 ::memset(&step_id_, 0, static_cast<size_t>(
2377 reinterpret_cast<char*>(&deferred_) -
2378 reinterpret_cast<char*>(&step_id_)) + sizeof(deferred_));
2379 _internal_metadata_.Clear();
2380}
2381
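// In both parsers below, `bool deferred = 5` is read as a varint (tag byte
// 40); on the wire it always occupies a single payload byte, which is why
// ByteSizeLong() charges it a flat 1 + 1 bytes.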
2382#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2383const char* MemoryLogRawDeallocation::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
2384#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2385 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
2386 while (!ctx->Done(&ptr)) {
2387 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
2388 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
2389 CHK_(ptr);
2390 switch (tag >> 3) {
2391 // int64 step_id = 1;
2392 case 1:
2393 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
2394 step_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2395 CHK_(ptr);
2396 } else goto handle_unusual;
2397 continue;
2398 // string operation = 2;
2399 case 2:
2400 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
2401 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_operation(), ptr, ctx, "tensorflow.MemoryLogRawDeallocation.operation");
2402 CHK_(ptr);
2403 } else goto handle_unusual;
2404 continue;
2405 // int64 allocation_id = 3;
2406 case 3:
2407 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
2408 allocation_id_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2409 CHK_(ptr);
2410 } else goto handle_unusual;
2411 continue;
2412 // string allocator_name = 4;
2413 case 4:
2414 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) {
2415 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_allocator_name(), ptr, ctx, "tensorflow.MemoryLogRawDeallocation.allocator_name");
2416 CHK_(ptr);
2417 } else goto handle_unusual;
2418 continue;
2419 // bool deferred = 5;
2420 case 5:
2421 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
2422 deferred_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2423 CHK_(ptr);
2424 } else goto handle_unusual;
2425 continue;
2426 default: {
2427 handle_unusual:
2428 if ((tag & 7) == 4 || tag == 0) {
2429 ctx->SetLastTag(tag);
2430 goto success;
2431 }
2432 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
2433 CHK_(ptr != nullptr);
2434 continue;
2435 }
2436 } // switch
2437 } // while
2438success:
2439 return ptr;
2440failure:
2441 ptr = nullptr;
2442 goto success;
2443#undef CHK_
2444}
2445#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2446bool MemoryLogRawDeallocation::MergePartialFromCodedStream(
2447 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
2448#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
2449 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
2450 // @@protoc_insertion_point(parse_start:tensorflow.MemoryLogRawDeallocation)
2451 for (;;) {
2452 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
2453 tag = p.first;
2454 if (!p.second) goto handle_unusual;
2455 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
2456 // int64 step_id = 1;
2457 case 1: {
2458 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
2459
2460 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2461 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
2462 input, &step_id_)));
2463 } else {
2464 goto handle_unusual;
2465 }
2466 break;
2467 }
2468
2469 // string operation = 2;
2470 case 2: {
2471 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
2472 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
2473 input, this->mutable_operation()));
2474 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2475 this->operation().data(), static_cast<int>(this->operation().length()),
2476 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
2477 "tensorflow.MemoryLogRawDeallocation.operation"));
2478 } else {
2479 goto handle_unusual;
2480 }
2481 break;
2482 }
2483
2484 // int64 allocation_id = 3;
2485 case 3: {
2486 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
2487
2488 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2489 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
2490 input, &allocation_id_)));
2491 } else {
2492 goto handle_unusual;
2493 }
2494 break;
2495 }
2496
2497 // string allocator_name = 4;
2498 case 4: {
2499 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) {
2500 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
2501 input, this->mutable_allocator_name()));
2502 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2503 this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
2504 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
2505 "tensorflow.MemoryLogRawDeallocation.allocator_name"));
2506 } else {
2507 goto handle_unusual;
2508 }
2509 break;
2510 }
2511
2512 // bool deferred = 5;
2513 case 5: {
2514 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
2515
2516 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2517 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
2518 input, &deferred_)));
2519 } else {
2520 goto handle_unusual;
2521 }
2522 break;
2523 }
2524
2525 default: {
2526 handle_unusual:
2527 if (tag == 0) {
2528 goto success;
2529 }
2530 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
2531 input, tag, _internal_metadata_.mutable_unknown_fields()));
2532 break;
2533 }
2534 }
2535 }
2536success:
2537 // @@protoc_insertion_point(parse_success:tensorflow.MemoryLogRawDeallocation)
2538 return true;
2539failure:
2540 // @@protoc_insertion_point(parse_failure:tensorflow.MemoryLogRawDeallocation)
2541 return false;
2542#undef DO_
2543}
2544#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2545
2546void MemoryLogRawDeallocation::SerializeWithCachedSizes(
2547 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
2548 // @@protoc_insertion_point(serialize_start:tensorflow.MemoryLogRawDeallocation)
2549 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2550 (void) cached_has_bits;
2551
2552 // int64 step_id = 1;
2553 if (this->step_id() != 0) {
2554 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->step_id(), output);
2555 }
2556
2557 // string operation = 2;
2558 if (this->operation().size() > 0) {
2559 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2560 this->operation().data(), static_cast<int>(this->operation().length()),
2561 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2562 "tensorflow.MemoryLogRawDeallocation.operation");
2563 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
2564 2, this->operation(), output);
2565 }
2566
2567 // int64 allocation_id = 3;
2568 if (this->allocation_id() != 0) {
2569 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(3, this->allocation_id(), output);
2570 }
2571
2572 // string allocator_name = 4;
2573 if (this->allocator_name().size() > 0) {
2574 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2575 this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
2576 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2577 "tensorflow.MemoryLogRawDeallocation.allocator_name");
2578 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
2579 4, this->allocator_name(), output);
2580 }
2581
2582 // bool deferred = 5;
2583 if (this->deferred() != 0) {
2584 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(5, this->deferred(), output);
2585 }
2586
2587 if (_internal_metadata_.have_unknown_fields()) {
2588 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
2589 _internal_metadata_.unknown_fields(), output);
2590 }
2591 // @@protoc_insertion_point(serialize_end:tensorflow.MemoryLogRawDeallocation)
2592}
2593
2594::PROTOBUF_NAMESPACE_ID::uint8* MemoryLogRawDeallocation::InternalSerializeWithCachedSizesToArray(
2595 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
2596 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.MemoryLogRawDeallocation)
2597 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2598 (void) cached_has_bits;
2599
2600 // int64 step_id = 1;
2601 if (this->step_id() != 0) {
2602 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->step_id(), target);
2603 }
2604
2605 // string operation = 2;
2606 if (this->operation().size() > 0) {
2607 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2608 this->operation().data(), static_cast<int>(this->operation().length()),
2609 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2610 "tensorflow.MemoryLogRawDeallocation.operation");
2611 target =
2612 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
2613 2, this->operation(), target);
2614 }
2615
2616 // int64 allocation_id = 3;
2617 if (this->allocation_id() != 0) {
2618 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(3, this->allocation_id(), target);
2619 }
2620
2621 // string allocator_name = 4;
2622 if (this->allocator_name().size() > 0) {
2623 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2624 this->allocator_name().data(), static_cast<int>(this->allocator_name().length()),
2625 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2626 "tensorflow.MemoryLogRawDeallocation.allocator_name");
2627 target =
2628 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
2629 4, this->allocator_name(), target);
2630 }
2631
2632 // bool deferred = 5;
2633 if (this->deferred() != 0) {
2634 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(5, this->deferred(), target);
2635 }
2636
2637 if (_internal_metadata_.have_unknown_fields()) {
2638 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
2639 _internal_metadata_.unknown_fields(), target);
2640 }
2641 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.MemoryLogRawDeallocation)
2642 return target;
2643}
2644
2645size_t MemoryLogRawDeallocation::ByteSizeLong() const {
2646// @@protoc_insertion_point(message_byte_size_start:tensorflow.MemoryLogRawDeallocation)
2647 size_t total_size = 0;
2648
2649 if (_internal_metadata_.have_unknown_fields()) {
2650 total_size +=
2651 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
2652 _internal_metadata_.unknown_fields());
2653 }
2654 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2655 // Prevent compiler warnings about cached_has_bits being unused
2656 (void) cached_has_bits;
2657
2658 // string operation = 2;
2659 if (this->operation().size() > 0) {
2660 total_size += 1 +
2661 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
2662 this->operation());
2663 }
2664
2665 // string allocator_name = 4;
2666 if (this->allocator_name().size() > 0) {
2667 total_size += 1 +
2668 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
2669 this->allocator_name());
2670 }
2671
2672 // int64 step_id = 1;
2673 if (this->step_id() != 0) {
2674 total_size += 1 +
2675 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
2676 this->step_id());
2677 }
2678
2679 // int64 allocation_id = 3;
2680 if (this->allocation_id() != 0) {
2681 total_size += 1 +
2682 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
2683 this->allocation_id());
2684 }
2685
2686 // bool deferred = 5;
2687 if (this->deferred() != 0) {
2688 total_size += 1 + 1;
2689 }
2690
2691 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
2692 SetCachedSize(cached_size);
2693 return total_size;
2694}
2695
2696void MemoryLogRawDeallocation::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
2697// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.MemoryLogRawDeallocation)
2698 GOOGLE_DCHECK_NE(&from, this);
2699 const MemoryLogRawDeallocation* source =
2700 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<MemoryLogRawDeallocation>(
2701 &from);
2702 if (source == nullptr) {
2703 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.MemoryLogRawDeallocation)
2704 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
2705 } else {
2706 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.MemoryLogRawDeallocation)
2707 MergeFrom(*source);
2708 }
2709}
2710
2711void MemoryLogRawDeallocation::MergeFrom(const MemoryLogRawDeallocation& from) {
2712// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.MemoryLogRawDeallocation)
2713 GOOGLE_DCHECK_NE(&from, this);
2714 _internal_metadata_.MergeFrom(from._internal_metadata_);
2715 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2716 (void) cached_has_bits;
2717
2718 if (from.operation().size() > 0) {
2719 set_operation(from.operation());
2720 }
2721 if (from.allocator_name().size() > 0) {
2722 set_allocator_name(from.allocator_name());
2723 }
2724 if (from.step_id() != 0) {
2725 set_step_id(from.step_id());
2726 }
2727 if (from.allocation_id() != 0) {
2728 set_allocation_id(from.allocation_id());
2729 }
2730 if (from.deferred() != 0) {
2731 set_deferred(from.deferred());
2732 }
2733}
2734
2735void MemoryLogRawDeallocation::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
2736// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.MemoryLogRawDeallocation)
2737 if (&from == this) return;
2738 Clear();
2739 MergeFrom(from);
2740}
2741
2742void MemoryLogRawDeallocation::CopyFrom(const MemoryLogRawDeallocation& from) {
2743// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.MemoryLogRawDeallocation)
2744 if (&from == this) return;
2745 Clear();
2746 MergeFrom(from);
2747}
2748
2749bool MemoryLogRawDeallocation::IsInitialized() const {
2750 return true;
2751}
2752
2753void MemoryLogRawDeallocation::InternalSwap(MemoryLogRawDeallocation* other) {
2754 using std::swap;
2755 _internal_metadata_.Swap(&other->_internal_metadata_);
2756 operation_.Swap(&other->operation_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
2757 GetArenaNoVirtual());
2758 allocator_name_.Swap(&other->allocator_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
2759 GetArenaNoVirtual());
2760 swap(step_id_, other->step_id_);
2761 swap(allocation_id_, other->allocation_id_);
2762 swap(deferred_, other->deferred_);
2763}
2764
2765::PROTOBUF_NAMESPACE_ID::Metadata MemoryLogRawDeallocation::GetMetadata() const {
2766 return GetMetadataStatic();
2767}
2768
2769
2770// @@protoc_insertion_point(namespace_scope)
2771} // namespace tensorflow
2772PROTOBUF_NAMESPACE_OPEN
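// The CreateMaybeMessage<> specializations below let the protobuf Arena API
// construct these messages directly on an arena. A minimal sketch, assuming
// the generated accessors declared in log_memory.pb.h:
//
//   google::protobuf::Arena arena;
//   auto* alloc = google::protobuf::Arena::CreateMessage<
//       tensorflow::MemoryLogRawAllocation>(&arena);
//   alloc->set_step_id(1);        // illustrative values only
//   alloc->set_num_bytes(4096);
//   // `alloc` is owned by `arena` and is freed when the arena is destroyed.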
2773template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogStep* Arena::CreateMaybeMessage< ::tensorflow::MemoryLogStep >(Arena* arena) {
2774 return Arena::CreateMessageInternal< ::tensorflow::MemoryLogStep >(arena);
2775}
2776template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogTensorAllocation* Arena::CreateMaybeMessage< ::tensorflow::MemoryLogTensorAllocation >(Arena* arena) {
2777 return Arena::CreateMessageInternal< ::tensorflow::MemoryLogTensorAllocation >(arena);
2778}
2779template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogTensorDeallocation* Arena::CreateMaybeMessage< ::tensorflow::MemoryLogTensorDeallocation >(Arena* arena) {
2780 return Arena::CreateMessageInternal< ::tensorflow::MemoryLogTensorDeallocation >(arena);
2781}
2782template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogTensorOutput* Arena::CreateMaybeMessage< ::tensorflow::MemoryLogTensorOutput >(Arena* arena) {
2783 return Arena::CreateMessageInternal< ::tensorflow::MemoryLogTensorOutput >(arena);
2784}
2785template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogRawAllocation* Arena::CreateMaybeMessage< ::tensorflow::MemoryLogRawAllocation >(Arena* arena) {
2786 return Arena::CreateMessageInternal< ::tensorflow::MemoryLogRawAllocation >(arena);
2787}
2788template<> PROTOBUF_NOINLINE ::tensorflow::MemoryLogRawDeallocation* Arena::CreateMaybeMessage< ::tensorflow::MemoryLogRawDeallocation >(Arena* arena) {
2789 return Arena::CreateMessageInternal< ::tensorflow::MemoryLogRawDeallocation >(arena);
2790}
2791PROTOBUF_NAMESPACE_CLOSE
2792
2793// @@protoc_insertion_point(global_scope)
2794#include <google/protobuf/port_undef.inc>
2795