1// Generated by the protocol buffer compiler. DO NOT EDIT!
2// source: tensorflow/core/protobuf/config.proto
3
4#include "tensorflow/core/protobuf/config.pb.h"
5
6#include <algorithm>
7
8#include <google/protobuf/stubs/common.h>
9#include <google/protobuf/io/coded_stream.h>
10#include <google/protobuf/extension_set.h>
11#include <google/protobuf/wire_format_lite.h>
12#include <google/protobuf/descriptor.h>
13#include <google/protobuf/generated_message_reflection.h>
14#include <google/protobuf/reflection_ops.h>
15#include <google/protobuf/wire_format.h>
16// @@protoc_insertion_point(includes)
17#include <google/protobuf/port_def.inc>
18extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_CallableOptions_FeedDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
19extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_CallableOptions_FetchDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
20extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fcluster_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_ClusterDef_tensorflow_2fcore_2fprotobuf_2fcluster_2eproto;
21extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_ConfigProto_DeviceCountEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
22extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_ConfigProto_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
23extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fcoordination_5fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_CoordinationServiceConfig_tensorflow_2fcore_2fprotobuf_2fcoordination_5fconfig_2eproto;
24extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fcost_5fgraph_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_CostGraphDef_tensorflow_2fcore_2fframework_2fcost_5fgraph_2eproto;
25extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fdebug_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_DebugOptions_tensorflow_2fcore_2fprotobuf_2fdebug_2eproto;
26extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_GPUOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
27extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_GPUOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
28extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_GPUOptions_Experimental_VirtualDevices_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
29extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fgraph_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<3> scc_info_GraphDef_tensorflow_2fcore_2fframework_2fgraph_2eproto;
30extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_GraphOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
31extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_OptimizerOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
32extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_RPCOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
33extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2frewriter_5fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<4> scc_info_RewriterConfig_tensorflow_2fcore_2fprotobuf_2frewriter_5fconfig_2eproto;
34extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_RunMetadata_FunctionGraphs_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
35extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_RunOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
36extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_RunOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
37extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_RunOptions_Experimental_RunHandlerPoolOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
38extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_SessionMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
39extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_StepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto;
40extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_TensorConnection_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
41extern PROTOBUF_INTERNAL_EXPORT_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto ::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_ThreadPoolOptionProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto;
42namespace tensorflow {
43class GPUOptions_Experimental_VirtualDevicesDefaultTypeInternal {
44 public:
45 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<GPUOptions_Experimental_VirtualDevices> _instance;
46} _GPUOptions_Experimental_VirtualDevices_default_instance_;
47class GPUOptions_ExperimentalDefaultTypeInternal {
48 public:
49 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<GPUOptions_Experimental> _instance;
50} _GPUOptions_Experimental_default_instance_;
51class GPUOptionsDefaultTypeInternal {
52 public:
53 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<GPUOptions> _instance;
54} _GPUOptions_default_instance_;
55class OptimizerOptionsDefaultTypeInternal {
56 public:
57 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<OptimizerOptions> _instance;
58} _OptimizerOptions_default_instance_;
59class GraphOptionsDefaultTypeInternal {
60 public:
61 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<GraphOptions> _instance;
62} _GraphOptions_default_instance_;
63class ThreadPoolOptionProtoDefaultTypeInternal {
64 public:
65 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<ThreadPoolOptionProto> _instance;
66} _ThreadPoolOptionProto_default_instance_;
67class RPCOptionsDefaultTypeInternal {
68 public:
69 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<RPCOptions> _instance;
70} _RPCOptions_default_instance_;
71class SessionMetadataDefaultTypeInternal {
72 public:
73 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<SessionMetadata> _instance;
74} _SessionMetadata_default_instance_;
75class ConfigProto_DeviceCountEntry_DoNotUseDefaultTypeInternal {
76 public:
77 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<ConfigProto_DeviceCountEntry_DoNotUse> _instance;
78} _ConfigProto_DeviceCountEntry_DoNotUse_default_instance_;
79class ConfigProto_ExperimentalDefaultTypeInternal {
80 public:
81 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<ConfigProto_Experimental> _instance;
82} _ConfigProto_Experimental_default_instance_;
83class ConfigProtoDefaultTypeInternal {
84 public:
85 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<ConfigProto> _instance;
86} _ConfigProto_default_instance_;
87class RunOptions_Experimental_RunHandlerPoolOptionsDefaultTypeInternal {
88 public:
89 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<RunOptions_Experimental_RunHandlerPoolOptions> _instance;
90} _RunOptions_Experimental_RunHandlerPoolOptions_default_instance_;
91class RunOptions_ExperimentalDefaultTypeInternal {
92 public:
93 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<RunOptions_Experimental> _instance;
94} _RunOptions_Experimental_default_instance_;
95class RunOptionsDefaultTypeInternal {
96 public:
97 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<RunOptions> _instance;
98} _RunOptions_default_instance_;
99class RunMetadata_FunctionGraphsDefaultTypeInternal {
100 public:
101 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<RunMetadata_FunctionGraphs> _instance;
102} _RunMetadata_FunctionGraphs_default_instance_;
103class RunMetadataDefaultTypeInternal {
104 public:
105 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<RunMetadata> _instance;
106} _RunMetadata_default_instance_;
107class TensorConnectionDefaultTypeInternal {
108 public:
109 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<TensorConnection> _instance;
110} _TensorConnection_default_instance_;
111class CallableOptions_FeedDevicesEntry_DoNotUseDefaultTypeInternal {
112 public:
113 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<CallableOptions_FeedDevicesEntry_DoNotUse> _instance;
114} _CallableOptions_FeedDevicesEntry_DoNotUse_default_instance_;
115class CallableOptions_FetchDevicesEntry_DoNotUseDefaultTypeInternal {
116 public:
117 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<CallableOptions_FetchDevicesEntry_DoNotUse> _instance;
118} _CallableOptions_FetchDevicesEntry_DoNotUse_default_instance_;
119class CallableOptionsDefaultTypeInternal {
120 public:
121 ::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<CallableOptions> _instance;
122} _CallableOptions_default_instance_;
123} // namespace tensorflow
124static void InitDefaultsscc_info_CallableOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
125 GOOGLE_PROTOBUF_VERIFY_VERSION;
126
127 {
128 void* ptr = &::tensorflow::_CallableOptions_default_instance_;
129 new (ptr) ::tensorflow::CallableOptions();
130 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
131 }
132 ::tensorflow::CallableOptions::InitAsDefaultInstance();
133}
134
135::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<4> scc_info_CallableOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
136 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 4, InitDefaultsscc_info_CallableOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
137 &scc_info_RunOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
138 &scc_info_TensorConnection_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
139 &scc_info_CallableOptions_FeedDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
140 &scc_info_CallableOptions_FetchDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,}};
141
142static void InitDefaultsscc_info_CallableOptions_FeedDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
143 GOOGLE_PROTOBUF_VERIFY_VERSION;
144
145 {
146 void* ptr = &::tensorflow::_CallableOptions_FeedDevicesEntry_DoNotUse_default_instance_;
147 new (ptr) ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse();
148 }
149 ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse::InitAsDefaultInstance();
150}
151
152::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_CallableOptions_FeedDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
153 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_CallableOptions_FeedDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
154
155static void InitDefaultsscc_info_CallableOptions_FetchDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
156 GOOGLE_PROTOBUF_VERIFY_VERSION;
157
158 {
159 void* ptr = &::tensorflow::_CallableOptions_FetchDevicesEntry_DoNotUse_default_instance_;
160 new (ptr) ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse();
161 }
162 ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse::InitAsDefaultInstance();
163}
164
165::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_CallableOptions_FetchDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
166 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_CallableOptions_FetchDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
167
168static void InitDefaultsscc_info_ConfigProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
169 GOOGLE_PROTOBUF_VERIFY_VERSION;
170
171 {
172 void* ptr = &::tensorflow::_ConfigProto_default_instance_;
173 new (ptr) ::tensorflow::ConfigProto();
174 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
175 }
176 ::tensorflow::ConfigProto::InitAsDefaultInstance();
177}
178
179::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<7> scc_info_ConfigProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
180 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 7, InitDefaultsscc_info_ConfigProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
181 &scc_info_ConfigProto_DeviceCountEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
182 &scc_info_ThreadPoolOptionProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
183 &scc_info_GPUOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
184 &scc_info_GraphOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
185 &scc_info_RPCOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
186 &scc_info_ClusterDef_tensorflow_2fcore_2fprotobuf_2fcluster_2eproto.base,
187 &scc_info_ConfigProto_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,}};
188
189static void InitDefaultsscc_info_ConfigProto_DeviceCountEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
190 GOOGLE_PROTOBUF_VERIFY_VERSION;
191
192 {
193 void* ptr = &::tensorflow::_ConfigProto_DeviceCountEntry_DoNotUse_default_instance_;
194 new (ptr) ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse();
195 }
196 ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse::InitAsDefaultInstance();
197}
198
199::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_ConfigProto_DeviceCountEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
200 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_ConfigProto_DeviceCountEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
201
202static void InitDefaultsscc_info_ConfigProto_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
203 GOOGLE_PROTOBUF_VERIFY_VERSION;
204
205 {
206 void* ptr = &::tensorflow::_ConfigProto_Experimental_default_instance_;
207 new (ptr) ::tensorflow::ConfigProto_Experimental();
208 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
209 }
210 ::tensorflow::ConfigProto_Experimental::InitAsDefaultInstance();
211}
212
213::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_ConfigProto_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
214 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 2, InitDefaultsscc_info_ConfigProto_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
215 &scc_info_SessionMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
216 &scc_info_CoordinationServiceConfig_tensorflow_2fcore_2fprotobuf_2fcoordination_5fconfig_2eproto.base,}};
217
218static void InitDefaultsscc_info_GPUOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
219 GOOGLE_PROTOBUF_VERIFY_VERSION;
220
221 {
222 void* ptr = &::tensorflow::_GPUOptions_default_instance_;
223 new (ptr) ::tensorflow::GPUOptions();
224 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
225 }
226 ::tensorflow::GPUOptions::InitAsDefaultInstance();
227}
228
229::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_GPUOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
230 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_GPUOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
231 &scc_info_GPUOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,}};
232
233static void InitDefaultsscc_info_GPUOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
234 GOOGLE_PROTOBUF_VERIFY_VERSION;
235
236 {
237 void* ptr = &::tensorflow::_GPUOptions_Experimental_default_instance_;
238 new (ptr) ::tensorflow::GPUOptions_Experimental();
239 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
240 }
241 ::tensorflow::GPUOptions_Experimental::InitAsDefaultInstance();
242}
243
244::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_GPUOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
245 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_GPUOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
246 &scc_info_GPUOptions_Experimental_VirtualDevices_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,}};
247
248static void InitDefaultsscc_info_GPUOptions_Experimental_VirtualDevices_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
249 GOOGLE_PROTOBUF_VERIFY_VERSION;
250
251 {
252 void* ptr = &::tensorflow::_GPUOptions_Experimental_VirtualDevices_default_instance_;
253 new (ptr) ::tensorflow::GPUOptions_Experimental_VirtualDevices();
254 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
255 }
256 ::tensorflow::GPUOptions_Experimental_VirtualDevices::InitAsDefaultInstance();
257}
258
259::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_GPUOptions_Experimental_VirtualDevices_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
260 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_GPUOptions_Experimental_VirtualDevices_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
261
262static void InitDefaultsscc_info_GraphOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
263 GOOGLE_PROTOBUF_VERIFY_VERSION;
264
265 {
266 void* ptr = &::tensorflow::_GraphOptions_default_instance_;
267 new (ptr) ::tensorflow::GraphOptions();
268 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
269 }
270 ::tensorflow::GraphOptions::InitAsDefaultInstance();
271}
272
273::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_GraphOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
274 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 2, InitDefaultsscc_info_GraphOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
275 &scc_info_OptimizerOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
276 &scc_info_RewriterConfig_tensorflow_2fcore_2fprotobuf_2frewriter_5fconfig_2eproto.base,}};
277
278static void InitDefaultsscc_info_OptimizerOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
279 GOOGLE_PROTOBUF_VERIFY_VERSION;
280
281 {
282 void* ptr = &::tensorflow::_OptimizerOptions_default_instance_;
283 new (ptr) ::tensorflow::OptimizerOptions();
284 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
285 }
286 ::tensorflow::OptimizerOptions::InitAsDefaultInstance();
287}
288
289::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_OptimizerOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
290 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_OptimizerOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
291
292static void InitDefaultsscc_info_RPCOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
293 GOOGLE_PROTOBUF_VERIFY_VERSION;
294
295 {
296 void* ptr = &::tensorflow::_RPCOptions_default_instance_;
297 new (ptr) ::tensorflow::RPCOptions();
298 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
299 }
300 ::tensorflow::RPCOptions::InitAsDefaultInstance();
301}
302
303::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_RPCOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
304 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_RPCOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
305
306static void InitDefaultsscc_info_RunMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
307 GOOGLE_PROTOBUF_VERIFY_VERSION;
308
309 {
310 void* ptr = &::tensorflow::_RunMetadata_default_instance_;
311 new (ptr) ::tensorflow::RunMetadata();
312 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
313 }
314 ::tensorflow::RunMetadata::InitAsDefaultInstance();
315}
316
317::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<5> scc_info_RunMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
318 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 5, InitDefaultsscc_info_RunMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
319 &scc_info_StepStats_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto.base,
320 &scc_info_CostGraphDef_tensorflow_2fcore_2fframework_2fcost_5fgraph_2eproto.base,
321 &scc_info_GraphDef_tensorflow_2fcore_2fframework_2fgraph_2eproto.base,
322 &scc_info_RunMetadata_FunctionGraphs_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
323 &scc_info_SessionMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,}};
324
325static void InitDefaultsscc_info_RunMetadata_FunctionGraphs_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
326 GOOGLE_PROTOBUF_VERIFY_VERSION;
327
328 {
329 void* ptr = &::tensorflow::_RunMetadata_FunctionGraphs_default_instance_;
330 new (ptr) ::tensorflow::RunMetadata_FunctionGraphs();
331 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
332 }
333 ::tensorflow::RunMetadata_FunctionGraphs::InitAsDefaultInstance();
334}
335
336::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_RunMetadata_FunctionGraphs_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
337 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_RunMetadata_FunctionGraphs_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
338 &scc_info_GraphDef_tensorflow_2fcore_2fframework_2fgraph_2eproto.base,}};
339
340static void InitDefaultsscc_info_RunOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
341 GOOGLE_PROTOBUF_VERIFY_VERSION;
342
343 {
344 void* ptr = &::tensorflow::_RunOptions_default_instance_;
345 new (ptr) ::tensorflow::RunOptions();
346 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
347 }
348 ::tensorflow::RunOptions::InitAsDefaultInstance();
349}
350
351::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<2> scc_info_RunOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
352 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 2, InitDefaultsscc_info_RunOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
353 &scc_info_DebugOptions_tensorflow_2fcore_2fprotobuf_2fdebug_2eproto.base,
354 &scc_info_RunOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,}};
355
356static void InitDefaultsscc_info_RunOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
357 GOOGLE_PROTOBUF_VERIFY_VERSION;
358
359 {
360 void* ptr = &::tensorflow::_RunOptions_Experimental_default_instance_;
361 new (ptr) ::tensorflow::RunOptions_Experimental();
362 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
363 }
364 ::tensorflow::RunOptions_Experimental::InitAsDefaultInstance();
365}
366
367::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_RunOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
368 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_RunOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {
369 &scc_info_RunOptions_Experimental_RunHandlerPoolOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,}};
370
371static void InitDefaultsscc_info_RunOptions_Experimental_RunHandlerPoolOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
372 GOOGLE_PROTOBUF_VERIFY_VERSION;
373
374 {
375 void* ptr = &::tensorflow::_RunOptions_Experimental_RunHandlerPoolOptions_default_instance_;
376 new (ptr) ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions();
377 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
378 }
379 ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions::InitAsDefaultInstance();
380}
381
382::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_RunOptions_Experimental_RunHandlerPoolOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
383 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_RunOptions_Experimental_RunHandlerPoolOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
384
385static void InitDefaultsscc_info_SessionMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
386 GOOGLE_PROTOBUF_VERIFY_VERSION;
387
388 {
389 void* ptr = &::tensorflow::_SessionMetadata_default_instance_;
390 new (ptr) ::tensorflow::SessionMetadata();
391 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
392 }
393 ::tensorflow::SessionMetadata::InitAsDefaultInstance();
394}
395
396::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_SessionMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
397 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_SessionMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
398
399static void InitDefaultsscc_info_TensorConnection_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
400 GOOGLE_PROTOBUF_VERIFY_VERSION;
401
402 {
403 void* ptr = &::tensorflow::_TensorConnection_default_instance_;
404 new (ptr) ::tensorflow::TensorConnection();
405 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
406 }
407 ::tensorflow::TensorConnection::InitAsDefaultInstance();
408}
409
410::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_TensorConnection_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
411 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_TensorConnection_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
412
413static void InitDefaultsscc_info_ThreadPoolOptionProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto() {
414 GOOGLE_PROTOBUF_VERIFY_VERSION;
415
416 {
417 void* ptr = &::tensorflow::_ThreadPoolOptionProto_default_instance_;
418 new (ptr) ::tensorflow::ThreadPoolOptionProto();
419 ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
420 }
421 ::tensorflow::ThreadPoolOptionProto::InitAsDefaultInstance();
422}
423
424::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_ThreadPoolOptionProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto =
425 {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, InitDefaultsscc_info_ThreadPoolOptionProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto}, {}};
426
427static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto[20];
428static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto[4];
429static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto = nullptr;
430
431const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
432 ~0u, // no _has_bits_
433 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental_VirtualDevices, _internal_metadata_),
434 ~0u, // no _extensions_
435 ~0u, // no _oneof_case_
436 ~0u, // no _weak_field_map_
437 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental_VirtualDevices, memory_limit_mb_),
438 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental_VirtualDevices, priority_),
439 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental_VirtualDevices, device_ordinal_),
440 ~0u, // no _has_bits_
441 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, _internal_metadata_),
442 ~0u, // no _extensions_
443 ~0u, // no _oneof_case_
444 ~0u, // no _weak_field_map_
445 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, virtual_devices_),
446 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, use_unified_memory_),
447 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, num_dev_to_dev_copy_streams_),
448 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, collective_ring_order_),
449 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, timestamped_allocator_),
450 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, kernel_tracker_max_interval_),
451 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, kernel_tracker_max_bytes_),
452 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, kernel_tracker_max_pending_),
453 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, internal_fragmentation_fraction_),
454 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, use_cuda_malloc_async_),
455 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions_Experimental, disallow_retry_on_allocation_failure_),
456 ~0u, // no _has_bits_
457 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, _internal_metadata_),
458 ~0u, // no _extensions_
459 ~0u, // no _oneof_case_
460 ~0u, // no _weak_field_map_
461 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, per_process_gpu_memory_fraction_),
462 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, allow_growth_),
463 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, allocator_type_),
464 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, deferred_deletion_bytes_),
465 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, visible_device_list_),
466 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, polling_active_delay_usecs_),
467 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, polling_inactive_delay_msecs_),
468 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, force_gpu_compatible_),
469 PROTOBUF_FIELD_OFFSET(::tensorflow::GPUOptions, experimental_),
470 ~0u, // no _has_bits_
471 PROTOBUF_FIELD_OFFSET(::tensorflow::OptimizerOptions, _internal_metadata_),
472 ~0u, // no _extensions_
473 ~0u, // no _oneof_case_
474 ~0u, // no _weak_field_map_
475 PROTOBUF_FIELD_OFFSET(::tensorflow::OptimizerOptions, do_common_subexpression_elimination_),
476 PROTOBUF_FIELD_OFFSET(::tensorflow::OptimizerOptions, do_constant_folding_),
477 PROTOBUF_FIELD_OFFSET(::tensorflow::OptimizerOptions, max_folded_constant_in_bytes_),
478 PROTOBUF_FIELD_OFFSET(::tensorflow::OptimizerOptions, do_function_inlining_),
479 PROTOBUF_FIELD_OFFSET(::tensorflow::OptimizerOptions, opt_level_),
480 PROTOBUF_FIELD_OFFSET(::tensorflow::OptimizerOptions, global_jit_level_),
481 PROTOBUF_FIELD_OFFSET(::tensorflow::OptimizerOptions, cpu_global_jit_),
482 ~0u, // no _has_bits_
483 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, _internal_metadata_),
484 ~0u, // no _extensions_
485 ~0u, // no _oneof_case_
486 ~0u, // no _weak_field_map_
487 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, enable_recv_scheduling_),
488 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, optimizer_options_),
489 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, build_cost_model_),
490 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, build_cost_model_after_),
491 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, infer_shapes_),
492 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, place_pruned_graph_),
493 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, enable_bfloat16_sendrecv_),
494 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, timeline_step_),
495 PROTOBUF_FIELD_OFFSET(::tensorflow::GraphOptions, rewrite_options_),
496 ~0u, // no _has_bits_
497 PROTOBUF_FIELD_OFFSET(::tensorflow::ThreadPoolOptionProto, _internal_metadata_),
498 ~0u, // no _extensions_
499 ~0u, // no _oneof_case_
500 ~0u, // no _weak_field_map_
501 PROTOBUF_FIELD_OFFSET(::tensorflow::ThreadPoolOptionProto, num_threads_),
502 PROTOBUF_FIELD_OFFSET(::tensorflow::ThreadPoolOptionProto, global_name_),
503 ~0u, // no _has_bits_
504 PROTOBUF_FIELD_OFFSET(::tensorflow::RPCOptions, _internal_metadata_),
505 ~0u, // no _extensions_
506 ~0u, // no _oneof_case_
507 ~0u, // no _weak_field_map_
508 PROTOBUF_FIELD_OFFSET(::tensorflow::RPCOptions, use_rpc_for_inprocess_master_),
509 PROTOBUF_FIELD_OFFSET(::tensorflow::RPCOptions, compression_algorithm_),
510 PROTOBUF_FIELD_OFFSET(::tensorflow::RPCOptions, compression_level_),
511 PROTOBUF_FIELD_OFFSET(::tensorflow::RPCOptions, cache_rpc_response_),
512 PROTOBUF_FIELD_OFFSET(::tensorflow::RPCOptions, disable_session_connection_sharing_),
513 PROTOBUF_FIELD_OFFSET(::tensorflow::RPCOptions, num_channels_per_target_),
514 ~0u, // no _has_bits_
515 PROTOBUF_FIELD_OFFSET(::tensorflow::SessionMetadata, _internal_metadata_),
516 ~0u, // no _extensions_
517 ~0u, // no _oneof_case_
518 ~0u, // no _weak_field_map_
519 PROTOBUF_FIELD_OFFSET(::tensorflow::SessionMetadata, name_),
520 PROTOBUF_FIELD_OFFSET(::tensorflow::SessionMetadata, version_),
521 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse, _has_bits_),
522 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse, _internal_metadata_),
523 ~0u, // no _extensions_
524 ~0u, // no _oneof_case_
525 ~0u, // no _weak_field_map_
526 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse, key_),
527 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse, value_),
528 0,
529 1,
530 ~0u, // no _has_bits_
531 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, _internal_metadata_),
532 ~0u, // no _extensions_
533 ~0u, // no _oneof_case_
534 ~0u, // no _weak_field_map_
535 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, collective_group_leader_),
536 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, executor_type_),
537 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, recv_buf_max_chunk_),
538 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, use_numa_affinity_),
539 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, collective_deterministic_sequential_execution_),
540 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, collective_nccl_),
541 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, share_session_state_in_clusterspec_propagation_),
542 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, disable_thread_spinning_),
543 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, share_cluster_devices_in_session_),
544 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, session_metadata_),
545 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, optimize_for_static_graph_),
546 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, enable_mlir_bridge_),
547 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, mlir_bridge_rollout_),
548 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, enable_mlir_graph_optimization_),
549 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, disable_output_partition_graphs_),
550 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, xla_fusion_autotuner_thresh_),
551 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, use_tfrt_),
552 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, disable_functional_ops_lowering_),
553 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, xla_prefer_single_graph_cluster_),
554 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto_Experimental, coordination_config_),
555 ~0u, // no _has_bits_
556 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, _internal_metadata_),
557 ~0u, // no _extensions_
558 ~0u, // no _oneof_case_
559 ~0u, // no _weak_field_map_
560 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, device_count_),
561 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, intra_op_parallelism_threads_),
562 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, inter_op_parallelism_threads_),
563 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, use_per_session_threads_),
564 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, session_inter_op_thread_pool_),
565 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, placement_period_),
566 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, device_filters_),
567 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, gpu_options_),
568 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, allow_soft_placement_),
569 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, log_device_placement_),
570 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, graph_options_),
571 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, operation_timeout_in_ms_),
572 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, rpc_options_),
573 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, cluster_def_),
574 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, isolate_session_state_),
575 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, share_cluster_devices_in_session_),
576 PROTOBUF_FIELD_OFFSET(::tensorflow::ConfigProto, experimental_),
577 ~0u, // no _has_bits_
578 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions, _internal_metadata_),
579 ~0u, // no _extensions_
580 ~0u, // no _oneof_case_
581 ~0u, // no _weak_field_map_
582 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions, priority_),
583 ~0u, // no _has_bits_
584 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions_Experimental, _internal_metadata_),
585 ~0u, // no _extensions_
586 ~0u, // no _oneof_case_
587 ~0u, // no _weak_field_map_
588 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions_Experimental, collective_graph_key_),
589 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions_Experimental, use_run_handler_pool_),
590 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions_Experimental, run_handler_pool_options_),
591 ~0u, // no _has_bits_
592 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions, _internal_metadata_),
593 ~0u, // no _extensions_
594 ~0u, // no _oneof_case_
595 ~0u, // no _weak_field_map_
596 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions, trace_level_),
597 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions, timeout_in_ms_),
598 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions, inter_op_thread_pool_),
599 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions, output_partition_graphs_),
600 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions, debug_options_),
601 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions, report_tensor_allocations_upon_oom_),
602 PROTOBUF_FIELD_OFFSET(::tensorflow::RunOptions, experimental_),
603 ~0u, // no _has_bits_
604 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata_FunctionGraphs, _internal_metadata_),
605 ~0u, // no _extensions_
606 ~0u, // no _oneof_case_
607 ~0u, // no _weak_field_map_
608 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata_FunctionGraphs, partition_graphs_),
609 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata_FunctionGraphs, pre_optimization_graph_),
610 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata_FunctionGraphs, post_optimization_graph_),
611 ~0u, // no _has_bits_
612 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata, _internal_metadata_),
613 ~0u, // no _extensions_
614 ~0u, // no _oneof_case_
615 ~0u, // no _weak_field_map_
616 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata, step_stats_),
617 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata, cost_graph_),
618 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata, partition_graphs_),
619 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata, function_graphs_),
620 PROTOBUF_FIELD_OFFSET(::tensorflow::RunMetadata, session_metadata_),
621 ~0u, // no _has_bits_
622 PROTOBUF_FIELD_OFFSET(::tensorflow::TensorConnection, _internal_metadata_),
623 ~0u, // no _extensions_
624 ~0u, // no _oneof_case_
625 ~0u, // no _weak_field_map_
626 PROTOBUF_FIELD_OFFSET(::tensorflow::TensorConnection, from_tensor_),
627 PROTOBUF_FIELD_OFFSET(::tensorflow::TensorConnection, to_tensor_),
628 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse, _has_bits_),
629 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse, _internal_metadata_),
630 ~0u, // no _extensions_
631 ~0u, // no _oneof_case_
632 ~0u, // no _weak_field_map_
633 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse, key_),
634 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse, value_),
635 0,
636 1,
637 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse, _has_bits_),
638 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse, _internal_metadata_),
639 ~0u, // no _extensions_
640 ~0u, // no _oneof_case_
641 ~0u, // no _weak_field_map_
642 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse, key_),
643 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse, value_),
644 0,
645 1,
646 ~0u, // no _has_bits_
647 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions, _internal_metadata_),
648 ~0u, // no _extensions_
649 ~0u, // no _oneof_case_
650 ~0u, // no _weak_field_map_
651 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions, feed_),
652 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions, fetch_),
653 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions, target_),
654 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions, run_options_),
655 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions, tensor_connection_),
656 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions, feed_devices_),
657 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions, fetch_devices_),
658 PROTOBUF_FIELD_OFFSET(::tensorflow::CallableOptions, fetch_skip_sync_),
659};
660static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
661 { 0, -1, sizeof(::tensorflow::GPUOptions_Experimental_VirtualDevices)},
662 { 8, -1, sizeof(::tensorflow::GPUOptions_Experimental)},
663 { 24, -1, sizeof(::tensorflow::GPUOptions)},
664 { 38, -1, sizeof(::tensorflow::OptimizerOptions)},
665 { 50, -1, sizeof(::tensorflow::GraphOptions)},
666 { 64, -1, sizeof(::tensorflow::ThreadPoolOptionProto)},
667 { 71, -1, sizeof(::tensorflow::RPCOptions)},
668 { 82, -1, sizeof(::tensorflow::SessionMetadata)},
669 { 89, 96, sizeof(::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse)},
670 { 98, -1, sizeof(::tensorflow::ConfigProto_Experimental)},
671 { 123, -1, sizeof(::tensorflow::ConfigProto)},
672 { 145, -1, sizeof(::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions)},
673 { 151, -1, sizeof(::tensorflow::RunOptions_Experimental)},
674 { 159, -1, sizeof(::tensorflow::RunOptions)},
675 { 171, -1, sizeof(::tensorflow::RunMetadata_FunctionGraphs)},
676 { 179, -1, sizeof(::tensorflow::RunMetadata)},
677 { 189, -1, sizeof(::tensorflow::TensorConnection)},
678 { 196, 203, sizeof(::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse)},
679 { 205, 212, sizeof(::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse)},
680 { 214, -1, sizeof(::tensorflow::CallableOptions)},
681};
682
683static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
684 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_GPUOptions_Experimental_VirtualDevices_default_instance_),
685 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_GPUOptions_Experimental_default_instance_),
686 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_GPUOptions_default_instance_),
687 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_OptimizerOptions_default_instance_),
688 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_GraphOptions_default_instance_),
689 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_ThreadPoolOptionProto_default_instance_),
690 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_RPCOptions_default_instance_),
691 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_SessionMetadata_default_instance_),
692 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_ConfigProto_DeviceCountEntry_DoNotUse_default_instance_),
693 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_ConfigProto_Experimental_default_instance_),
694 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_ConfigProto_default_instance_),
695 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_RunOptions_Experimental_RunHandlerPoolOptions_default_instance_),
696 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_RunOptions_Experimental_default_instance_),
697 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_RunOptions_default_instance_),
698 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_RunMetadata_FunctionGraphs_default_instance_),
699 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_RunMetadata_default_instance_),
700 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_TensorConnection_default_instance_),
701 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_CallableOptions_FeedDevicesEntry_DoNotUse_default_instance_),
702 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_CallableOptions_FetchDevicesEntry_DoNotUse_default_instance_),
703 reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::tensorflow::_CallableOptions_default_instance_),
704};
705
706const char descriptor_table_protodef_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) =
707 "\n%tensorflow/core/protobuf/config.proto\022"
708 "\ntensorflow\032*tensorflow/core/framework/c"
709 "ost_graph.proto\032%tensorflow/core/framewo"
710 "rk/graph.proto\032*tensorflow/core/framewor"
711 "k/step_stats.proto\032&tensorflow/core/prot"
712 "obuf/cluster.proto\0322tensorflow/core/prot"
713 "obuf/coordination_config.proto\032$tensorfl"
714 "ow/core/protobuf/debug.proto\032.tensorflow"
715 "/core/protobuf/rewriter_config.proto\"\327\006\n"
716 "\nGPUOptions\022\'\n\037per_process_gpu_memory_fr"
717 "action\030\001 \001(\001\022\024\n\014allow_growth\030\004 \001(\010\022\026\n\016al"
718 "locator_type\030\002 \001(\t\022\037\n\027deferred_deletion_"
719 "bytes\030\003 \001(\003\022\033\n\023visible_device_list\030\005 \001(\t"
720 "\022\"\n\032polling_active_delay_usecs\030\006 \001(\005\022$\n\034"
721 "polling_inactive_delay_msecs\030\007 \001(\005\022\034\n\024fo"
722 "rce_gpu_compatible\030\010 \001(\010\0229\n\014experimental"
723 "\030\t \001(\0132#.tensorflow.GPUOptions.Experimen"
724 "tal\032\220\004\n\014Experimental\022K\n\017virtual_devices\030"
725 "\001 \003(\01322.tensorflow.GPUOptions.Experiment"
726 "al.VirtualDevices\022\032\n\022use_unified_memory\030"
727 "\002 \001(\010\022#\n\033num_dev_to_dev_copy_streams\030\003 \001"
728 "(\005\022\035\n\025collective_ring_order\030\004 \001(\t\022\035\n\025tim"
729 "estamped_allocator\030\005 \001(\010\022#\n\033kernel_track"
730 "er_max_interval\030\007 \001(\005\022 \n\030kernel_tracker_"
731 "max_bytes\030\010 \001(\005\022\"\n\032kernel_tracker_max_pe"
732 "nding\030\t \001(\005\022\'\n\037internal_fragmentation_fr"
733 "action\030\n \001(\001\022\035\n\025use_cuda_malloc_async\030\013 "
734 "\001(\010\022,\n$disallow_retry_on_allocation_fail"
735 "ure\030\014 \001(\010\032S\n\016VirtualDevices\022\027\n\017memory_li"
736 "mit_mb\030\001 \003(\002\022\020\n\010priority\030\002 \003(\005\022\026\n\016device"
737 "_ordinal\030\003 \003(\005\"\235\003\n\020OptimizerOptions\022+\n#d"
738 "o_common_subexpression_elimination\030\001 \001(\010"
739 "\022\033\n\023do_constant_folding\030\002 \001(\010\022$\n\034max_fol"
740 "ded_constant_in_bytes\030\006 \001(\003\022\034\n\024do_functi"
741 "on_inlining\030\004 \001(\010\0225\n\topt_level\030\003 \001(\0162\".t"
742 "ensorflow.OptimizerOptions.Level\022E\n\020glob"
743 "al_jit_level\030\005 \001(\0162+.tensorflow.Optimize"
744 "rOptions.GlobalJitLevel\022\026\n\016cpu_global_ji"
745 "t\030\007 \001(\010\" \n\005Level\022\006\n\002L1\020\000\022\017\n\002L0\020\377\377\377\377\377\377\377\377\377"
746 "\001\"C\n\016GlobalJitLevel\022\013\n\007DEFAULT\020\000\022\020\n\003OFF\020"
747 "\377\377\377\377\377\377\377\377\377\001\022\010\n\004ON_1\020\001\022\010\n\004ON_2\020\002\"\356\002\n\014Graph"
748 "Options\022\036\n\026enable_recv_scheduling\030\002 \001(\010\022"
749 "7\n\021optimizer_options\030\003 \001(\0132\034.tensorflow."
750 "OptimizerOptions\022\030\n\020build_cost_model\030\004 \001"
751 "(\003\022\036\n\026build_cost_model_after\030\t \001(\003\022\024\n\014in"
752 "fer_shapes\030\005 \001(\010\022\032\n\022place_pruned_graph\030\006"
753 " \001(\010\022 \n\030enable_bfloat16_sendrecv\030\007 \001(\010\022\025"
754 "\n\rtimeline_step\030\010 \001(\005\0223\n\017rewrite_options"
755 "\030\n \001(\0132\032.tensorflow.RewriterConfigJ\004\010\001\020\002"
756 "R%skip_common_subexpression_elimination\""
757 "A\n\025ThreadPoolOptionProto\022\023\n\013num_threads\030"
758 "\001 \001(\005\022\023\n\013global_name\030\002 \001(\t\"\325\001\n\nRPCOption"
759 "s\022$\n\034use_rpc_for_inprocess_master\030\001 \001(\010\022"
760 "\035\n\025compression_algorithm\030\002 \001(\t\022\031\n\021compre"
761 "ssion_level\030\003 \001(\005\022\032\n\022cache_rpc_response\030"
762 "\004 \001(\010\022*\n\"disable_session_connection_shar"
763 "ing\030\005 \001(\010\022\037\n\027num_channels_per_target\030\006 \001"
764 "(\005\"0\n\017SessionMetadata\022\014\n\004name\030\001 \001(\t\022\017\n\007v"
765 "ersion\030\002 \001(\003\"\256\016\n\013ConfigProto\022>\n\014device_c"
766 "ount\030\001 \003(\0132(.tensorflow.ConfigProto.Devi"
767 "ceCountEntry\022$\n\034intra_op_parallelism_thr"
768 "eads\030\002 \001(\005\022$\n\034inter_op_parallelism_threa"
769 "ds\030\005 \001(\005\022\037\n\027use_per_session_threads\030\t \001("
770 "\010\022G\n\034session_inter_op_thread_pool\030\014 \003(\0132"
771 "!.tensorflow.ThreadPoolOptionProto\022\030\n\020pl"
772 "acement_period\030\003 \001(\005\022\026\n\016device_filters\030\004"
773 " \003(\t\022+\n\013gpu_options\030\006 \001(\0132\026.tensorflow.G"
774 "PUOptions\022\034\n\024allow_soft_placement\030\007 \001(\010\022"
775 "\034\n\024log_device_placement\030\010 \001(\010\022/\n\rgraph_o"
776 "ptions\030\n \001(\0132\030.tensorflow.GraphOptions\022\037"
777 "\n\027operation_timeout_in_ms\030\013 \001(\003\022+\n\013rpc_o"
778 "ptions\030\r \001(\0132\026.tensorflow.RPCOptions\022+\n\013"
779 "cluster_def\030\016 \001(\0132\026.tensorflow.ClusterDe"
780 "f\022\035\n\025isolate_session_state\030\017 \001(\010\022(\n shar"
781 "e_cluster_devices_in_session\030\021 \001(\010\022:\n\014ex"
782 "perimental\030\020 \001(\0132$.tensorflow.ConfigProt"
783 "o.Experimental\0322\n\020DeviceCountEntry\022\013\n\003ke"
784 "y\030\001 \001(\t\022\r\n\005value\030\002 \001(\005:\0028\001\032\250\010\n\014Experimen"
785 "tal\022\037\n\027collective_group_leader\030\001 \001(\t\022\025\n\r"
786 "executor_type\030\003 \001(\t\022\032\n\022recv_buf_max_chun"
787 "k\030\004 \001(\005\022\031\n\021use_numa_affinity\030\005 \001(\010\0225\n-co"
788 "llective_deterministic_sequential_execut"
789 "ion\030\006 \001(\010\022\027\n\017collective_nccl\030\007 \001(\010\0226\n.sh"
790 "are_session_state_in_clusterspec_propaga"
791 "tion\030\010 \001(\010\022\037\n\027disable_thread_spinning\030\t "
792 "\001(\010\022(\n share_cluster_devices_in_session\030"
793 "\n \001(\010\0225\n\020session_metadata\030\013 \001(\0132\033.tensor"
794 "flow.SessionMetadata\022!\n\031optimize_for_sta"
795 "tic_graph\030\014 \001(\010\022\032\n\022enable_mlir_bridge\030\r "
796 "\001(\010\022S\n\023mlir_bridge_rollout\030\021 \001(\01626.tenso"
797 "rflow.ConfigProto.Experimental.MlirBridg"
798 "eRollout\022&\n\036enable_mlir_graph_optimizati"
799 "on\030\020 \001(\010\022\'\n\037disable_output_partition_gra"
800 "phs\030\016 \001(\010\022#\n\033xla_fusion_autotuner_thresh"
801 "\030\017 \001(\003\022\020\n\010use_tfrt\030\022 \001(\010\022\'\n\037disable_func"
802 "tional_ops_lowering\030\025 \001(\010\022\'\n\037xla_prefer_"
803 "single_graph_cluster\030\026 \001(\010\022B\n\023coordinati"
804 "on_config\030\027 \001(\0132%.tensorflow.Coordinatio"
805 "nServiceConfig\"\332\001\n\021MlirBridgeRollout\022#\n\037"
806 "MLIR_BRIDGE_ROLLOUT_UNSPECIFIED\020\000\022\037\n\033MLI"
807 "R_BRIDGE_ROLLOUT_ENABLED\020\001\022 \n\034MLIR_BRIDG"
808 "E_ROLLOUT_DISABLED\020\002\022)\n%MLIR_BRIDGE_ROLL"
809 "OUT_SAFE_MODE_ENABLED\020\003\0222\n.MLIR_BRIDGE_R"
810 "OLLOUT_SAFE_MODE_FALLBACK_ENABLED\020\004J\004\010\002\020"
811 "\003J\004\010\023\020\024J\004\010\024\020\025\"\341\004\n\nRunOptions\0226\n\013trace_le"
812 "vel\030\001 \001(\0162!.tensorflow.RunOptions.TraceL"
813 "evel\022\025\n\rtimeout_in_ms\030\002 \001(\003\022\034\n\024inter_op_"
814 "thread_pool\030\003 \001(\005\022\037\n\027output_partition_gr"
815 "aphs\030\005 \001(\010\022/\n\rdebug_options\030\006 \001(\0132\030.tens"
816 "orflow.DebugOptions\022*\n\"report_tensor_all"
817 "ocations_upon_oom\030\007 \001(\010\0229\n\014experimental\030"
818 "\010 \001(\0132#.tensorflow.RunOptions.Experiment"
819 "al\032\322\001\n\014Experimental\022\034\n\024collective_graph_"
820 "key\030\001 \001(\003\022\034\n\024use_run_handler_pool\030\002 \001(\010\022"
821 "[\n\030run_handler_pool_options\030\003 \001(\01329.tens"
822 "orflow.RunOptions.Experimental.RunHandle"
823 "rPoolOptions\032)\n\025RunHandlerPoolOptions\022\020\n"
824 "\010priority\030\001 \001(\003\"R\n\nTraceLevel\022\014\n\010NO_TRAC"
825 "E\020\000\022\022\n\016SOFTWARE_TRACE\020\001\022\022\n\016HARDWARE_TRAC"
826 "E\020\002\022\016\n\nFULL_TRACE\020\003J\004\010\004\020\005\"\276\003\n\013RunMetadat"
827 "a\022)\n\nstep_stats\030\001 \001(\0132\025.tensorflow.StepS"
828 "tats\022,\n\ncost_graph\030\002 \001(\0132\030.tensorflow.Co"
829 "stGraphDef\022.\n\020partition_graphs\030\003 \003(\0132\024.t"
830 "ensorflow.GraphDef\022\?\n\017function_graphs\030\004 "
831 "\003(\0132&.tensorflow.RunMetadata.FunctionGra"
832 "phs\0225\n\020session_metadata\030\005 \001(\0132\033.tensorfl"
833 "ow.SessionMetadata\032\255\001\n\016FunctionGraphs\022.\n"
834 "\020partition_graphs\030\001 \003(\0132\024.tensorflow.Gra"
835 "phDef\0224\n\026pre_optimization_graph\030\002 \001(\0132\024."
836 "tensorflow.GraphDef\0225\n\027post_optimization"
837 "_graph\030\003 \001(\0132\024.tensorflow.GraphDef\":\n\020Te"
838 "nsorConnection\022\023\n\013from_tensor\030\001 \001(\t\022\021\n\tt"
839 "o_tensor\030\002 \001(\t\"\260\003\n\017CallableOptions\022\014\n\004fe"
840 "ed\030\001 \003(\t\022\r\n\005fetch\030\002 \003(\t\022\016\n\006target\030\003 \003(\t\022"
841 "+\n\013run_options\030\004 \001(\0132\026.tensorflow.RunOpt"
842 "ions\0227\n\021tensor_connection\030\005 \003(\0132\034.tensor"
843 "flow.TensorConnection\022B\n\014feed_devices\030\006 "
844 "\003(\0132,.tensorflow.CallableOptions.FeedDev"
845 "icesEntry\022D\n\rfetch_devices\030\007 \003(\0132-.tenso"
846 "rflow.CallableOptions.FetchDevicesEntry\022"
847 "\027\n\017fetch_skip_sync\030\010 \001(\010\0322\n\020FeedDevicesE"
848 "ntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0323\n\021"
849 "FetchDevicesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030"
850 "\002 \001(\t:\0028\001B\204\001\n\030org.tensorflow.frameworkB\014"
851 "ConfigProtosP\001ZUgithub.com/tensorflow/te"
852 "nsorflow/tensorflow/go/core/protobuf/for"
853 "_core_protos_go_proto\370\001\001b\006proto3"
854 ;
855static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto_deps[7] = {
856 &::descriptor_table_tensorflow_2fcore_2fframework_2fcost_5fgraph_2eproto,
857 &::descriptor_table_tensorflow_2fcore_2fframework_2fgraph_2eproto,
858 &::descriptor_table_tensorflow_2fcore_2fframework_2fstep_5fstats_2eproto,
859 &::descriptor_table_tensorflow_2fcore_2fprotobuf_2fcluster_2eproto,
860 &::descriptor_table_tensorflow_2fcore_2fprotobuf_2fcoordination_5fconfig_2eproto,
861 &::descriptor_table_tensorflow_2fcore_2fprotobuf_2fdebug_2eproto,
862 &::descriptor_table_tensorflow_2fcore_2fprotobuf_2frewriter_5fconfig_2eproto,
863};
864static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto_sccs[20] = {
865 &scc_info_CallableOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
866 &scc_info_CallableOptions_FeedDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
867 &scc_info_CallableOptions_FetchDevicesEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
868 &scc_info_ConfigProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
869 &scc_info_ConfigProto_DeviceCountEntry_DoNotUse_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
870 &scc_info_ConfigProto_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
871 &scc_info_GPUOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
872 &scc_info_GPUOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
873 &scc_info_GPUOptions_Experimental_VirtualDevices_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
874 &scc_info_GraphOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
875 &scc_info_OptimizerOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
876 &scc_info_RPCOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
877 &scc_info_RunMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
878 &scc_info_RunMetadata_FunctionGraphs_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
879 &scc_info_RunOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
880 &scc_info_RunOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
881 &scc_info_RunOptions_Experimental_RunHandlerPoolOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
882 &scc_info_SessionMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
883 &scc_info_TensorConnection_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
884 &scc_info_ThreadPoolOptionProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base,
885};
886static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto_once;
887static bool descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto_initialized = false;
888const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto = {
889 &descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto_initialized, descriptor_table_protodef_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto, "tensorflow/core/protobuf/config.proto", 5872,
890 &descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto_once, descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto_sccs, descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto_deps, 20, 7,
891 schemas, file_default_instances, TableStruct_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto::offsets,
892 file_level_metadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto, 20, file_level_enum_descriptors_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto, file_level_service_descriptors_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto,
893};
894
895// Force running AddDescriptors() at dynamic initialization time.
896static bool dynamic_init_dummy_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto = ( ::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto), true);
897namespace tensorflow {
898const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* OptimizerOptions_Level_descriptor() {
899 ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto);
900 return file_level_enum_descriptors_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto[0];
901}
902bool OptimizerOptions_Level_IsValid(int value) {
903 switch (value) {
904 case -1:
905 case 0:
906 return true;
907 default:
908 return false;
909 }
910}
911
912#if (__cplusplus < 201703) && (!defined(_MSC_VER) || _MSC_VER >= 1900)
913constexpr OptimizerOptions_Level OptimizerOptions::L1;
914constexpr OptimizerOptions_Level OptimizerOptions::L0;
915constexpr OptimizerOptions_Level OptimizerOptions::Level_MIN;
916constexpr OptimizerOptions_Level OptimizerOptions::Level_MAX;
917constexpr int OptimizerOptions::Level_ARRAYSIZE;
918#endif // (__cplusplus < 201703) && (!defined(_MSC_VER) || _MSC_VER >= 1900)
919const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* OptimizerOptions_GlobalJitLevel_descriptor() {
920 ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto);
921 return file_level_enum_descriptors_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto[1];
922}
923bool OptimizerOptions_GlobalJitLevel_IsValid(int value) {
924 switch (value) {
925 case -1:
926 case 0:
927 case 1:
928 case 2:
929 return true;
930 default:
931 return false;
932 }
933}
934
935#if (__cplusplus < 201703) && (!defined(_MSC_VER) || _MSC_VER >= 1900)
936constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::DEFAULT;
937constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::OFF;
938constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::ON_1;
939constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::ON_2;
940constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::GlobalJitLevel_MIN;
941constexpr OptimizerOptions_GlobalJitLevel OptimizerOptions::GlobalJitLevel_MAX;
942constexpr int OptimizerOptions::GlobalJitLevel_ARRAYSIZE;
943#endif // (__cplusplus < 201703) && (!defined(_MSC_VER) || _MSC_VER >= 1900)
944const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* ConfigProto_Experimental_MlirBridgeRollout_descriptor() {
945 ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto);
946 return file_level_enum_descriptors_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto[2];
947}
948bool ConfigProto_Experimental_MlirBridgeRollout_IsValid(int value) {
949 switch (value) {
950 case 0:
951 case 1:
952 case 2:
953 case 3:
954 case 4:
955 return true;
956 default:
957 return false;
958 }
959}
960
961#if (__cplusplus < 201703) && (!defined(_MSC_VER) || _MSC_VER >= 1900)
962constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
963constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_ENABLED;
964constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
965constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED;
966constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED;
967constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MlirBridgeRollout_MIN;
968constexpr ConfigProto_Experimental_MlirBridgeRollout ConfigProto_Experimental::MlirBridgeRollout_MAX;
969constexpr int ConfigProto_Experimental::MlirBridgeRollout_ARRAYSIZE;
970#endif // (__cplusplus < 201703) && (!defined(_MSC_VER) || _MSC_VER >= 1900)
971const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* RunOptions_TraceLevel_descriptor() {
972 ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto);
973 return file_level_enum_descriptors_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto[3];
974}
975bool RunOptions_TraceLevel_IsValid(int value) {
976 switch (value) {
977 case 0:
978 case 1:
979 case 2:
980 case 3:
981 return true;
982 default:
983 return false;
984 }
985}
986
987#if (__cplusplus < 201703) && (!defined(_MSC_VER) || _MSC_VER >= 1900)
988constexpr RunOptions_TraceLevel RunOptions::NO_TRACE;
989constexpr RunOptions_TraceLevel RunOptions::SOFTWARE_TRACE;
990constexpr RunOptions_TraceLevel RunOptions::HARDWARE_TRACE;
991constexpr RunOptions_TraceLevel RunOptions::FULL_TRACE;
992constexpr RunOptions_TraceLevel RunOptions::TraceLevel_MIN;
993constexpr RunOptions_TraceLevel RunOptions::TraceLevel_MAX;
994constexpr int RunOptions::TraceLevel_ARRAYSIZE;
995#endif // (__cplusplus < 201703) && (!defined(_MSC_VER) || _MSC_VER >= 1900)
996
997// ===================================================================
998
999void GPUOptions_Experimental_VirtualDevices::InitAsDefaultInstance() {
1000}
1001class GPUOptions_Experimental_VirtualDevices::_Internal {
1002 public:
1003};
1004
1005GPUOptions_Experimental_VirtualDevices::GPUOptions_Experimental_VirtualDevices()
1006 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
1007 SharedCtor();
1008 // @@protoc_insertion_point(constructor:tensorflow.GPUOptions.Experimental.VirtualDevices)
1009}
1010GPUOptions_Experimental_VirtualDevices::GPUOptions_Experimental_VirtualDevices(::PROTOBUF_NAMESPACE_ID::Arena* arena)
1011 : ::PROTOBUF_NAMESPACE_ID::Message(),
1012 _internal_metadata_(arena),
1013 memory_limit_mb_(arena),
1014 priority_(arena),
1015 device_ordinal_(arena) {
1016 SharedCtor();
1017 RegisterArenaDtor(arena);
1018 // @@protoc_insertion_point(arena_constructor:tensorflow.GPUOptions.Experimental.VirtualDevices)
1019}
1020GPUOptions_Experimental_VirtualDevices::GPUOptions_Experimental_VirtualDevices(const GPUOptions_Experimental_VirtualDevices& from)
1021 : ::PROTOBUF_NAMESPACE_ID::Message(),
1022 _internal_metadata_(nullptr),
1023 memory_limit_mb_(from.memory_limit_mb_),
1024 priority_(from.priority_),
1025 device_ordinal_(from.device_ordinal_) {
1026 _internal_metadata_.MergeFrom(from._internal_metadata_);
1027 // @@protoc_insertion_point(copy_constructor:tensorflow.GPUOptions.Experimental.VirtualDevices)
1028}
1029
1030void GPUOptions_Experimental_VirtualDevices::SharedCtor() {
1031}
1032
1033GPUOptions_Experimental_VirtualDevices::~GPUOptions_Experimental_VirtualDevices() {
1034 // @@protoc_insertion_point(destructor:tensorflow.GPUOptions.Experimental.VirtualDevices)
1035 SharedDtor();
1036}
1037
1038void GPUOptions_Experimental_VirtualDevices::SharedDtor() {
1039 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
1040}
1041
1042void GPUOptions_Experimental_VirtualDevices::ArenaDtor(void* object) {
1043 GPUOptions_Experimental_VirtualDevices* _this = reinterpret_cast< GPUOptions_Experimental_VirtualDevices* >(object);
1044 (void)_this;
1045}
1046void GPUOptions_Experimental_VirtualDevices::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
1047}
1048void GPUOptions_Experimental_VirtualDevices::SetCachedSize(int size) const {
1049 _cached_size_.Set(size);
1050}
1051const GPUOptions_Experimental_VirtualDevices& GPUOptions_Experimental_VirtualDevices::default_instance() {
1052 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_GPUOptions_Experimental_VirtualDevices_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
1053 return *internal_default_instance();
1054}
1055
1056
1057void GPUOptions_Experimental_VirtualDevices::Clear() {
1058// @@protoc_insertion_point(message_clear_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
1059 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1060 // Prevent compiler warnings about cached_has_bits being unused
1061 (void) cached_has_bits;
1062
1063 memory_limit_mb_.Clear();
1064 priority_.Clear();
1065 device_ordinal_.Clear();
1066 _internal_metadata_.Clear();
1067}
1068
1069#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1070const char* GPUOptions_Experimental_VirtualDevices::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
1071#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1072 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
1073 while (!ctx->Done(&ptr)) {
1074 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1075 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
1076 CHK_(ptr);
1077 switch (tag >> 3) {
1078 // repeated float memory_limit_mb = 1;
1079 case 1:
1080 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
1081 ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedFloatParser(mutable_memory_limit_mb(), ptr, ctx);
1082 CHK_(ptr);
1083 } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 13) {
1084 add_memory_limit_mb(::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<float>(ptr));
1085 ptr += sizeof(float);
1086 } else goto handle_unusual;
1087 continue;
1088 // repeated int32 priority = 2;
1089 case 2:
1090 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
1091 ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(mutable_priority(), ptr, ctx);
1092 CHK_(ptr);
1093 } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16) {
1094 add_priority(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr));
1095 CHK_(ptr);
1096 } else goto handle_unusual;
1097 continue;
1098 // repeated int32 device_ordinal = 3;
1099 case 3:
1100 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
1101 ptr = ::PROTOBUF_NAMESPACE_ID::internal::PackedInt32Parser(mutable_device_ordinal(), ptr, ctx);
1102 CHK_(ptr);
1103 } else if (static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24) {
1104 add_device_ordinal(::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr));
1105 CHK_(ptr);
1106 } else goto handle_unusual;
1107 continue;
1108 default: {
1109 handle_unusual:
1110 if ((tag & 7) == 4 || tag == 0) {
1111 ctx->SetLastTag(tag);
1112 goto success;
1113 }
1114 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
1115 CHK_(ptr != nullptr);
1116 continue;
1117 }
1118 } // switch
1119 } // while
1120success:
1121 return ptr;
1122failure:
1123 ptr = nullptr;
1124 goto success;
1125#undef CHK_
1126}
1127#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1128bool GPUOptions_Experimental_VirtualDevices::MergePartialFromCodedStream(
1129 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
1130#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
1131 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1132 // @@protoc_insertion_point(parse_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
1133 for (;;) {
1134 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
1135 tag = p.first;
1136 if (!p.second) goto handle_unusual;
1137 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
1138 // repeated float memory_limit_mb = 1;
1139 case 1: {
1140 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
1141 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPackedPrimitive<
1142 float, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_FLOAT>(
1143 input, this->mutable_memory_limit_mb())));
1144 } else if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (13 & 0xFF)) {
1145 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
1146 float, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_FLOAT>(
1147 1, 10u, input, this->mutable_memory_limit_mb())));
1148 } else {
1149 goto handle_unusual;
1150 }
1151 break;
1152 }
1153
1154 // repeated int32 priority = 2;
1155 case 2: {
1156 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
1157 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPackedPrimitive<
1158 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
1159 input, this->mutable_priority())));
1160 } else if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {
1161 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
1162 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
1163 1, 18u, input, this->mutable_priority())));
1164 } else {
1165 goto handle_unusual;
1166 }
1167 break;
1168 }
1169
1170 // repeated int32 device_ordinal = 3;
1171 case 3: {
1172 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
1173 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPackedPrimitive<
1174 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
1175 input, this->mutable_device_ordinal())));
1176 } else if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
1177 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadRepeatedPrimitiveNoInline<
1178 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
1179 1, 26u, input, this->mutable_device_ordinal())));
1180 } else {
1181 goto handle_unusual;
1182 }
1183 break;
1184 }
1185
1186 default: {
1187 handle_unusual:
1188 if (tag == 0) {
1189 goto success;
1190 }
1191 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
1192 input, tag, _internal_metadata_.mutable_unknown_fields()));
1193 break;
1194 }
1195 }
1196 }
1197success:
1198 // @@protoc_insertion_point(parse_success:tensorflow.GPUOptions.Experimental.VirtualDevices)
1199 return true;
1200failure:
1201 // @@protoc_insertion_point(parse_failure:tensorflow.GPUOptions.Experimental.VirtualDevices)
1202 return false;
1203#undef DO_
1204}
1205#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1206
1207void GPUOptions_Experimental_VirtualDevices::SerializeWithCachedSizes(
1208 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
1209 // @@protoc_insertion_point(serialize_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
1210 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1211 (void) cached_has_bits;
1212
1213 // repeated float memory_limit_mb = 1;
1214 if (this->memory_limit_mb_size() > 0) {
1215 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTag(1, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
1216 output->WriteVarint32(_memory_limit_mb_cached_byte_size_.load(
1217 std::memory_order_relaxed));
1218 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteFloatArray(
1219 this->memory_limit_mb().data(), this->memory_limit_mb_size(), output);
1220 }
1221
1222 // repeated int32 priority = 2;
1223 if (this->priority_size() > 0) {
1224 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTag(2, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
1225 output->WriteVarint32(_priority_cached_byte_size_.load(
1226 std::memory_order_relaxed));
1227 }
1228 for (int i = 0, n = this->priority_size(); i < n; i++) {
1229 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32NoTag(
1230 this->priority(i), output);
1231 }
1232
1233 // repeated int32 device_ordinal = 3;
1234 if (this->device_ordinal_size() > 0) {
1235 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTag(3, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED, output);
1236 output->WriteVarint32(_device_ordinal_cached_byte_size_.load(
1237 std::memory_order_relaxed));
1238 }
1239 for (int i = 0, n = this->device_ordinal_size(); i < n; i++) {
1240 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32NoTag(
1241 this->device_ordinal(i), output);
1242 }
1243
1244 if (_internal_metadata_.have_unknown_fields()) {
1245 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
1246 _internal_metadata_.unknown_fields(), output);
1247 }
1248 // @@protoc_insertion_point(serialize_end:tensorflow.GPUOptions.Experimental.VirtualDevices)
1249}
1250
1251::PROTOBUF_NAMESPACE_ID::uint8* GPUOptions_Experimental_VirtualDevices::InternalSerializeWithCachedSizesToArray(
1252 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
1253 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
1254 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1255 (void) cached_has_bits;
1256
1257 // repeated float memory_limit_mb = 1;
1258 if (this->memory_limit_mb_size() > 0) {
1259 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTagToArray(
1260 1,
1261 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
1262 target);
1263 target = ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream::WriteVarint32ToArray(
1264 _memory_limit_mb_cached_byte_size_.load(std::memory_order_relaxed),
1265 target);
1266 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1267 WriteFloatNoTagToArray(this->memory_limit_mb_, target);
1268 }
1269
1270 // repeated int32 priority = 2;
1271 if (this->priority_size() > 0) {
1272 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTagToArray(
1273 2,
1274 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
1275 target);
1276 target = ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream::WriteVarint32ToArray(
1277 _priority_cached_byte_size_.load(std::memory_order_relaxed),
1278 target);
1279 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1280 WriteInt32NoTagToArray(this->priority_, target);
1281 }
1282
1283 // repeated int32 device_ordinal = 3;
1284 if (this->device_ordinal_size() > 0) {
1285 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteTagToArray(
1286 3,
1287 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED,
1288 target);
1289 target = ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream::WriteVarint32ToArray(
1290 _device_ordinal_cached_byte_size_.load(std::memory_order_relaxed),
1291 target);
1292 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1293 WriteInt32NoTagToArray(this->device_ordinal_, target);
1294 }
1295
1296 if (_internal_metadata_.have_unknown_fields()) {
1297 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
1298 _internal_metadata_.unknown_fields(), target);
1299 }
1300 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.GPUOptions.Experimental.VirtualDevices)
1301 return target;
1302}
1303
1304size_t GPUOptions_Experimental_VirtualDevices::ByteSizeLong() const {
1305// @@protoc_insertion_point(message_byte_size_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
1306 size_t total_size = 0;
1307
1308 if (_internal_metadata_.have_unknown_fields()) {
1309 total_size +=
1310 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
1311 _internal_metadata_.unknown_fields());
1312 }
1313 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1314 // Prevent compiler warnings about cached_has_bits being unused
1315 (void) cached_has_bits;
1316
1317 // repeated float memory_limit_mb = 1;
1318 {
1319 unsigned int count = static_cast<unsigned int>(this->memory_limit_mb_size());
1320 size_t data_size = 4UL * count;
1321 if (data_size > 0) {
1322 total_size += 1 +
1323 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
1324 static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size));
1325 }
1326 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size);
1327 _memory_limit_mb_cached_byte_size_.store(cached_size,
1328 std::memory_order_relaxed);
1329 total_size += data_size;
1330 }
1331
1332 // repeated int32 priority = 2;
1333 {
1334 size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1335 Int32Size(this->priority_);
1336 if (data_size > 0) {
1337 total_size += 1 +
1338 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
1339 static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size));
1340 }
1341 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size);
1342 _priority_cached_byte_size_.store(cached_size,
1343 std::memory_order_relaxed);
1344 total_size += data_size;
1345 }
1346
1347 // repeated int32 device_ordinal = 3;
1348 {
1349 size_t data_size = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1350 Int32Size(this->device_ordinal_);
1351 if (data_size > 0) {
1352 total_size += 1 +
1353 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
1354 static_cast<::PROTOBUF_NAMESPACE_ID::int32>(data_size));
1355 }
1356 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(data_size);
1357 _device_ordinal_cached_byte_size_.store(cached_size,
1358 std::memory_order_relaxed);
1359 total_size += data_size;
1360 }
1361
1362 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
1363 SetCachedSize(cached_size);
1364 return total_size;
1365}
1366
1367void GPUOptions_Experimental_VirtualDevices::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
1368// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
1369 GOOGLE_DCHECK_NE(&from, this);
1370 const GPUOptions_Experimental_VirtualDevices* source =
1371 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<GPUOptions_Experimental_VirtualDevices>(
1372 &from);
1373 if (source == nullptr) {
1374 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.GPUOptions.Experimental.VirtualDevices)
1375 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
1376 } else {
1377 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.GPUOptions.Experimental.VirtualDevices)
1378 MergeFrom(*source);
1379 }
1380}
1381
1382void GPUOptions_Experimental_VirtualDevices::MergeFrom(const GPUOptions_Experimental_VirtualDevices& from) {
1383// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
1384 GOOGLE_DCHECK_NE(&from, this);
1385 _internal_metadata_.MergeFrom(from._internal_metadata_);
1386 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1387 (void) cached_has_bits;
1388
1389 memory_limit_mb_.MergeFrom(from.memory_limit_mb_);
1390 priority_.MergeFrom(from.priority_);
1391 device_ordinal_.MergeFrom(from.device_ordinal_);
1392}
1393
1394void GPUOptions_Experimental_VirtualDevices::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
1395// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
1396 if (&from == this) return;
1397 Clear();
1398 MergeFrom(from);
1399}
1400
1401void GPUOptions_Experimental_VirtualDevices::CopyFrom(const GPUOptions_Experimental_VirtualDevices& from) {
1402// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.GPUOptions.Experimental.VirtualDevices)
1403 if (&from == this) return;
1404 Clear();
1405 MergeFrom(from);
1406}
1407
1408bool GPUOptions_Experimental_VirtualDevices::IsInitialized() const {
1409 return true;
1410}
1411
1412void GPUOptions_Experimental_VirtualDevices::InternalSwap(GPUOptions_Experimental_VirtualDevices* other) {
1413 using std::swap;
1414 _internal_metadata_.Swap(&other->_internal_metadata_);
1415 memory_limit_mb_.InternalSwap(&other->memory_limit_mb_);
1416 priority_.InternalSwap(&other->priority_);
1417 device_ordinal_.InternalSwap(&other->device_ordinal_);
1418}
1419
1420::PROTOBUF_NAMESPACE_ID::Metadata GPUOptions_Experimental_VirtualDevices::GetMetadata() const {
1421 return GetMetadataStatic();
1422}
1423
1424
1425// ===================================================================
1426
1427void GPUOptions_Experimental::InitAsDefaultInstance() {
1428}
1429class GPUOptions_Experimental::_Internal {
1430 public:
1431};
1432
1433GPUOptions_Experimental::GPUOptions_Experimental()
1434 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
1435 SharedCtor();
1436 // @@protoc_insertion_point(constructor:tensorflow.GPUOptions.Experimental)
1437}
1438GPUOptions_Experimental::GPUOptions_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena)
1439 : ::PROTOBUF_NAMESPACE_ID::Message(),
1440 _internal_metadata_(arena),
1441 virtual_devices_(arena) {
1442 SharedCtor();
1443 RegisterArenaDtor(arena);
1444 // @@protoc_insertion_point(arena_constructor:tensorflow.GPUOptions.Experimental)
1445}
1446GPUOptions_Experimental::GPUOptions_Experimental(const GPUOptions_Experimental& from)
1447 : ::PROTOBUF_NAMESPACE_ID::Message(),
1448 _internal_metadata_(nullptr),
1449 virtual_devices_(from.virtual_devices_) {
1450 _internal_metadata_.MergeFrom(from._internal_metadata_);
1451 collective_ring_order_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1452 if (!from.collective_ring_order().empty()) {
1453 collective_ring_order_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.collective_ring_order(),
1454 GetArenaNoVirtual());
1455 }
1456 ::memcpy(&num_dev_to_dev_copy_streams_, &from.num_dev_to_dev_copy_streams_,
1457 static_cast<size_t>(reinterpret_cast<char*>(&kernel_tracker_max_pending_) -
1458 reinterpret_cast<char*>(&num_dev_to_dev_copy_streams_)) + sizeof(kernel_tracker_max_pending_));
1459 // @@protoc_insertion_point(copy_constructor:tensorflow.GPUOptions.Experimental)
1460}
1461
1462void GPUOptions_Experimental::SharedCtor() {
1463 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_GPUOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
1464 collective_ring_order_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1465 ::memset(&num_dev_to_dev_copy_streams_, 0, static_cast<size_t>(
1466 reinterpret_cast<char*>(&kernel_tracker_max_pending_) -
1467 reinterpret_cast<char*>(&num_dev_to_dev_copy_streams_)) + sizeof(kernel_tracker_max_pending_));
1468}
1469
1470GPUOptions_Experimental::~GPUOptions_Experimental() {
1471 // @@protoc_insertion_point(destructor:tensorflow.GPUOptions.Experimental)
1472 SharedDtor();
1473}
1474
1475void GPUOptions_Experimental::SharedDtor() {
1476 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
1477 collective_ring_order_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
1478}
1479
1480void GPUOptions_Experimental::ArenaDtor(void* object) {
1481 GPUOptions_Experimental* _this = reinterpret_cast< GPUOptions_Experimental* >(object);
1482 (void)_this;
1483}
1484void GPUOptions_Experimental::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
1485}
1486void GPUOptions_Experimental::SetCachedSize(int size) const {
1487 _cached_size_.Set(size);
1488}
1489const GPUOptions_Experimental& GPUOptions_Experimental::default_instance() {
1490 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_GPUOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
1491 return *internal_default_instance();
1492}
1493
1494
1495void GPUOptions_Experimental::Clear() {
1496// @@protoc_insertion_point(message_clear_start:tensorflow.GPUOptions.Experimental)
1497 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1498 // Prevent compiler warnings about cached_has_bits being unused
1499 (void) cached_has_bits;
1500
1501 virtual_devices_.Clear();
1502 collective_ring_order_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
1503 ::memset(&num_dev_to_dev_copy_streams_, 0, static_cast<size_t>(
1504 reinterpret_cast<char*>(&kernel_tracker_max_pending_) -
1505 reinterpret_cast<char*>(&num_dev_to_dev_copy_streams_)) + sizeof(kernel_tracker_max_pending_));
1506 _internal_metadata_.Clear();
1507}
1508
1509#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1510const char* GPUOptions_Experimental::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
1511#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
1512 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
1513 while (!ctx->Done(&ptr)) {
1514 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1515 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
1516 CHK_(ptr);
1517 switch (tag >> 3) {
1518 // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
1519 case 1:
1520 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
1521 ptr -= 1;
1522 do {
1523 ptr += 1;
1524 ptr = ctx->ParseMessage(add_virtual_devices(), ptr);
1525 CHK_(ptr);
1526 if (!ctx->DataAvailable(ptr)) break;
1527 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 10);
1528 } else goto handle_unusual;
1529 continue;
1530 // bool use_unified_memory = 2;
1531 case 2:
1532 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
1533 use_unified_memory_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1534 CHK_(ptr);
1535 } else goto handle_unusual;
1536 continue;
1537 // int32 num_dev_to_dev_copy_streams = 3;
1538 case 3:
1539 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
1540 num_dev_to_dev_copy_streams_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1541 CHK_(ptr);
1542 } else goto handle_unusual;
1543 continue;
1544 // string collective_ring_order = 4;
1545 case 4:
1546 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) {
1547 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_collective_ring_order(), ptr, ctx, "tensorflow.GPUOptions.Experimental.collective_ring_order");
1548 CHK_(ptr);
1549 } else goto handle_unusual;
1550 continue;
1551 // bool timestamped_allocator = 5;
1552 case 5:
1553 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
1554 timestamped_allocator_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1555 CHK_(ptr);
1556 } else goto handle_unusual;
1557 continue;
1558 // int32 kernel_tracker_max_interval = 7;
1559 case 7:
1560 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 56)) {
1561 kernel_tracker_max_interval_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1562 CHK_(ptr);
1563 } else goto handle_unusual;
1564 continue;
1565 // int32 kernel_tracker_max_bytes = 8;
1566 case 8:
1567 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 64)) {
1568 kernel_tracker_max_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1569 CHK_(ptr);
1570 } else goto handle_unusual;
1571 continue;
1572 // int32 kernel_tracker_max_pending = 9;
1573 case 9:
1574 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 72)) {
1575 kernel_tracker_max_pending_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1576 CHK_(ptr);
1577 } else goto handle_unusual;
1578 continue;
1579 // double internal_fragmentation_fraction = 10;
1580 case 10:
1581 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 81)) {
1582 internal_fragmentation_fraction_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
1583 ptr += sizeof(double);
1584 } else goto handle_unusual;
1585 continue;
1586 // bool use_cuda_malloc_async = 11;
1587 case 11:
1588 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 88)) {
1589 use_cuda_malloc_async_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1590 CHK_(ptr);
1591 } else goto handle_unusual;
1592 continue;
1593 // bool disallow_retry_on_allocation_failure = 12;
1594 case 12:
1595 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 96)) {
1596 disallow_retry_on_allocation_failure_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
1597 CHK_(ptr);
1598 } else goto handle_unusual;
1599 continue;
1600 default: {
1601 handle_unusual:
1602 if ((tag & 7) == 4 || tag == 0) {
1603 ctx->SetLastTag(tag);
1604 goto success;
1605 }
1606 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
1607 CHK_(ptr != nullptr);
1608 continue;
1609 }
1610 } // switch
1611 } // while
1612success:
1613 return ptr;
1614failure:
1615 ptr = nullptr;
1616 goto success;
1617#undef CHK_
1618}
1619#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1620bool GPUOptions_Experimental::MergePartialFromCodedStream(
1621 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
1622#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
1623 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
1624 // @@protoc_insertion_point(parse_start:tensorflow.GPUOptions.Experimental)
1625 for (;;) {
1626 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
1627 tag = p.first;
1628 if (!p.second) goto handle_unusual;
1629 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
1630 // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
1631 case 1: {
1632 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
1633 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
1634 input, add_virtual_devices()));
1635 } else {
1636 goto handle_unusual;
1637 }
1638 break;
1639 }
1640
1641 // bool use_unified_memory = 2;
1642 case 2: {
1643 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {
1644
1645 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1646 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
1647 input, &use_unified_memory_)));
1648 } else {
1649 goto handle_unusual;
1650 }
1651 break;
1652 }
1653
1654 // int32 num_dev_to_dev_copy_streams = 3;
1655 case 3: {
1656 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
1657
1658 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1659 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
1660 input, &num_dev_to_dev_copy_streams_)));
1661 } else {
1662 goto handle_unusual;
1663 }
1664 break;
1665 }
1666
1667 // string collective_ring_order = 4;
1668 case 4: {
1669 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) {
1670 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
1671 input, this->mutable_collective_ring_order()));
1672 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1673 this->collective_ring_order().data(), static_cast<int>(this->collective_ring_order().length()),
1674 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
1675 "tensorflow.GPUOptions.Experimental.collective_ring_order"));
1676 } else {
1677 goto handle_unusual;
1678 }
1679 break;
1680 }
1681
1682 // bool timestamped_allocator = 5;
1683 case 5: {
1684 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
1685
1686 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1687 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
1688 input, &timestamped_allocator_)));
1689 } else {
1690 goto handle_unusual;
1691 }
1692 break;
1693 }
1694
1695 // int32 kernel_tracker_max_interval = 7;
1696 case 7: {
1697 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (56 & 0xFF)) {
1698
1699 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1700 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
1701 input, &kernel_tracker_max_interval_)));
1702 } else {
1703 goto handle_unusual;
1704 }
1705 break;
1706 }
1707
1708 // int32 kernel_tracker_max_bytes = 8;
1709 case 8: {
1710 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (64 & 0xFF)) {
1711
1712 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1713 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
1714 input, &kernel_tracker_max_bytes_)));
1715 } else {
1716 goto handle_unusual;
1717 }
1718 break;
1719 }
1720
1721 // int32 kernel_tracker_max_pending = 9;
1722 case 9: {
1723 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (72 & 0xFF)) {
1724
1725 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1726 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
1727 input, &kernel_tracker_max_pending_)));
1728 } else {
1729 goto handle_unusual;
1730 }
1731 break;
1732 }
1733
1734 // double internal_fragmentation_fraction = 10;
1735 case 10: {
1736 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (81 & 0xFF)) {
1737
1738 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1739 double, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_DOUBLE>(
1740 input, &internal_fragmentation_fraction_)));
1741 } else {
1742 goto handle_unusual;
1743 }
1744 break;
1745 }
1746
1747 // bool use_cuda_malloc_async = 11;
1748 case 11: {
1749 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (88 & 0xFF)) {
1750
1751 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1752 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
1753 input, &use_cuda_malloc_async_)));
1754 } else {
1755 goto handle_unusual;
1756 }
1757 break;
1758 }
1759
1760 // bool disallow_retry_on_allocation_failure = 12;
1761 case 12: {
1762 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (96 & 0xFF)) {
1763
1764 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
1765 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
1766 input, &disallow_retry_on_allocation_failure_)));
1767 } else {
1768 goto handle_unusual;
1769 }
1770 break;
1771 }
1772
1773 default: {
1774 handle_unusual:
1775 if (tag == 0) {
1776 goto success;
1777 }
1778 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
1779 input, tag, _internal_metadata_.mutable_unknown_fields()));
1780 break;
1781 }
1782 }
1783 }
1784success:
1785 // @@protoc_insertion_point(parse_success:tensorflow.GPUOptions.Experimental)
1786 return true;
1787failure:
1788 // @@protoc_insertion_point(parse_failure:tensorflow.GPUOptions.Experimental)
1789 return false;
1790#undef DO_
1791}
1792#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
1793
1794void GPUOptions_Experimental::SerializeWithCachedSizes(
1795 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
1796 // @@protoc_insertion_point(serialize_start:tensorflow.GPUOptions.Experimental)
1797 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1798 (void) cached_has_bits;
1799
1800 // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
1801 for (unsigned int i = 0,
1802 n = static_cast<unsigned int>(this->virtual_devices_size()); i < n; i++) {
1803 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
1804 1,
1805 this->virtual_devices(static_cast<int>(i)),
1806 output);
1807 }
1808
1809 // bool use_unified_memory = 2;
1810 if (this->use_unified_memory() != 0) {
1811 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(2, this->use_unified_memory(), output);
1812 }
1813
1814 // int32 num_dev_to_dev_copy_streams = 3;
1815 if (this->num_dev_to_dev_copy_streams() != 0) {
1816 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(3, this->num_dev_to_dev_copy_streams(), output);
1817 }
1818
1819 // string collective_ring_order = 4;
1820 if (this->collective_ring_order().size() > 0) {
1821 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1822 this->collective_ring_order().data(), static_cast<int>(this->collective_ring_order().length()),
1823 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
1824 "tensorflow.GPUOptions.Experimental.collective_ring_order");
1825 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
1826 4, this->collective_ring_order(), output);
1827 }
1828
1829 // bool timestamped_allocator = 5;
1830 if (this->timestamped_allocator() != 0) {
1831 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(5, this->timestamped_allocator(), output);
1832 }
1833
1834 // int32 kernel_tracker_max_interval = 7;
1835 if (this->kernel_tracker_max_interval() != 0) {
1836 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(7, this->kernel_tracker_max_interval(), output);
1837 }
1838
1839 // int32 kernel_tracker_max_bytes = 8;
1840 if (this->kernel_tracker_max_bytes() != 0) {
1841 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(8, this->kernel_tracker_max_bytes(), output);
1842 }
1843
1844 // int32 kernel_tracker_max_pending = 9;
1845 if (this->kernel_tracker_max_pending() != 0) {
1846 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(9, this->kernel_tracker_max_pending(), output);
1847 }
1848
1849 // double internal_fragmentation_fraction = 10;
1850 if (!(this->internal_fragmentation_fraction() <= 0 && this->internal_fragmentation_fraction() >= 0)) {
1851 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDouble(10, this->internal_fragmentation_fraction(), output);
1852 }
1853
1854 // bool use_cuda_malloc_async = 11;
1855 if (this->use_cuda_malloc_async() != 0) {
1856 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(11, this->use_cuda_malloc_async(), output);
1857 }
1858
1859 // bool disallow_retry_on_allocation_failure = 12;
1860 if (this->disallow_retry_on_allocation_failure() != 0) {
1861 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(12, this->disallow_retry_on_allocation_failure(), output);
1862 }
1863
1864 if (_internal_metadata_.have_unknown_fields()) {
1865 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
1866 _internal_metadata_.unknown_fields(), output);
1867 }
1868 // @@protoc_insertion_point(serialize_end:tensorflow.GPUOptions.Experimental)
1869}
1870
1871::PROTOBUF_NAMESPACE_ID::uint8* GPUOptions_Experimental::InternalSerializeWithCachedSizesToArray(
1872 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
1873 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.GPUOptions.Experimental)
1874 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1875 (void) cached_has_bits;
1876
1877 // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
1878 for (unsigned int i = 0,
1879 n = static_cast<unsigned int>(this->virtual_devices_size()); i < n; i++) {
1880 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
1881 InternalWriteMessageToArray(
1882 1, this->virtual_devices(static_cast<int>(i)), target);
1883 }
1884
1885 // bool use_unified_memory = 2;
1886 if (this->use_unified_memory() != 0) {
1887 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(2, this->use_unified_memory(), target);
1888 }
1889
1890 // int32 num_dev_to_dev_copy_streams = 3;
1891 if (this->num_dev_to_dev_copy_streams() != 0) {
1892 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(3, this->num_dev_to_dev_copy_streams(), target);
1893 }
1894
1895 // string collective_ring_order = 4;
1896 if (this->collective_ring_order().size() > 0) {
1897 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
1898 this->collective_ring_order().data(), static_cast<int>(this->collective_ring_order().length()),
1899 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
1900 "tensorflow.GPUOptions.Experimental.collective_ring_order");
1901 target =
1902 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
1903 4, this->collective_ring_order(), target);
1904 }
1905
1906 // bool timestamped_allocator = 5;
1907 if (this->timestamped_allocator() != 0) {
1908 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(5, this->timestamped_allocator(), target);
1909 }
1910
1911 // int32 kernel_tracker_max_interval = 7;
1912 if (this->kernel_tracker_max_interval() != 0) {
1913 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(7, this->kernel_tracker_max_interval(), target);
1914 }
1915
1916 // int32 kernel_tracker_max_bytes = 8;
1917 if (this->kernel_tracker_max_bytes() != 0) {
1918 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(8, this->kernel_tracker_max_bytes(), target);
1919 }
1920
1921 // int32 kernel_tracker_max_pending = 9;
1922 if (this->kernel_tracker_max_pending() != 0) {
1923 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(9, this->kernel_tracker_max_pending(), target);
1924 }
1925
1926 // double internal_fragmentation_fraction = 10;
1927 if (!(this->internal_fragmentation_fraction() <= 0 && this->internal_fragmentation_fraction() >= 0)) {
1928 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(10, this->internal_fragmentation_fraction(), target);
1929 }
1930
1931 // bool use_cuda_malloc_async = 11;
1932 if (this->use_cuda_malloc_async() != 0) {
1933 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(11, this->use_cuda_malloc_async(), target);
1934 }
1935
1936 // bool disallow_retry_on_allocation_failure = 12;
1937 if (this->disallow_retry_on_allocation_failure() != 0) {
1938 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(12, this->disallow_retry_on_allocation_failure(), target);
1939 }
1940
1941 if (_internal_metadata_.have_unknown_fields()) {
1942 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
1943 _internal_metadata_.unknown_fields(), target);
1944 }
1945 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.GPUOptions.Experimental)
1946 return target;
1947}
1948
1949size_t GPUOptions_Experimental::ByteSizeLong() const {
1950// @@protoc_insertion_point(message_byte_size_start:tensorflow.GPUOptions.Experimental)
1951 size_t total_size = 0;
1952
1953 if (_internal_metadata_.have_unknown_fields()) {
1954 total_size +=
1955 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
1956 _internal_metadata_.unknown_fields());
1957 }
1958 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
1959 // Prevent compiler warnings about cached_has_bits being unused
1960 (void) cached_has_bits;
1961
1962 // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
1963 {
1964 unsigned int count = static_cast<unsigned int>(this->virtual_devices_size());
1965 total_size += 1UL * count;
1966 for (unsigned int i = 0; i < count; i++) {
1967 total_size +=
1968 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
1969 this->virtual_devices(static_cast<int>(i)));
1970 }
1971 }
1972
1973 // string collective_ring_order = 4;
1974 if (this->collective_ring_order().size() > 0) {
1975 total_size += 1 +
1976 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
1977 this->collective_ring_order());
1978 }
1979
1980 // int32 num_dev_to_dev_copy_streams = 3;
1981 if (this->num_dev_to_dev_copy_streams() != 0) {
1982 total_size += 1 +
1983 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
1984 this->num_dev_to_dev_copy_streams());
1985 }
1986
1987 // int32 kernel_tracker_max_interval = 7;
1988 if (this->kernel_tracker_max_interval() != 0) {
1989 total_size += 1 +
1990 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
1991 this->kernel_tracker_max_interval());
1992 }
1993
1994 // bool use_unified_memory = 2;
1995 if (this->use_unified_memory() != 0) {
1996 total_size += 1 + 1;
1997 }
1998
1999 // bool timestamped_allocator = 5;
2000 if (this->timestamped_allocator() != 0) {
2001 total_size += 1 + 1;
2002 }
2003
2004 // bool use_cuda_malloc_async = 11;
2005 if (this->use_cuda_malloc_async() != 0) {
2006 total_size += 1 + 1;
2007 }
2008
2009 // bool disallow_retry_on_allocation_failure = 12;
2010 if (this->disallow_retry_on_allocation_failure() != 0) {
2011 total_size += 1 + 1;
2012 }
2013
2014 // int32 kernel_tracker_max_bytes = 8;
2015 if (this->kernel_tracker_max_bytes() != 0) {
2016 total_size += 1 +
2017 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
2018 this->kernel_tracker_max_bytes());
2019 }
2020
2021 // double internal_fragmentation_fraction = 10;
2022 if (!(this->internal_fragmentation_fraction() <= 0 && this->internal_fragmentation_fraction() >= 0)) {
2023 total_size += 1 + 8;
2024 }
2025
2026 // int32 kernel_tracker_max_pending = 9;
2027 if (this->kernel_tracker_max_pending() != 0) {
2028 total_size += 1 +
2029 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
2030 this->kernel_tracker_max_pending());
2031 }
2032
2033 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
2034 SetCachedSize(cached_size);
2035 return total_size;
2036}
2037
2038void GPUOptions_Experimental::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
2039// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.GPUOptions.Experimental)
2040 GOOGLE_DCHECK_NE(&from, this);
2041 const GPUOptions_Experimental* source =
2042 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<GPUOptions_Experimental>(
2043 &from);
2044 if (source == nullptr) {
2045 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.GPUOptions.Experimental)
2046 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
2047 } else {
2048 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.GPUOptions.Experimental)
2049 MergeFrom(*source);
2050 }
2051}
2052
2053void GPUOptions_Experimental::MergeFrom(const GPUOptions_Experimental& from) {
2054// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.GPUOptions.Experimental)
2055 GOOGLE_DCHECK_NE(&from, this);
2056 _internal_metadata_.MergeFrom(from._internal_metadata_);
2057 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2058 (void) cached_has_bits;
2059
2060 virtual_devices_.MergeFrom(from.virtual_devices_);
2061 if (from.collective_ring_order().size() > 0) {
2062 set_collective_ring_order(from.collective_ring_order());
2063 }
2064 if (from.num_dev_to_dev_copy_streams() != 0) {
2065 set_num_dev_to_dev_copy_streams(from.num_dev_to_dev_copy_streams());
2066 }
2067 if (from.kernel_tracker_max_interval() != 0) {
2068 set_kernel_tracker_max_interval(from.kernel_tracker_max_interval());
2069 }
2070 if (from.use_unified_memory() != 0) {
2071 set_use_unified_memory(from.use_unified_memory());
2072 }
2073 if (from.timestamped_allocator() != 0) {
2074 set_timestamped_allocator(from.timestamped_allocator());
2075 }
2076 if (from.use_cuda_malloc_async() != 0) {
2077 set_use_cuda_malloc_async(from.use_cuda_malloc_async());
2078 }
2079 if (from.disallow_retry_on_allocation_failure() != 0) {
2080 set_disallow_retry_on_allocation_failure(from.disallow_retry_on_allocation_failure());
2081 }
2082 if (from.kernel_tracker_max_bytes() != 0) {
2083 set_kernel_tracker_max_bytes(from.kernel_tracker_max_bytes());
2084 }
2085 if (!(from.internal_fragmentation_fraction() <= 0 && from.internal_fragmentation_fraction() >= 0)) {
2086 set_internal_fragmentation_fraction(from.internal_fragmentation_fraction());
2087 }
2088 if (from.kernel_tracker_max_pending() != 0) {
2089 set_kernel_tracker_max_pending(from.kernel_tracker_max_pending());
2090 }
2091}
2092
2093void GPUOptions_Experimental::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
2094// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.GPUOptions.Experimental)
2095 if (&from == this) return;
2096 Clear();
2097 MergeFrom(from);
2098}
2099
2100void GPUOptions_Experimental::CopyFrom(const GPUOptions_Experimental& from) {
2101// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.GPUOptions.Experimental)
2102 if (&from == this) return;
2103 Clear();
2104 MergeFrom(from);
2105}
2106
2107bool GPUOptions_Experimental::IsInitialized() const {
2108 return true;
2109}
2110
2111void GPUOptions_Experimental::InternalSwap(GPUOptions_Experimental* other) {
2112 using std::swap;
2113 _internal_metadata_.Swap(&other->_internal_metadata_);
2114 CastToBase(&virtual_devices_)->InternalSwap(CastToBase(&other->virtual_devices_));
2115 collective_ring_order_.Swap(&other->collective_ring_order_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
2116 GetArenaNoVirtual());
2117 swap(num_dev_to_dev_copy_streams_, other->num_dev_to_dev_copy_streams_);
2118 swap(kernel_tracker_max_interval_, other->kernel_tracker_max_interval_);
2119 swap(use_unified_memory_, other->use_unified_memory_);
2120 swap(timestamped_allocator_, other->timestamped_allocator_);
2121 swap(use_cuda_malloc_async_, other->use_cuda_malloc_async_);
2122 swap(disallow_retry_on_allocation_failure_, other->disallow_retry_on_allocation_failure_);
2123 swap(kernel_tracker_max_bytes_, other->kernel_tracker_max_bytes_);
2124 swap(internal_fragmentation_fraction_, other->internal_fragmentation_fraction_);
2125 swap(kernel_tracker_max_pending_, other->kernel_tracker_max_pending_);
2126}
2127
2128::PROTOBUF_NAMESPACE_ID::Metadata GPUOptions_Experimental::GetMetadata() const {
2129 return GetMetadataStatic();
2130}
2131
2132
2133// ===================================================================
2134
2135void GPUOptions::InitAsDefaultInstance() {
2136 ::tensorflow::_GPUOptions_default_instance_._instance.get_mutable()->experimental_ = const_cast< ::tensorflow::GPUOptions_Experimental*>(
2137 ::tensorflow::GPUOptions_Experimental::internal_default_instance());
2138}
2139class GPUOptions::_Internal {
2140 public:
2141 static const ::tensorflow::GPUOptions_Experimental& experimental(const GPUOptions* msg);
2142};
2143
2144const ::tensorflow::GPUOptions_Experimental&
2145GPUOptions::_Internal::experimental(const GPUOptions* msg) {
2146 return *msg->experimental_;
2147}
2148void GPUOptions::unsafe_arena_set_allocated_experimental(
2149 ::tensorflow::GPUOptions_Experimental* experimental) {
2150 if (GetArenaNoVirtual() == nullptr) {
2151 delete experimental_;
2152 }
2153 experimental_ = experimental;
2154 if (experimental) {
2155
2156 } else {
2157
2158 }
2159 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GPUOptions.experimental)
2160}
2161GPUOptions::GPUOptions()
2162 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
2163 SharedCtor();
2164 // @@protoc_insertion_point(constructor:tensorflow.GPUOptions)
2165}
2166GPUOptions::GPUOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena)
2167 : ::PROTOBUF_NAMESPACE_ID::Message(),
2168 _internal_metadata_(arena) {
2169 SharedCtor();
2170 RegisterArenaDtor(arena);
2171 // @@protoc_insertion_point(arena_constructor:tensorflow.GPUOptions)
2172}
2173GPUOptions::GPUOptions(const GPUOptions& from)
2174 : ::PROTOBUF_NAMESPACE_ID::Message(),
2175 _internal_metadata_(nullptr) {
2176 _internal_metadata_.MergeFrom(from._internal_metadata_);
2177 allocator_type_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2178 if (!from.allocator_type().empty()) {
2179 allocator_type_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.allocator_type(),
2180 GetArenaNoVirtual());
2181 }
2182 visible_device_list_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2183 if (!from.visible_device_list().empty()) {
2184 visible_device_list_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.visible_device_list(),
2185 GetArenaNoVirtual());
2186 }
2187 if (from.has_experimental()) {
2188 experimental_ = new ::tensorflow::GPUOptions_Experimental(*from.experimental_);
2189 } else {
2190 experimental_ = nullptr;
2191 }
2192 ::memcpy(&per_process_gpu_memory_fraction_, &from.per_process_gpu_memory_fraction_,
2193 static_cast<size_t>(reinterpret_cast<char*>(&polling_inactive_delay_msecs_) -
2194 reinterpret_cast<char*>(&per_process_gpu_memory_fraction_)) + sizeof(polling_inactive_delay_msecs_));
2195 // @@protoc_insertion_point(copy_constructor:tensorflow.GPUOptions)
2196}
2197
2198void GPUOptions::SharedCtor() {
2199 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_GPUOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
2200 allocator_type_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2201 visible_device_list_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2202 ::memset(&experimental_, 0, static_cast<size_t>(
2203 reinterpret_cast<char*>(&polling_inactive_delay_msecs_) -
2204 reinterpret_cast<char*>(&experimental_)) + sizeof(polling_inactive_delay_msecs_));
2205}
2206
2207GPUOptions::~GPUOptions() {
2208 // @@protoc_insertion_point(destructor:tensorflow.GPUOptions)
2209 SharedDtor();
2210}
2211
2212void GPUOptions::SharedDtor() {
2213 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
2214 allocator_type_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2215 visible_device_list_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
2216 if (this != internal_default_instance()) delete experimental_;
2217}
2218
2219void GPUOptions::ArenaDtor(void* object) {
2220 GPUOptions* _this = reinterpret_cast< GPUOptions* >(object);
2221 (void)_this;
2222}
2223void GPUOptions::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
2224}
2225void GPUOptions::SetCachedSize(int size) const {
2226 _cached_size_.Set(size);
2227}
2228const GPUOptions& GPUOptions::default_instance() {
2229 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_GPUOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
2230 return *internal_default_instance();
2231}
2232
2233
2234void GPUOptions::Clear() {
2235// @@protoc_insertion_point(message_clear_start:tensorflow.GPUOptions)
2236 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2237 // Prevent compiler warnings about cached_has_bits being unused
2238 (void) cached_has_bits;
2239
2240 allocator_type_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
2241 visible_device_list_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
2242 if (GetArenaNoVirtual() == nullptr && experimental_ != nullptr) {
2243 delete experimental_;
2244 }
2245 experimental_ = nullptr;
2246 ::memset(&per_process_gpu_memory_fraction_, 0, static_cast<size_t>(
2247 reinterpret_cast<char*>(&polling_inactive_delay_msecs_) -
2248 reinterpret_cast<char*>(&per_process_gpu_memory_fraction_)) + sizeof(polling_inactive_delay_msecs_));
2249 _internal_metadata_.Clear();
2250}
2251
2252#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2253const char* GPUOptions::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
2254#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2255 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
2256 while (!ctx->Done(&ptr)) {
2257 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
2258 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
2259 CHK_(ptr);
2260 switch (tag >> 3) {
2261 // double per_process_gpu_memory_fraction = 1;
2262 case 1:
2263 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 9)) {
2264 per_process_gpu_memory_fraction_ = ::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<double>(ptr);
2265 ptr += sizeof(double);
2266 } else goto handle_unusual;
2267 continue;
2268 // string allocator_type = 2;
2269 case 2:
2270 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
2271 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_allocator_type(), ptr, ctx, "tensorflow.GPUOptions.allocator_type");
2272 CHK_(ptr);
2273 } else goto handle_unusual;
2274 continue;
2275 // int64 deferred_deletion_bytes = 3;
2276 case 3:
2277 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
2278 deferred_deletion_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2279 CHK_(ptr);
2280 } else goto handle_unusual;
2281 continue;
2282 // bool allow_growth = 4;
2283 case 4:
2284 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
2285 allow_growth_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2286 CHK_(ptr);
2287 } else goto handle_unusual;
2288 continue;
2289 // string visible_device_list = 5;
2290 case 5:
2291 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) {
2292 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_visible_device_list(), ptr, ctx, "tensorflow.GPUOptions.visible_device_list");
2293 CHK_(ptr);
2294 } else goto handle_unusual;
2295 continue;
2296 // int32 polling_active_delay_usecs = 6;
2297 case 6:
2298 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 48)) {
2299 polling_active_delay_usecs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2300 CHK_(ptr);
2301 } else goto handle_unusual;
2302 continue;
2303 // int32 polling_inactive_delay_msecs = 7;
2304 case 7:
2305 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 56)) {
2306 polling_inactive_delay_msecs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2307 CHK_(ptr);
2308 } else goto handle_unusual;
2309 continue;
2310 // bool force_gpu_compatible = 8;
2311 case 8:
2312 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 64)) {
2313 force_gpu_compatible_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2314 CHK_(ptr);
2315 } else goto handle_unusual;
2316 continue;
2317 // .tensorflow.GPUOptions.Experimental experimental = 9;
2318 case 9:
2319 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 74)) {
2320 ptr = ctx->ParseMessage(mutable_experimental(), ptr);
2321 CHK_(ptr);
2322 } else goto handle_unusual;
2323 continue;
2324 default: {
2325 handle_unusual:
2326 if ((tag & 7) == 4 || tag == 0) {
2327 ctx->SetLastTag(tag);
2328 goto success;
2329 }
2330 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
2331 CHK_(ptr != nullptr);
2332 continue;
2333 }
2334 } // switch
2335 } // while
2336success:
2337 return ptr;
2338failure:
2339 ptr = nullptr;
2340 goto success;
2341#undef CHK_
2342}
2343#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2344bool GPUOptions::MergePartialFromCodedStream(
2345 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
2346#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
2347 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
2348 // @@protoc_insertion_point(parse_start:tensorflow.GPUOptions)
2349 for (;;) {
2350 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
2351 tag = p.first;
2352 if (!p.second) goto handle_unusual;
2353 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
2354 // double per_process_gpu_memory_fraction = 1;
2355 case 1: {
2356 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (9 & 0xFF)) {
2357
2358 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2359 double, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_DOUBLE>(
2360 input, &per_process_gpu_memory_fraction_)));
2361 } else {
2362 goto handle_unusual;
2363 }
2364 break;
2365 }
2366
2367 // string allocator_type = 2;
2368 case 2: {
2369 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
2370 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
2371 input, this->mutable_allocator_type()));
2372 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2373 this->allocator_type().data(), static_cast<int>(this->allocator_type().length()),
2374 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
2375 "tensorflow.GPUOptions.allocator_type"));
2376 } else {
2377 goto handle_unusual;
2378 }
2379 break;
2380 }
2381
2382 // int64 deferred_deletion_bytes = 3;
2383 case 3: {
2384 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
2385
2386 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2387 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
2388 input, &deferred_deletion_bytes_)));
2389 } else {
2390 goto handle_unusual;
2391 }
2392 break;
2393 }
2394
2395 // bool allow_growth = 4;
2396 case 4: {
2397 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {
2398
2399 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2400 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
2401 input, &allow_growth_)));
2402 } else {
2403 goto handle_unusual;
2404 }
2405 break;
2406 }
2407
2408 // string visible_device_list = 5;
2409 case 5: {
2410 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (42 & 0xFF)) {
2411 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
2412 input, this->mutable_visible_device_list()));
2413 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2414 this->visible_device_list().data(), static_cast<int>(this->visible_device_list().length()),
2415 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
2416 "tensorflow.GPUOptions.visible_device_list"));
2417 } else {
2418 goto handle_unusual;
2419 }
2420 break;
2421 }
2422
2423 // int32 polling_active_delay_usecs = 6;
2424 case 6: {
2425 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (48 & 0xFF)) {
2426
2427 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2428 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
2429 input, &polling_active_delay_usecs_)));
2430 } else {
2431 goto handle_unusual;
2432 }
2433 break;
2434 }
2435
2436 // int32 polling_inactive_delay_msecs = 7;
2437 case 7: {
2438 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (56 & 0xFF)) {
2439
2440 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2441 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
2442 input, &polling_inactive_delay_msecs_)));
2443 } else {
2444 goto handle_unusual;
2445 }
2446 break;
2447 }
2448
2449 // bool force_gpu_compatible = 8;
2450 case 8: {
2451 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (64 & 0xFF)) {
2452
2453 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2454 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
2455 input, &force_gpu_compatible_)));
2456 } else {
2457 goto handle_unusual;
2458 }
2459 break;
2460 }
2461
2462 // .tensorflow.GPUOptions.Experimental experimental = 9;
2463 case 9: {
2464 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (74 & 0xFF)) {
2465 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
2466 input, mutable_experimental()));
2467 } else {
2468 goto handle_unusual;
2469 }
2470 break;
2471 }
2472
2473 default: {
2474 handle_unusual:
2475 if (tag == 0) {
2476 goto success;
2477 }
2478 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
2479 input, tag, _internal_metadata_.mutable_unknown_fields()));
2480 break;
2481 }
2482 }
2483 }
2484success:
2485 // @@protoc_insertion_point(parse_success:tensorflow.GPUOptions)
2486 return true;
2487failure:
2488 // @@protoc_insertion_point(parse_failure:tensorflow.GPUOptions)
2489 return false;
2490#undef DO_
2491}
2492#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2493
2494void GPUOptions::SerializeWithCachedSizes(
2495 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
2496 // @@protoc_insertion_point(serialize_start:tensorflow.GPUOptions)
2497 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2498 (void) cached_has_bits;
2499
2500 // double per_process_gpu_memory_fraction = 1;
2501 if (!(this->per_process_gpu_memory_fraction() <= 0 && this->per_process_gpu_memory_fraction() >= 0)) {
2502 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDouble(1, this->per_process_gpu_memory_fraction(), output);
2503 }
2504
2505 // string allocator_type = 2;
2506 if (this->allocator_type().size() > 0) {
2507 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2508 this->allocator_type().data(), static_cast<int>(this->allocator_type().length()),
2509 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2510 "tensorflow.GPUOptions.allocator_type");
2511 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
2512 2, this->allocator_type(), output);
2513 }
2514
2515 // int64 deferred_deletion_bytes = 3;
2516 if (this->deferred_deletion_bytes() != 0) {
2517 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(3, this->deferred_deletion_bytes(), output);
2518 }
2519
2520 // bool allow_growth = 4;
2521 if (this->allow_growth() != 0) {
2522 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(4, this->allow_growth(), output);
2523 }
2524
2525 // string visible_device_list = 5;
2526 if (this->visible_device_list().size() > 0) {
2527 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2528 this->visible_device_list().data(), static_cast<int>(this->visible_device_list().length()),
2529 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2530 "tensorflow.GPUOptions.visible_device_list");
2531 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
2532 5, this->visible_device_list(), output);
2533 }
2534
2535 // int32 polling_active_delay_usecs = 6;
2536 if (this->polling_active_delay_usecs() != 0) {
2537 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(6, this->polling_active_delay_usecs(), output);
2538 }
2539
2540 // int32 polling_inactive_delay_msecs = 7;
2541 if (this->polling_inactive_delay_msecs() != 0) {
2542 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(7, this->polling_inactive_delay_msecs(), output);
2543 }
2544
2545 // bool force_gpu_compatible = 8;
2546 if (this->force_gpu_compatible() != 0) {
2547 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(8, this->force_gpu_compatible(), output);
2548 }
2549
2550 // .tensorflow.GPUOptions.Experimental experimental = 9;
2551 if (this->has_experimental()) {
2552 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
2553 9, _Internal::experimental(this), output);
2554 }
2555
2556 if (_internal_metadata_.have_unknown_fields()) {
2557 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
2558 _internal_metadata_.unknown_fields(), output);
2559 }
2560 // @@protoc_insertion_point(serialize_end:tensorflow.GPUOptions)
2561}
2562
2563::PROTOBUF_NAMESPACE_ID::uint8* GPUOptions::InternalSerializeWithCachedSizesToArray(
2564 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
2565 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.GPUOptions)
2566 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2567 (void) cached_has_bits;
2568
2569 // double per_process_gpu_memory_fraction = 1;
2570 if (!(this->per_process_gpu_memory_fraction() <= 0 && this->per_process_gpu_memory_fraction() >= 0)) {
2571 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(1, this->per_process_gpu_memory_fraction(), target);
2572 }
2573
2574 // string allocator_type = 2;
2575 if (this->allocator_type().size() > 0) {
2576 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2577 this->allocator_type().data(), static_cast<int>(this->allocator_type().length()),
2578 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2579 "tensorflow.GPUOptions.allocator_type");
2580 target =
2581 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
2582 2, this->allocator_type(), target);
2583 }
2584
2585 // int64 deferred_deletion_bytes = 3;
2586 if (this->deferred_deletion_bytes() != 0) {
2587 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(3, this->deferred_deletion_bytes(), target);
2588 }
2589
2590 // bool allow_growth = 4;
2591 if (this->allow_growth() != 0) {
2592 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(4, this->allow_growth(), target);
2593 }
2594
2595 // string visible_device_list = 5;
2596 if (this->visible_device_list().size() > 0) {
2597 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
2598 this->visible_device_list().data(), static_cast<int>(this->visible_device_list().length()),
2599 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
2600 "tensorflow.GPUOptions.visible_device_list");
2601 target =
2602 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
2603 5, this->visible_device_list(), target);
2604 }
2605
2606 // int32 polling_active_delay_usecs = 6;
2607 if (this->polling_active_delay_usecs() != 0) {
2608 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(6, this->polling_active_delay_usecs(), target);
2609 }
2610
2611 // int32 polling_inactive_delay_msecs = 7;
2612 if (this->polling_inactive_delay_msecs() != 0) {
2613 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(7, this->polling_inactive_delay_msecs(), target);
2614 }
2615
2616 // bool force_gpu_compatible = 8;
2617 if (this->force_gpu_compatible() != 0) {
2618 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(8, this->force_gpu_compatible(), target);
2619 }
2620
2621 // .tensorflow.GPUOptions.Experimental experimental = 9;
2622 if (this->has_experimental()) {
2623 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
2624 InternalWriteMessageToArray(
2625 9, _Internal::experimental(this), target);
2626 }
2627
2628 if (_internal_metadata_.have_unknown_fields()) {
2629 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
2630 _internal_metadata_.unknown_fields(), target);
2631 }
2632 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.GPUOptions)
2633 return target;
2634}
2635
2636size_t GPUOptions::ByteSizeLong() const {
2637// @@protoc_insertion_point(message_byte_size_start:tensorflow.GPUOptions)
2638 size_t total_size = 0;
2639
2640 if (_internal_metadata_.have_unknown_fields()) {
2641 total_size +=
2642 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
2643 _internal_metadata_.unknown_fields());
2644 }
2645 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2646 // Prevent compiler warnings about cached_has_bits being unused
2647 (void) cached_has_bits;
2648
2649 // string allocator_type = 2;
2650 if (this->allocator_type().size() > 0) {
2651 total_size += 1 +
2652 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
2653 this->allocator_type());
2654 }
2655
2656 // string visible_device_list = 5;
2657 if (this->visible_device_list().size() > 0) {
2658 total_size += 1 +
2659 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
2660 this->visible_device_list());
2661 }
2662
2663 // .tensorflow.GPUOptions.Experimental experimental = 9;
2664 if (this->has_experimental()) {
2665 total_size += 1 +
2666 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
2667 *experimental_);
2668 }
2669
2670 // double per_process_gpu_memory_fraction = 1;
2671 if (!(this->per_process_gpu_memory_fraction() <= 0 && this->per_process_gpu_memory_fraction() >= 0)) {
2672 total_size += 1 + 8;
2673 }
2674
2675 // int64 deferred_deletion_bytes = 3;
2676 if (this->deferred_deletion_bytes() != 0) {
2677 total_size += 1 +
2678 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
2679 this->deferred_deletion_bytes());
2680 }
2681
2682 // int32 polling_active_delay_usecs = 6;
2683 if (this->polling_active_delay_usecs() != 0) {
2684 total_size += 1 +
2685 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
2686 this->polling_active_delay_usecs());
2687 }
2688
2689 // bool allow_growth = 4;
2690 if (this->allow_growth() != 0) {
2691 total_size += 1 + 1;
2692 }
2693
2694 // bool force_gpu_compatible = 8;
2695 if (this->force_gpu_compatible() != 0) {
2696 total_size += 1 + 1;
2697 }
2698
2699 // int32 polling_inactive_delay_msecs = 7;
2700 if (this->polling_inactive_delay_msecs() != 0) {
2701 total_size += 1 +
2702 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
2703 this->polling_inactive_delay_msecs());
2704 }
2705
2706 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
2707 SetCachedSize(cached_size);
2708 return total_size;
2709}
2710
2711void GPUOptions::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
2712// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.GPUOptions)
2713 GOOGLE_DCHECK_NE(&from, this);
2714 const GPUOptions* source =
2715 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<GPUOptions>(
2716 &from);
2717 if (source == nullptr) {
2718 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.GPUOptions)
2719 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
2720 } else {
2721 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.GPUOptions)
2722 MergeFrom(*source);
2723 }
2724}
2725
2726void GPUOptions::MergeFrom(const GPUOptions& from) {
2727// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.GPUOptions)
2728 GOOGLE_DCHECK_NE(&from, this);
2729 _internal_metadata_.MergeFrom(from._internal_metadata_);
2730 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2731 (void) cached_has_bits;
2732
2733 if (from.allocator_type().size() > 0) {
2734 set_allocator_type(from.allocator_type());
2735 }
2736 if (from.visible_device_list().size() > 0) {
2737 set_visible_device_list(from.visible_device_list());
2738 }
2739 if (from.has_experimental()) {
2740 mutable_experimental()->::tensorflow::GPUOptions_Experimental::MergeFrom(from.experimental());
2741 }
2742 if (!(from.per_process_gpu_memory_fraction() <= 0 && from.per_process_gpu_memory_fraction() >= 0)) {
2743 set_per_process_gpu_memory_fraction(from.per_process_gpu_memory_fraction());
2744 }
2745 if (from.deferred_deletion_bytes() != 0) {
2746 set_deferred_deletion_bytes(from.deferred_deletion_bytes());
2747 }
2748 if (from.polling_active_delay_usecs() != 0) {
2749 set_polling_active_delay_usecs(from.polling_active_delay_usecs());
2750 }
2751 if (from.allow_growth() != 0) {
2752 set_allow_growth(from.allow_growth());
2753 }
2754 if (from.force_gpu_compatible() != 0) {
2755 set_force_gpu_compatible(from.force_gpu_compatible());
2756 }
2757 if (from.polling_inactive_delay_msecs() != 0) {
2758 set_polling_inactive_delay_msecs(from.polling_inactive_delay_msecs());
2759 }
2760}
2761
2762void GPUOptions::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
2763// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.GPUOptions)
2764 if (&from == this) return;
2765 Clear();
2766 MergeFrom(from);
2767}
2768
2769void GPUOptions::CopyFrom(const GPUOptions& from) {
2770// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.GPUOptions)
2771 if (&from == this) return;
2772 Clear();
2773 MergeFrom(from);
2774}
2775
2776bool GPUOptions::IsInitialized() const {
2777 return true;
2778}
2779
2780void GPUOptions::InternalSwap(GPUOptions* other) {
2781 using std::swap;
2782 _internal_metadata_.Swap(&other->_internal_metadata_);
2783 allocator_type_.Swap(&other->allocator_type_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
2784 GetArenaNoVirtual());
2785 visible_device_list_.Swap(&other->visible_device_list_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
2786 GetArenaNoVirtual());
2787 swap(experimental_, other->experimental_);
2788 swap(per_process_gpu_memory_fraction_, other->per_process_gpu_memory_fraction_);
2789 swap(deferred_deletion_bytes_, other->deferred_deletion_bytes_);
2790 swap(polling_active_delay_usecs_, other->polling_active_delay_usecs_);
2791 swap(allow_growth_, other->allow_growth_);
2792 swap(force_gpu_compatible_, other->force_gpu_compatible_);
2793 swap(polling_inactive_delay_msecs_, other->polling_inactive_delay_msecs_);
2794}
2795
2796::PROTOBUF_NAMESPACE_ID::Metadata GPUOptions::GetMetadata() const {
2797 return GetMetadataStatic();
2798}
2799
2800
2801// ===================================================================
2802
2803void OptimizerOptions::InitAsDefaultInstance() {
2804}
2805class OptimizerOptions::_Internal {
2806 public:
2807};
2808
2809OptimizerOptions::OptimizerOptions()
2810 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
2811 SharedCtor();
2812 // @@protoc_insertion_point(constructor:tensorflow.OptimizerOptions)
2813}
2814OptimizerOptions::OptimizerOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena)
2815 : ::PROTOBUF_NAMESPACE_ID::Message(),
2816 _internal_metadata_(arena) {
2817 SharedCtor();
2818 RegisterArenaDtor(arena);
2819 // @@protoc_insertion_point(arena_constructor:tensorflow.OptimizerOptions)
2820}
2821OptimizerOptions::OptimizerOptions(const OptimizerOptions& from)
2822 : ::PROTOBUF_NAMESPACE_ID::Message(),
2823 _internal_metadata_(nullptr) {
2824 _internal_metadata_.MergeFrom(from._internal_metadata_);
2825 ::memcpy(&opt_level_, &from.opt_level_,
2826 static_cast<size_t>(reinterpret_cast<char*>(&global_jit_level_) -
2827 reinterpret_cast<char*>(&opt_level_)) + sizeof(global_jit_level_));
2828 // @@protoc_insertion_point(copy_constructor:tensorflow.OptimizerOptions)
2829}
2830
2831void OptimizerOptions::SharedCtor() {
2832 ::memset(&opt_level_, 0, static_cast<size_t>(
2833 reinterpret_cast<char*>(&global_jit_level_) -
2834 reinterpret_cast<char*>(&opt_level_)) + sizeof(global_jit_level_));
2835}
2836
2837OptimizerOptions::~OptimizerOptions() {
2838 // @@protoc_insertion_point(destructor:tensorflow.OptimizerOptions)
2839 SharedDtor();
2840}
2841
2842void OptimizerOptions::SharedDtor() {
2843 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
2844}
2845
2846void OptimizerOptions::ArenaDtor(void* object) {
2847 OptimizerOptions* _this = reinterpret_cast< OptimizerOptions* >(object);
2848 (void)_this;
2849}
2850void OptimizerOptions::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
2851}
2852void OptimizerOptions::SetCachedSize(int size) const {
2853 _cached_size_.Set(size);
2854}
2855const OptimizerOptions& OptimizerOptions::default_instance() {
2856 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_OptimizerOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
2857 return *internal_default_instance();
2858}
2859
2860
2861void OptimizerOptions::Clear() {
2862// @@protoc_insertion_point(message_clear_start:tensorflow.OptimizerOptions)
2863 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
2864 // Prevent compiler warnings about cached_has_bits being unused
2865 (void) cached_has_bits;
2866
2867 ::memset(&opt_level_, 0, static_cast<size_t>(
2868 reinterpret_cast<char*>(&global_jit_level_) -
2869 reinterpret_cast<char*>(&opt_level_)) + sizeof(global_jit_level_));
2870 _internal_metadata_.Clear();
2871}
2872
2873#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2874const char* OptimizerOptions::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
2875#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
2876 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
2877 while (!ctx->Done(&ptr)) {
2878 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
2879 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
2880 CHK_(ptr);
2881 switch (tag >> 3) {
2882 // bool do_common_subexpression_elimination = 1;
2883 case 1:
2884 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
2885 do_common_subexpression_elimination_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2886 CHK_(ptr);
2887 } else goto handle_unusual;
2888 continue;
2889 // bool do_constant_folding = 2;
2890 case 2:
2891 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
2892 do_constant_folding_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2893 CHK_(ptr);
2894 } else goto handle_unusual;
2895 continue;
2896 // .tensorflow.OptimizerOptions.Level opt_level = 3;
2897 case 3:
2898 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
2899 ::PROTOBUF_NAMESPACE_ID::uint64 val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2900 CHK_(ptr);
2901 set_opt_level(static_cast<::tensorflow::OptimizerOptions_Level>(val));
2902 } else goto handle_unusual;
2903 continue;
2904 // bool do_function_inlining = 4;
2905 case 4:
2906 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
2907 do_function_inlining_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2908 CHK_(ptr);
2909 } else goto handle_unusual;
2910 continue;
2911 // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
2912 case 5:
2913 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
2914 ::PROTOBUF_NAMESPACE_ID::uint64 val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2915 CHK_(ptr);
2916 set_global_jit_level(static_cast<::tensorflow::OptimizerOptions_GlobalJitLevel>(val));
2917 } else goto handle_unusual;
2918 continue;
2919 // int64 max_folded_constant_in_bytes = 6;
2920 case 6:
2921 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 48)) {
2922 max_folded_constant_in_bytes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2923 CHK_(ptr);
2924 } else goto handle_unusual;
2925 continue;
2926 // bool cpu_global_jit = 7;
2927 case 7:
2928 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 56)) {
2929 cpu_global_jit_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
2930 CHK_(ptr);
2931 } else goto handle_unusual;
2932 continue;
2933 default: {
2934 handle_unusual:
2935 if ((tag & 7) == 4 || tag == 0) {
2936 ctx->SetLastTag(tag);
2937 goto success;
2938 }
2939 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
2940 CHK_(ptr != nullptr);
2941 continue;
2942 }
2943 } // switch
2944 } // while
2945success:
2946 return ptr;
2947failure:
2948 ptr = nullptr;
2949 goto success;
2950#undef CHK_
2951}
2952#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
2953bool OptimizerOptions::MergePartialFromCodedStream(
2954 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
2955#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
2956 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
2957 // @@protoc_insertion_point(parse_start:tensorflow.OptimizerOptions)
2958 for (;;) {
2959 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
2960 tag = p.first;
2961 if (!p.second) goto handle_unusual;
2962 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
2963 // bool do_common_subexpression_elimination = 1;
2964 case 1: {
2965 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
2966
2967 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2968 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
2969 input, &do_common_subexpression_elimination_)));
2970 } else {
2971 goto handle_unusual;
2972 }
2973 break;
2974 }
2975
2976 // bool do_constant_folding = 2;
2977 case 2: {
2978 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {
2979
2980 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2981 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
2982 input, &do_constant_folding_)));
2983 } else {
2984 goto handle_unusual;
2985 }
2986 break;
2987 }
2988
2989 // .tensorflow.OptimizerOptions.Level opt_level = 3;
2990 case 3: {
2991 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
2992 int value = 0;
2993 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
2994 int, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_ENUM>(
2995 input, &value)));
2996 set_opt_level(static_cast< ::tensorflow::OptimizerOptions_Level >(value));
2997 } else {
2998 goto handle_unusual;
2999 }
3000 break;
3001 }
3002
3003 // bool do_function_inlining = 4;
3004 case 4: {
3005 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {
3006
3007 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3008 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
3009 input, &do_function_inlining_)));
3010 } else {
3011 goto handle_unusual;
3012 }
3013 break;
3014 }
3015
3016 // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
3017 case 5: {
3018 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
3019 int value = 0;
3020 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3021 int, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_ENUM>(
3022 input, &value)));
3023 set_global_jit_level(static_cast< ::tensorflow::OptimizerOptions_GlobalJitLevel >(value));
3024 } else {
3025 goto handle_unusual;
3026 }
3027 break;
3028 }
3029
3030 // int64 max_folded_constant_in_bytes = 6;
3031 case 6: {
3032 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (48 & 0xFF)) {
3033
3034 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3035 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
3036 input, &max_folded_constant_in_bytes_)));
3037 } else {
3038 goto handle_unusual;
3039 }
3040 break;
3041 }
3042
3043 // bool cpu_global_jit = 7;
3044 case 7: {
3045 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (56 & 0xFF)) {
3046
3047 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3048 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
3049 input, &cpu_global_jit_)));
3050 } else {
3051 goto handle_unusual;
3052 }
3053 break;
3054 }
3055
3056 default: {
3057 handle_unusual:
3058 if (tag == 0) {
3059 goto success;
3060 }
3061 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
3062 input, tag, _internal_metadata_.mutable_unknown_fields()));
3063 break;
3064 }
3065 }
3066 }
3067success:
3068 // @@protoc_insertion_point(parse_success:tensorflow.OptimizerOptions)
3069 return true;
3070failure:
3071 // @@protoc_insertion_point(parse_failure:tensorflow.OptimizerOptions)
3072 return false;
3073#undef DO_
3074}
3075#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
3076
3077void OptimizerOptions::SerializeWithCachedSizes(
3078 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
3079 // @@protoc_insertion_point(serialize_start:tensorflow.OptimizerOptions)
3080 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
3081 (void) cached_has_bits;
3082
3083 // bool do_common_subexpression_elimination = 1;
3084 if (this->do_common_subexpression_elimination() != 0) {
3085 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(1, this->do_common_subexpression_elimination(), output);
3086 }
3087
3088 // bool do_constant_folding = 2;
3089 if (this->do_constant_folding() != 0) {
3090 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(2, this->do_constant_folding(), output);
3091 }
3092
3093 // .tensorflow.OptimizerOptions.Level opt_level = 3;
3094 if (this->opt_level() != 0) {
3095 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnum(
3096 3, this->opt_level(), output);
3097 }
3098
3099 // bool do_function_inlining = 4;
3100 if (this->do_function_inlining() != 0) {
3101 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(4, this->do_function_inlining(), output);
3102 }
3103
3104 // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
3105 if (this->global_jit_level() != 0) {
3106 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnum(
3107 5, this->global_jit_level(), output);
3108 }
3109
3110 // int64 max_folded_constant_in_bytes = 6;
3111 if (this->max_folded_constant_in_bytes() != 0) {
3112 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(6, this->max_folded_constant_in_bytes(), output);
3113 }
3114
3115 // bool cpu_global_jit = 7;
3116 if (this->cpu_global_jit() != 0) {
3117 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(7, this->cpu_global_jit(), output);
3118 }
3119
3120 if (_internal_metadata_.have_unknown_fields()) {
3121 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
3122 _internal_metadata_.unknown_fields(), output);
3123 }
3124 // @@protoc_insertion_point(serialize_end:tensorflow.OptimizerOptions)
3125}
3126
3127::PROTOBUF_NAMESPACE_ID::uint8* OptimizerOptions::InternalSerializeWithCachedSizesToArray(
3128 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
3129 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.OptimizerOptions)
3130 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
3131 (void) cached_has_bits;
3132
3133 // bool do_common_subexpression_elimination = 1;
3134 if (this->do_common_subexpression_elimination() != 0) {
3135 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(1, this->do_common_subexpression_elimination(), target);
3136 }
3137
3138 // bool do_constant_folding = 2;
3139 if (this->do_constant_folding() != 0) {
3140 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(2, this->do_constant_folding(), target);
3141 }
3142
3143 // .tensorflow.OptimizerOptions.Level opt_level = 3;
3144 if (this->opt_level() != 0) {
3145 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray(
3146 3, this->opt_level(), target);
3147 }
3148
3149 // bool do_function_inlining = 4;
3150 if (this->do_function_inlining() != 0) {
3151 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(4, this->do_function_inlining(), target);
3152 }
3153
3154 // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
3155 if (this->global_jit_level() != 0) {
3156 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray(
3157 5, this->global_jit_level(), target);
3158 }
3159
3160 // int64 max_folded_constant_in_bytes = 6;
3161 if (this->max_folded_constant_in_bytes() != 0) {
3162 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(6, this->max_folded_constant_in_bytes(), target);
3163 }
3164
3165 // bool cpu_global_jit = 7;
3166 if (this->cpu_global_jit() != 0) {
3167 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(7, this->cpu_global_jit(), target);
3168 }
3169
3170 if (_internal_metadata_.have_unknown_fields()) {
3171 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
3172 _internal_metadata_.unknown_fields(), target);
3173 }
3174 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.OptimizerOptions)
3175 return target;
3176}
3177
3178size_t OptimizerOptions::ByteSizeLong() const {
3179// @@protoc_insertion_point(message_byte_size_start:tensorflow.OptimizerOptions)
3180 size_t total_size = 0;
3181
3182 if (_internal_metadata_.have_unknown_fields()) {
3183 total_size +=
3184 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
3185 _internal_metadata_.unknown_fields());
3186 }
3187 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
3188 // Prevent compiler warnings about cached_has_bits being unused
3189 (void) cached_has_bits;
3190
3191 // .tensorflow.OptimizerOptions.Level opt_level = 3;
3192 if (this->opt_level() != 0) {
3193 total_size += 1 +
3194 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->opt_level());
3195 }
3196
3197 // bool do_common_subexpression_elimination = 1;
3198 if (this->do_common_subexpression_elimination() != 0) {
3199 total_size += 1 + 1;
3200 }
3201
3202 // bool do_constant_folding = 2;
3203 if (this->do_constant_folding() != 0) {
3204 total_size += 1 + 1;
3205 }
3206
3207 // bool do_function_inlining = 4;
3208 if (this->do_function_inlining() != 0) {
3209 total_size += 1 + 1;
3210 }
3211
3212 // bool cpu_global_jit = 7;
3213 if (this->cpu_global_jit() != 0) {
3214 total_size += 1 + 1;
3215 }
3216
3217 // int64 max_folded_constant_in_bytes = 6;
3218 if (this->max_folded_constant_in_bytes() != 0) {
3219 total_size += 1 +
3220 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
3221 this->max_folded_constant_in_bytes());
3222 }
3223
3224 // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
3225 if (this->global_jit_level() != 0) {
3226 total_size += 1 +
3227 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->global_jit_level());
3228 }
3229
3230 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
3231 SetCachedSize(cached_size);
3232 return total_size;
3233}
3234
3235void OptimizerOptions::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
3236// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.OptimizerOptions)
3237 GOOGLE_DCHECK_NE(&from, this);
3238 const OptimizerOptions* source =
3239 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<OptimizerOptions>(
3240 &from);
3241 if (source == nullptr) {
3242 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.OptimizerOptions)
3243 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
3244 } else {
3245 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.OptimizerOptions)
3246 MergeFrom(*source);
3247 }
3248}
3249
3250void OptimizerOptions::MergeFrom(const OptimizerOptions& from) {
3251// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.OptimizerOptions)
3252 GOOGLE_DCHECK_NE(&from, this);
3253 _internal_metadata_.MergeFrom(from._internal_metadata_);
3254 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
3255 (void) cached_has_bits;
3256
3257 if (from.opt_level() != 0) {
3258 set_opt_level(from.opt_level());
3259 }
3260 if (from.do_common_subexpression_elimination() != 0) {
3261 set_do_common_subexpression_elimination(from.do_common_subexpression_elimination());
3262 }
3263 if (from.do_constant_folding() != 0) {
3264 set_do_constant_folding(from.do_constant_folding());
3265 }
3266 if (from.do_function_inlining() != 0) {
3267 set_do_function_inlining(from.do_function_inlining());
3268 }
3269 if (from.cpu_global_jit() != 0) {
3270 set_cpu_global_jit(from.cpu_global_jit());
3271 }
3272 if (from.max_folded_constant_in_bytes() != 0) {
3273 set_max_folded_constant_in_bytes(from.max_folded_constant_in_bytes());
3274 }
3275 if (from.global_jit_level() != 0) {
3276 set_global_jit_level(from.global_jit_level());
3277 }
3278}
3279
3280void OptimizerOptions::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
3281// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.OptimizerOptions)
3282 if (&from == this) return;
3283 Clear();
3284 MergeFrom(from);
3285}
3286
3287void OptimizerOptions::CopyFrom(const OptimizerOptions& from) {
3288// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.OptimizerOptions)
3289 if (&from == this) return;
3290 Clear();
3291 MergeFrom(from);
3292}
3293
3294bool OptimizerOptions::IsInitialized() const {
3295 return true;
3296}
3297
3298void OptimizerOptions::InternalSwap(OptimizerOptions* other) {
3299 using std::swap;
3300 _internal_metadata_.Swap(&other->_internal_metadata_);
3301 swap(opt_level_, other->opt_level_);
3302 swap(do_common_subexpression_elimination_, other->do_common_subexpression_elimination_);
3303 swap(do_constant_folding_, other->do_constant_folding_);
3304 swap(do_function_inlining_, other->do_function_inlining_);
3305 swap(cpu_global_jit_, other->cpu_global_jit_);
3306 swap(max_folded_constant_in_bytes_, other->max_folded_constant_in_bytes_);
3307 swap(global_jit_level_, other->global_jit_level_);
3308}
3309
3310::PROTOBUF_NAMESPACE_ID::Metadata OptimizerOptions::GetMetadata() const {
3311 return GetMetadataStatic();
3312}
3313
3314
3315// ===================================================================
3316
3317void GraphOptions::InitAsDefaultInstance() {
3318 ::tensorflow::_GraphOptions_default_instance_._instance.get_mutable()->optimizer_options_ = const_cast< ::tensorflow::OptimizerOptions*>(
3319 ::tensorflow::OptimizerOptions::internal_default_instance());
3320 ::tensorflow::_GraphOptions_default_instance_._instance.get_mutable()->rewrite_options_ = const_cast< ::tensorflow::RewriterConfig*>(
3321 ::tensorflow::RewriterConfig::internal_default_instance());
3322}
3323class GraphOptions::_Internal {
3324 public:
3325 static const ::tensorflow::OptimizerOptions& optimizer_options(const GraphOptions* msg);
3326 static const ::tensorflow::RewriterConfig& rewrite_options(const GraphOptions* msg);
3327};
3328
3329const ::tensorflow::OptimizerOptions&
3330GraphOptions::_Internal::optimizer_options(const GraphOptions* msg) {
3331 return *msg->optimizer_options_;
3332}
3333const ::tensorflow::RewriterConfig&
3334GraphOptions::_Internal::rewrite_options(const GraphOptions* msg) {
3335 return *msg->rewrite_options_;
3336}
3337void GraphOptions::unsafe_arena_set_allocated_optimizer_options(
3338 ::tensorflow::OptimizerOptions* optimizer_options) {
3339 if (GetArenaNoVirtual() == nullptr) {
3340 delete optimizer_options_;
3341 }
3342 optimizer_options_ = optimizer_options;
3343 if (optimizer_options) {
3344
3345 } else {
3346
3347 }
3348 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GraphOptions.optimizer_options)
3349}
3350void GraphOptions::unsafe_arena_set_allocated_rewrite_options(
3351 ::tensorflow::RewriterConfig* rewrite_options) {
3352 if (GetArenaNoVirtual() == nullptr) {
3353 delete rewrite_options_;
3354 }
3355 rewrite_options_ = rewrite_options;
3356 if (rewrite_options) {
3357
3358 } else {
3359
3360 }
3361 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.GraphOptions.rewrite_options)
3362}
3363void GraphOptions::clear_rewrite_options() {
3364 if (GetArenaNoVirtual() == nullptr && rewrite_options_ != nullptr) {
3365 delete rewrite_options_;
3366 }
3367 rewrite_options_ = nullptr;
3368}
3369GraphOptions::GraphOptions()
3370 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
3371 SharedCtor();
3372 // @@protoc_insertion_point(constructor:tensorflow.GraphOptions)
3373}
3374GraphOptions::GraphOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena)
3375 : ::PROTOBUF_NAMESPACE_ID::Message(),
3376 _internal_metadata_(arena) {
3377 SharedCtor();
3378 RegisterArenaDtor(arena);
3379 // @@protoc_insertion_point(arena_constructor:tensorflow.GraphOptions)
3380}
3381GraphOptions::GraphOptions(const GraphOptions& from)
3382 : ::PROTOBUF_NAMESPACE_ID::Message(),
3383 _internal_metadata_(nullptr) {
3384 _internal_metadata_.MergeFrom(from._internal_metadata_);
3385 if (from.has_optimizer_options()) {
3386 optimizer_options_ = new ::tensorflow::OptimizerOptions(*from.optimizer_options_);
3387 } else {
3388 optimizer_options_ = nullptr;
3389 }
3390 if (from.has_rewrite_options()) {
3391 rewrite_options_ = new ::tensorflow::RewriterConfig(*from.rewrite_options_);
3392 } else {
3393 rewrite_options_ = nullptr;
3394 }
3395 ::memcpy(&build_cost_model_, &from.build_cost_model_,
3396 static_cast<size_t>(reinterpret_cast<char*>(&build_cost_model_after_) -
3397 reinterpret_cast<char*>(&build_cost_model_)) + sizeof(build_cost_model_after_));
3398 // @@protoc_insertion_point(copy_constructor:tensorflow.GraphOptions)
3399}
3400
3401void GraphOptions::SharedCtor() {
3402 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_GraphOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
3403 ::memset(&optimizer_options_, 0, static_cast<size_t>(
3404 reinterpret_cast<char*>(&build_cost_model_after_) -
3405 reinterpret_cast<char*>(&optimizer_options_)) + sizeof(build_cost_model_after_));
3406}
3407
3408GraphOptions::~GraphOptions() {
3409 // @@protoc_insertion_point(destructor:tensorflow.GraphOptions)
3410 SharedDtor();
3411}
3412
3413void GraphOptions::SharedDtor() {
3414 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
3415 if (this != internal_default_instance()) delete optimizer_options_;
3416 if (this != internal_default_instance()) delete rewrite_options_;
3417}
3418
3419void GraphOptions::ArenaDtor(void* object) {
3420 GraphOptions* _this = reinterpret_cast< GraphOptions* >(object);
3421 (void)_this;
3422}
3423void GraphOptions::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
3424}
3425void GraphOptions::SetCachedSize(int size) const {
3426 _cached_size_.Set(size);
3427}
3428const GraphOptions& GraphOptions::default_instance() {
3429 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_GraphOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
3430 return *internal_default_instance();
3431}
3432
3433
3434void GraphOptions::Clear() {
3435// @@protoc_insertion_point(message_clear_start:tensorflow.GraphOptions)
3436 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
3437 // Prevent compiler warnings about cached_has_bits being unused
3438 (void) cached_has_bits;
3439
3440 if (GetArenaNoVirtual() == nullptr && optimizer_options_ != nullptr) {
3441 delete optimizer_options_;
3442 }
3443 optimizer_options_ = nullptr;
3444 if (GetArenaNoVirtual() == nullptr && rewrite_options_ != nullptr) {
3445 delete rewrite_options_;
3446 }
3447 rewrite_options_ = nullptr;
3448 ::memset(&build_cost_model_, 0, static_cast<size_t>(
3449 reinterpret_cast<char*>(&build_cost_model_after_) -
3450 reinterpret_cast<char*>(&build_cost_model_)) + sizeof(build_cost_model_after_));
3451 _internal_metadata_.Clear();
3452}
3453
3454#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
3455const char* GraphOptions::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
3456#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
3457 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
3458 while (!ctx->Done(&ptr)) {
3459 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
3460 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
3461 CHK_(ptr);
3462 switch (tag >> 3) {
3463 // bool enable_recv_scheduling = 2;
3464 case 2:
3465 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
3466 enable_recv_scheduling_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
3467 CHK_(ptr);
3468 } else goto handle_unusual;
3469 continue;
3470 // .tensorflow.OptimizerOptions optimizer_options = 3;
3471 case 3:
3472 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
3473 ptr = ctx->ParseMessage(mutable_optimizer_options(), ptr);
3474 CHK_(ptr);
3475 } else goto handle_unusual;
3476 continue;
3477 // int64 build_cost_model = 4;
3478 case 4:
3479 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
3480 build_cost_model_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
3481 CHK_(ptr);
3482 } else goto handle_unusual;
3483 continue;
3484 // bool infer_shapes = 5;
3485 case 5:
3486 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
3487 infer_shapes_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
3488 CHK_(ptr);
3489 } else goto handle_unusual;
3490 continue;
3491 // bool place_pruned_graph = 6;
3492 case 6:
3493 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 48)) {
3494 place_pruned_graph_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
3495 CHK_(ptr);
3496 } else goto handle_unusual;
3497 continue;
3498 // bool enable_bfloat16_sendrecv = 7;
3499 case 7:
3500 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 56)) {
3501 enable_bfloat16_sendrecv_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
3502 CHK_(ptr);
3503 } else goto handle_unusual;
3504 continue;
3505 // int32 timeline_step = 8;
3506 case 8:
3507 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 64)) {
3508 timeline_step_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
3509 CHK_(ptr);
3510 } else goto handle_unusual;
3511 continue;
3512 // int64 build_cost_model_after = 9;
3513 case 9:
3514 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 72)) {
3515 build_cost_model_after_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
3516 CHK_(ptr);
3517 } else goto handle_unusual;
3518 continue;
3519 // .tensorflow.RewriterConfig rewrite_options = 10;
3520 case 10:
3521 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 82)) {
3522 ptr = ctx->ParseMessage(mutable_rewrite_options(), ptr);
3523 CHK_(ptr);
3524 } else goto handle_unusual;
3525 continue;
3526 default: {
3527 handle_unusual:
3528 if ((tag & 7) == 4 || tag == 0) {
3529 ctx->SetLastTag(tag);
3530 goto success;
3531 }
3532 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
3533 CHK_(ptr != nullptr);
3534 continue;
3535 }
3536 } // switch
3537 } // while
3538success:
3539 return ptr;
3540failure:
3541 ptr = nullptr;
3542 goto success;
3543#undef CHK_
3544}
3545#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
3546bool GraphOptions::MergePartialFromCodedStream(
3547 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
3548#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
3549 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
3550 // @@protoc_insertion_point(parse_start:tensorflow.GraphOptions)
3551 for (;;) {
3552 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
3553 tag = p.first;
3554 if (!p.second) goto handle_unusual;
3555 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
3556 // bool enable_recv_scheduling = 2;
3557 case 2: {
3558 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {
3559
3560 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3561 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
3562 input, &enable_recv_scheduling_)));
3563 } else {
3564 goto handle_unusual;
3565 }
3566 break;
3567 }
3568
3569 // .tensorflow.OptimizerOptions optimizer_options = 3;
3570 case 3: {
3571 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
3572 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
3573 input, mutable_optimizer_options()));
3574 } else {
3575 goto handle_unusual;
3576 }
3577 break;
3578 }
3579
3580 // int64 build_cost_model = 4;
3581 case 4: {
3582 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {
3583
3584 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3585 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
3586 input, &build_cost_model_)));
3587 } else {
3588 goto handle_unusual;
3589 }
3590 break;
3591 }
3592
3593 // bool infer_shapes = 5;
3594 case 5: {
3595 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
3596
3597 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3598 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
3599 input, &infer_shapes_)));
3600 } else {
3601 goto handle_unusual;
3602 }
3603 break;
3604 }
3605
3606 // bool place_pruned_graph = 6;
3607 case 6: {
3608 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (48 & 0xFF)) {
3609
3610 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3611 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
3612 input, &place_pruned_graph_)));
3613 } else {
3614 goto handle_unusual;
3615 }
3616 break;
3617 }
3618
3619 // bool enable_bfloat16_sendrecv = 7;
3620 case 7: {
3621 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (56 & 0xFF)) {
3622
3623 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3624 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
3625 input, &enable_bfloat16_sendrecv_)));
3626 } else {
3627 goto handle_unusual;
3628 }
3629 break;
3630 }
3631
3632 // int32 timeline_step = 8;
3633 case 8: {
3634 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (64 & 0xFF)) {
3635
3636 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3637 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
3638 input, &timeline_step_)));
3639 } else {
3640 goto handle_unusual;
3641 }
3642 break;
3643 }
3644
3645 // int64 build_cost_model_after = 9;
3646 case 9: {
3647 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (72 & 0xFF)) {
3648
3649 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
3650 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
3651 input, &build_cost_model_after_)));
3652 } else {
3653 goto handle_unusual;
3654 }
3655 break;
3656 }
3657
3658 // .tensorflow.RewriterConfig rewrite_options = 10;
3659 case 10: {
3660 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (82 & 0xFF)) {
3661 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
3662 input, mutable_rewrite_options()));
3663 } else {
3664 goto handle_unusual;
3665 }
3666 break;
3667 }
3668
3669 default: {
3670 handle_unusual:
3671 if (tag == 0) {
3672 goto success;
3673 }
3674 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
3675 input, tag, _internal_metadata_.mutable_unknown_fields()));
3676 break;
3677 }
3678 }
3679 }
3680success:
3681 // @@protoc_insertion_point(parse_success:tensorflow.GraphOptions)
3682 return true;
3683failure:
3684 // @@protoc_insertion_point(parse_failure:tensorflow.GraphOptions)
3685 return false;
3686#undef DO_
3687}
3688#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
3689
3690void GraphOptions::SerializeWithCachedSizes(
3691 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
3692 // @@protoc_insertion_point(serialize_start:tensorflow.GraphOptions)
3693 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
3694 (void) cached_has_bits;
3695
3696 // bool enable_recv_scheduling = 2;
3697 if (this->enable_recv_scheduling() != 0) {
3698 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(2, this->enable_recv_scheduling(), output);
3699 }
3700
3701 // .tensorflow.OptimizerOptions optimizer_options = 3;
3702 if (this->has_optimizer_options()) {
3703 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
3704 3, _Internal::optimizer_options(this), output);
3705 }
3706
3707 // int64 build_cost_model = 4;
3708 if (this->build_cost_model() != 0) {
3709 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(4, this->build_cost_model(), output);
3710 }
3711
3712 // bool infer_shapes = 5;
3713 if (this->infer_shapes() != 0) {
3714 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(5, this->infer_shapes(), output);
3715 }
3716
3717 // bool place_pruned_graph = 6;
3718 if (this->place_pruned_graph() != 0) {
3719 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(6, this->place_pruned_graph(), output);
3720 }
3721
3722 // bool enable_bfloat16_sendrecv = 7;
3723 if (this->enable_bfloat16_sendrecv() != 0) {
3724 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(7, this->enable_bfloat16_sendrecv(), output);
3725 }
3726
3727 // int32 timeline_step = 8;
3728 if (this->timeline_step() != 0) {
3729 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(8, this->timeline_step(), output);
3730 }
3731
3732 // int64 build_cost_model_after = 9;
3733 if (this->build_cost_model_after() != 0) {
3734 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(9, this->build_cost_model_after(), output);
3735 }
3736
3737 // .tensorflow.RewriterConfig rewrite_options = 10;
3738 if (this->has_rewrite_options()) {
3739 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
3740 10, _Internal::rewrite_options(this), output);
3741 }
3742
3743 if (_internal_metadata_.have_unknown_fields()) {
3744 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
3745 _internal_metadata_.unknown_fields(), output);
3746 }
3747 // @@protoc_insertion_point(serialize_end:tensorflow.GraphOptions)
3748}
3749
3750::PROTOBUF_NAMESPACE_ID::uint8* GraphOptions::InternalSerializeWithCachedSizesToArray(
3751 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
3752 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.GraphOptions)
3753 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
3754 (void) cached_has_bits;
3755
3756 // bool enable_recv_scheduling = 2;
3757 if (this->enable_recv_scheduling() != 0) {
3758 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(2, this->enable_recv_scheduling(), target);
3759 }
3760
3761 // .tensorflow.OptimizerOptions optimizer_options = 3;
3762 if (this->has_optimizer_options()) {
3763 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
3764 InternalWriteMessageToArray(
3765 3, _Internal::optimizer_options(this), target);
3766 }
3767
3768 // int64 build_cost_model = 4;
3769 if (this->build_cost_model() != 0) {
3770 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(4, this->build_cost_model(), target);
3771 }
3772
3773 // bool infer_shapes = 5;
3774 if (this->infer_shapes() != 0) {
3775 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(5, this->infer_shapes(), target);
3776 }
3777
3778 // bool place_pruned_graph = 6;
3779 if (this->place_pruned_graph() != 0) {
3780 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(6, this->place_pruned_graph(), target);
3781 }
3782
3783 // bool enable_bfloat16_sendrecv = 7;
3784 if (this->enable_bfloat16_sendrecv() != 0) {
3785 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(7, this->enable_bfloat16_sendrecv(), target);
3786 }
3787
3788 // int32 timeline_step = 8;
3789 if (this->timeline_step() != 0) {
3790 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(8, this->timeline_step(), target);
3791 }
3792
3793 // int64 build_cost_model_after = 9;
3794 if (this->build_cost_model_after() != 0) {
3795 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(9, this->build_cost_model_after(), target);
3796 }
3797
3798 // .tensorflow.RewriterConfig rewrite_options = 10;
3799 if (this->has_rewrite_options()) {
3800 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
3801 InternalWriteMessageToArray(
3802 10, _Internal::rewrite_options(this), target);
3803 }
3804
3805 if (_internal_metadata_.have_unknown_fields()) {
3806 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
3807 _internal_metadata_.unknown_fields(), target);
3808 }
3809 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.GraphOptions)
3810 return target;
3811}
3812
3813size_t GraphOptions::ByteSizeLong() const {
3814// @@protoc_insertion_point(message_byte_size_start:tensorflow.GraphOptions)
3815 size_t total_size = 0;
3816
3817 if (_internal_metadata_.have_unknown_fields()) {
3818 total_size +=
3819 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
3820 _internal_metadata_.unknown_fields());
3821 }
3822 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
3823 // Prevent compiler warnings about cached_has_bits being unused
3824 (void) cached_has_bits;
3825
3826 // .tensorflow.OptimizerOptions optimizer_options = 3;
3827 if (this->has_optimizer_options()) {
3828 total_size += 1 +
3829 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
3830 *optimizer_options_);
3831 }
3832
3833 // .tensorflow.RewriterConfig rewrite_options = 10;
3834 if (this->has_rewrite_options()) {
3835 total_size += 1 +
3836 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
3837 *rewrite_options_);
3838 }
3839
3840 // int64 build_cost_model = 4;
3841 if (this->build_cost_model() != 0) {
3842 total_size += 1 +
3843 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
3844 this->build_cost_model());
3845 }
3846
3847 // bool enable_recv_scheduling = 2;
3848 if (this->enable_recv_scheduling() != 0) {
3849 total_size += 1 + 1;
3850 }
3851
3852 // bool infer_shapes = 5;
3853 if (this->infer_shapes() != 0) {
3854 total_size += 1 + 1;
3855 }
3856
3857 // bool place_pruned_graph = 6;
3858 if (this->place_pruned_graph() != 0) {
3859 total_size += 1 + 1;
3860 }
3861
3862 // bool enable_bfloat16_sendrecv = 7;
3863 if (this->enable_bfloat16_sendrecv() != 0) {
3864 total_size += 1 + 1;
3865 }
3866
3867 // int32 timeline_step = 8;
3868 if (this->timeline_step() != 0) {
3869 total_size += 1 +
3870 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
3871 this->timeline_step());
3872 }
3873
3874 // int64 build_cost_model_after = 9;
3875 if (this->build_cost_model_after() != 0) {
3876 total_size += 1 +
3877 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
3878 this->build_cost_model_after());
3879 }
3880
3881 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
3882 SetCachedSize(cached_size);
3883 return total_size;
3884}
3885
3886void GraphOptions::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
3887// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.GraphOptions)
3888 GOOGLE_DCHECK_NE(&from, this);
3889 const GraphOptions* source =
3890 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<GraphOptions>(
3891 &from);
3892 if (source == nullptr) {
3893 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.GraphOptions)
3894 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
3895 } else {
3896 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.GraphOptions)
3897 MergeFrom(*source);
3898 }
3899}
3900
3901void GraphOptions::MergeFrom(const GraphOptions& from) {
3902// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.GraphOptions)
3903 GOOGLE_DCHECK_NE(&from, this);
3904 _internal_metadata_.MergeFrom(from._internal_metadata_);
3905 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
3906 (void) cached_has_bits;
3907
3908 if (from.has_optimizer_options()) {
3909 mutable_optimizer_options()->::tensorflow::OptimizerOptions::MergeFrom(from.optimizer_options());
3910 }
3911 if (from.has_rewrite_options()) {
3912 mutable_rewrite_options()->::tensorflow::RewriterConfig::MergeFrom(from.rewrite_options());
3913 }
3914 if (from.build_cost_model() != 0) {
3915 set_build_cost_model(from.build_cost_model());
3916 }
3917 if (from.enable_recv_scheduling() != 0) {
3918 set_enable_recv_scheduling(from.enable_recv_scheduling());
3919 }
3920 if (from.infer_shapes() != 0) {
3921 set_infer_shapes(from.infer_shapes());
3922 }
3923 if (from.place_pruned_graph() != 0) {
3924 set_place_pruned_graph(from.place_pruned_graph());
3925 }
3926 if (from.enable_bfloat16_sendrecv() != 0) {
3927 set_enable_bfloat16_sendrecv(from.enable_bfloat16_sendrecv());
3928 }
3929 if (from.timeline_step() != 0) {
3930 set_timeline_step(from.timeline_step());
3931 }
3932 if (from.build_cost_model_after() != 0) {
3933 set_build_cost_model_after(from.build_cost_model_after());
3934 }
3935}
3936
3937void GraphOptions::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
3938// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.GraphOptions)
3939 if (&from == this) return;
3940 Clear();
3941 MergeFrom(from);
3942}
3943
3944void GraphOptions::CopyFrom(const GraphOptions& from) {
3945// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.GraphOptions)
3946 if (&from == this) return;
3947 Clear();
3948 MergeFrom(from);
3949}
3950
3951bool GraphOptions::IsInitialized() const {
3952 return true;
3953}
3954
3955void GraphOptions::InternalSwap(GraphOptions* other) {
3956 using std::swap;
3957 _internal_metadata_.Swap(&other->_internal_metadata_);
3958 swap(optimizer_options_, other->optimizer_options_);
3959 swap(rewrite_options_, other->rewrite_options_);
3960 swap(build_cost_model_, other->build_cost_model_);
3961 swap(enable_recv_scheduling_, other->enable_recv_scheduling_);
3962 swap(infer_shapes_, other->infer_shapes_);
3963 swap(place_pruned_graph_, other->place_pruned_graph_);
3964 swap(enable_bfloat16_sendrecv_, other->enable_bfloat16_sendrecv_);
3965 swap(timeline_step_, other->timeline_step_);
3966 swap(build_cost_model_after_, other->build_cost_model_after_);
3967}
3968
3969::PROTOBUF_NAMESPACE_ID::Metadata GraphOptions::GetMetadata() const {
3970 return GetMetadataStatic();
3971}
3972
3973
3974// ===================================================================
3975
3976void ThreadPoolOptionProto::InitAsDefaultInstance() {
3977}
3978class ThreadPoolOptionProto::_Internal {
3979 public:
3980};
3981
3982ThreadPoolOptionProto::ThreadPoolOptionProto()
3983 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
3984 SharedCtor();
3985 // @@protoc_insertion_point(constructor:tensorflow.ThreadPoolOptionProto)
3986}
3987ThreadPoolOptionProto::ThreadPoolOptionProto(::PROTOBUF_NAMESPACE_ID::Arena* arena)
3988 : ::PROTOBUF_NAMESPACE_ID::Message(),
3989 _internal_metadata_(arena) {
3990 SharedCtor();
3991 RegisterArenaDtor(arena);
3992 // @@protoc_insertion_point(arena_constructor:tensorflow.ThreadPoolOptionProto)
3993}
3994ThreadPoolOptionProto::ThreadPoolOptionProto(const ThreadPoolOptionProto& from)
3995 : ::PROTOBUF_NAMESPACE_ID::Message(),
3996 _internal_metadata_(nullptr) {
3997 _internal_metadata_.MergeFrom(from._internal_metadata_);
3998 global_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
3999 if (!from.global_name().empty()) {
4000 global_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.global_name(),
4001 GetArenaNoVirtual());
4002 }
4003 num_threads_ = from.num_threads_;
4004 // @@protoc_insertion_point(copy_constructor:tensorflow.ThreadPoolOptionProto)
4005}
4006
4007void ThreadPoolOptionProto::SharedCtor() {
4008 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_ThreadPoolOptionProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
4009 global_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
4010 num_threads_ = 0;
4011}
4012
4013ThreadPoolOptionProto::~ThreadPoolOptionProto() {
4014 // @@protoc_insertion_point(destructor:tensorflow.ThreadPoolOptionProto)
4015 SharedDtor();
4016}
4017
4018void ThreadPoolOptionProto::SharedDtor() {
4019 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
4020 global_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
4021}
4022
4023void ThreadPoolOptionProto::ArenaDtor(void* object) {
4024 ThreadPoolOptionProto* _this = reinterpret_cast< ThreadPoolOptionProto* >(object);
4025 (void)_this;
4026}
4027void ThreadPoolOptionProto::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
4028}
4029void ThreadPoolOptionProto::SetCachedSize(int size) const {
4030 _cached_size_.Set(size);
4031}
4032const ThreadPoolOptionProto& ThreadPoolOptionProto::default_instance() {
4033 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_ThreadPoolOptionProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
4034 return *internal_default_instance();
4035}
4036
4037
4038void ThreadPoolOptionProto::Clear() {
4039// @@protoc_insertion_point(message_clear_start:tensorflow.ThreadPoolOptionProto)
4040 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4041 // Prevent compiler warnings about cached_has_bits being unused
4042 (void) cached_has_bits;
4043
4044 global_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
4045 num_threads_ = 0;
4046 _internal_metadata_.Clear();
4047}
4048
4049#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
4050const char* ThreadPoolOptionProto::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
4051#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
4052 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
4053 while (!ctx->Done(&ptr)) {
4054 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
4055 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
4056 CHK_(ptr);
4057 switch (tag >> 3) {
4058 // int32 num_threads = 1;
4059 case 1:
4060 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
4061 num_threads_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
4062 CHK_(ptr);
4063 } else goto handle_unusual;
4064 continue;
4065 // string global_name = 2;
4066 case 2:
4067 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
4068 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_global_name(), ptr, ctx, "tensorflow.ThreadPoolOptionProto.global_name");
4069 CHK_(ptr);
4070 } else goto handle_unusual;
4071 continue;
4072 default: {
4073 handle_unusual:
4074 if ((tag & 7) == 4 || tag == 0) {
4075 ctx->SetLastTag(tag);
4076 goto success;
4077 }
4078 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
4079 CHK_(ptr != nullptr);
4080 continue;
4081 }
4082 } // switch
4083 } // while
4084success:
4085 return ptr;
4086failure:
4087 ptr = nullptr;
4088 goto success;
4089#undef CHK_
4090}
4091#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
4092bool ThreadPoolOptionProto::MergePartialFromCodedStream(
4093 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
4094#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
4095 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
4096 // @@protoc_insertion_point(parse_start:tensorflow.ThreadPoolOptionProto)
4097 for (;;) {
4098 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
4099 tag = p.first;
4100 if (!p.second) goto handle_unusual;
4101 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
4102 // int32 num_threads = 1;
4103 case 1: {
4104 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
4105
4106 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
4107 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
4108 input, &num_threads_)));
4109 } else {
4110 goto handle_unusual;
4111 }
4112 break;
4113 }
4114
4115 // string global_name = 2;
4116 case 2: {
4117 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
4118 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
4119 input, this->mutable_global_name()));
4120 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
4121 this->global_name().data(), static_cast<int>(this->global_name().length()),
4122 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
4123 "tensorflow.ThreadPoolOptionProto.global_name"));
4124 } else {
4125 goto handle_unusual;
4126 }
4127 break;
4128 }
4129
4130 default: {
4131 handle_unusual:
4132 if (tag == 0) {
4133 goto success;
4134 }
4135 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
4136 input, tag, _internal_metadata_.mutable_unknown_fields()));
4137 break;
4138 }
4139 }
4140 }
4141success:
4142 // @@protoc_insertion_point(parse_success:tensorflow.ThreadPoolOptionProto)
4143 return true;
4144failure:
4145 // @@protoc_insertion_point(parse_failure:tensorflow.ThreadPoolOptionProto)
4146 return false;
4147#undef DO_
4148}
4149#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
4150
4151void ThreadPoolOptionProto::SerializeWithCachedSizes(
4152 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
4153 // @@protoc_insertion_point(serialize_start:tensorflow.ThreadPoolOptionProto)
4154 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4155 (void) cached_has_bits;
4156
4157 // int32 num_threads = 1;
4158 if (this->num_threads() != 0) {
4159 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(1, this->num_threads(), output);
4160 }
4161
4162 // string global_name = 2;
4163 if (this->global_name().size() > 0) {
4164 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
4165 this->global_name().data(), static_cast<int>(this->global_name().length()),
4166 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
4167 "tensorflow.ThreadPoolOptionProto.global_name");
4168 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
4169 2, this->global_name(), output);
4170 }
4171
4172 if (_internal_metadata_.have_unknown_fields()) {
4173 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
4174 _internal_metadata_.unknown_fields(), output);
4175 }
4176 // @@protoc_insertion_point(serialize_end:tensorflow.ThreadPoolOptionProto)
4177}
4178
4179::PROTOBUF_NAMESPACE_ID::uint8* ThreadPoolOptionProto::InternalSerializeWithCachedSizesToArray(
4180 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
4181 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.ThreadPoolOptionProto)
4182 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4183 (void) cached_has_bits;
4184
4185 // int32 num_threads = 1;
4186 if (this->num_threads() != 0) {
4187 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(1, this->num_threads(), target);
4188 }
4189
4190 // string global_name = 2;
4191 if (this->global_name().size() > 0) {
4192 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
4193 this->global_name().data(), static_cast<int>(this->global_name().length()),
4194 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
4195 "tensorflow.ThreadPoolOptionProto.global_name");
4196 target =
4197 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
4198 2, this->global_name(), target);
4199 }
4200
4201 if (_internal_metadata_.have_unknown_fields()) {
4202 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
4203 _internal_metadata_.unknown_fields(), target);
4204 }
4205 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.ThreadPoolOptionProto)
4206 return target;
4207}
4208
4209size_t ThreadPoolOptionProto::ByteSizeLong() const {
4210// @@protoc_insertion_point(message_byte_size_start:tensorflow.ThreadPoolOptionProto)
4211 size_t total_size = 0;
4212
4213 if (_internal_metadata_.have_unknown_fields()) {
4214 total_size +=
4215 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
4216 _internal_metadata_.unknown_fields());
4217 }
4218 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4219 // Prevent compiler warnings about cached_has_bits being unused
4220 (void) cached_has_bits;
4221
4222 // string global_name = 2;
4223 if (this->global_name().size() > 0) {
4224 total_size += 1 +
4225 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
4226 this->global_name());
4227 }
4228
4229 // int32 num_threads = 1;
4230 if (this->num_threads() != 0) {
4231 total_size += 1 +
4232 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
4233 this->num_threads());
4234 }
4235
4236 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
4237 SetCachedSize(cached_size);
4238 return total_size;
4239}
4240
4241void ThreadPoolOptionProto::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
4242// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.ThreadPoolOptionProto)
4243 GOOGLE_DCHECK_NE(&from, this);
4244 const ThreadPoolOptionProto* source =
4245 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<ThreadPoolOptionProto>(
4246 &from);
4247 if (source == nullptr) {
4248 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.ThreadPoolOptionProto)
4249 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
4250 } else {
4251 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.ThreadPoolOptionProto)
4252 MergeFrom(*source);
4253 }
4254}
4255
4256void ThreadPoolOptionProto::MergeFrom(const ThreadPoolOptionProto& from) {
4257// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.ThreadPoolOptionProto)
4258 GOOGLE_DCHECK_NE(&from, this);
4259 _internal_metadata_.MergeFrom(from._internal_metadata_);
4260 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4261 (void) cached_has_bits;
4262
4263 if (from.global_name().size() > 0) {
4264 set_global_name(from.global_name());
4265 }
4266 if (from.num_threads() != 0) {
4267 set_num_threads(from.num_threads());
4268 }
4269}
4270
4271void ThreadPoolOptionProto::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
4272// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.ThreadPoolOptionProto)
4273 if (&from == this) return;
4274 Clear();
4275 MergeFrom(from);
4276}
4277
4278void ThreadPoolOptionProto::CopyFrom(const ThreadPoolOptionProto& from) {
4279// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.ThreadPoolOptionProto)
4280 if (&from == this) return;
4281 Clear();
4282 MergeFrom(from);
4283}
4284
4285bool ThreadPoolOptionProto::IsInitialized() const {
4286 return true;
4287}
4288
4289void ThreadPoolOptionProto::InternalSwap(ThreadPoolOptionProto* other) {
4290 using std::swap;
4291 _internal_metadata_.Swap(&other->_internal_metadata_);
4292 global_name_.Swap(&other->global_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
4293 GetArenaNoVirtual());
4294 swap(num_threads_, other->num_threads_);
4295}
4296
4297::PROTOBUF_NAMESPACE_ID::Metadata ThreadPoolOptionProto::GetMetadata() const {
4298 return GetMetadataStatic();
4299}
4300
4301
4302// ===================================================================
4303
4304void RPCOptions::InitAsDefaultInstance() {
4305}
4306class RPCOptions::_Internal {
4307 public:
4308};
4309
4310RPCOptions::RPCOptions()
4311 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
4312 SharedCtor();
4313 // @@protoc_insertion_point(constructor:tensorflow.RPCOptions)
4314}
4315RPCOptions::RPCOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena)
4316 : ::PROTOBUF_NAMESPACE_ID::Message(),
4317 _internal_metadata_(arena) {
4318 SharedCtor();
4319 RegisterArenaDtor(arena);
4320 // @@protoc_insertion_point(arena_constructor:tensorflow.RPCOptions)
4321}
4322RPCOptions::RPCOptions(const RPCOptions& from)
4323 : ::PROTOBUF_NAMESPACE_ID::Message(),
4324 _internal_metadata_(nullptr) {
4325 _internal_metadata_.MergeFrom(from._internal_metadata_);
4326 compression_algorithm_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
4327 if (!from.compression_algorithm().empty()) {
4328 compression_algorithm_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.compression_algorithm(),
4329 GetArenaNoVirtual());
4330 }
4331 ::memcpy(&compression_level_, &from.compression_level_,
4332 static_cast<size_t>(reinterpret_cast<char*>(&num_channels_per_target_) -
4333 reinterpret_cast<char*>(&compression_level_)) + sizeof(num_channels_per_target_));
4334 // @@protoc_insertion_point(copy_constructor:tensorflow.RPCOptions)
4335}
4336
4337void RPCOptions::SharedCtor() {
4338 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_RPCOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
4339 compression_algorithm_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
4340 ::memset(&compression_level_, 0, static_cast<size_t>(
4341 reinterpret_cast<char*>(&num_channels_per_target_) -
4342 reinterpret_cast<char*>(&compression_level_)) + sizeof(num_channels_per_target_));
4343}
4344
4345RPCOptions::~RPCOptions() {
4346 // @@protoc_insertion_point(destructor:tensorflow.RPCOptions)
4347 SharedDtor();
4348}
4349
4350void RPCOptions::SharedDtor() {
4351 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
4352 compression_algorithm_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
4353}
4354
4355void RPCOptions::ArenaDtor(void* object) {
4356 RPCOptions* _this = reinterpret_cast< RPCOptions* >(object);
4357 (void)_this;
4358}
4359void RPCOptions::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
4360}
4361void RPCOptions::SetCachedSize(int size) const {
4362 _cached_size_.Set(size);
4363}
4364const RPCOptions& RPCOptions::default_instance() {
4365 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_RPCOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
4366 return *internal_default_instance();
4367}
4368
4369
4370void RPCOptions::Clear() {
4371// @@protoc_insertion_point(message_clear_start:tensorflow.RPCOptions)
4372 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4373 // Prevent compiler warnings about cached_has_bits being unused
4374 (void) cached_has_bits;
4375
4376 compression_algorithm_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
4377 ::memset(&compression_level_, 0, static_cast<size_t>(
4378 reinterpret_cast<char*>(&num_channels_per_target_) -
4379 reinterpret_cast<char*>(&compression_level_)) + sizeof(num_channels_per_target_));
4380 _internal_metadata_.Clear();
4381}
4382
4383#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
4384const char* RPCOptions::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
4385#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
4386 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
4387 while (!ctx->Done(&ptr)) {
4388 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
4389 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
4390 CHK_(ptr);
4391 switch (tag >> 3) {
4392 // bool use_rpc_for_inprocess_master = 1;
4393 case 1:
4394 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
4395 use_rpc_for_inprocess_master_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
4396 CHK_(ptr);
4397 } else goto handle_unusual;
4398 continue;
4399 // string compression_algorithm = 2;
4400 case 2:
4401 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
4402 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_compression_algorithm(), ptr, ctx, "tensorflow.RPCOptions.compression_algorithm");
4403 CHK_(ptr);
4404 } else goto handle_unusual;
4405 continue;
4406 // int32 compression_level = 3;
4407 case 3:
4408 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
4409 compression_level_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
4410 CHK_(ptr);
4411 } else goto handle_unusual;
4412 continue;
4413 // bool cache_rpc_response = 4;
4414 case 4:
4415 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
4416 cache_rpc_response_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
4417 CHK_(ptr);
4418 } else goto handle_unusual;
4419 continue;
4420 // bool disable_session_connection_sharing = 5;
4421 case 5:
4422 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
4423 disable_session_connection_sharing_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
4424 CHK_(ptr);
4425 } else goto handle_unusual;
4426 continue;
4427 // int32 num_channels_per_target = 6;
4428 case 6:
4429 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 48)) {
4430 num_channels_per_target_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
4431 CHK_(ptr);
4432 } else goto handle_unusual;
4433 continue;
4434 default: {
4435 handle_unusual:
4436 if ((tag & 7) == 4 || tag == 0) {
4437 ctx->SetLastTag(tag);
4438 goto success;
4439 }
4440 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
4441 CHK_(ptr != nullptr);
4442 continue;
4443 }
4444 } // switch
4445 } // while
4446success:
4447 return ptr;
4448failure:
4449 ptr = nullptr;
4450 goto success;
4451#undef CHK_
4452}
4453#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
4454bool RPCOptions::MergePartialFromCodedStream(
4455 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
4456#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
4457 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
4458 // @@protoc_insertion_point(parse_start:tensorflow.RPCOptions)
4459 for (;;) {
4460 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
4461 tag = p.first;
4462 if (!p.second) goto handle_unusual;
4463 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
4464 // bool use_rpc_for_inprocess_master = 1;
4465 case 1: {
4466 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
4467
4468 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
4469 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
4470 input, &use_rpc_for_inprocess_master_)));
4471 } else {
4472 goto handle_unusual;
4473 }
4474 break;
4475 }
4476
4477 // string compression_algorithm = 2;
4478 case 2: {
4479 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
4480 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
4481 input, this->mutable_compression_algorithm()));
4482 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
4483 this->compression_algorithm().data(), static_cast<int>(this->compression_algorithm().length()),
4484 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
4485 "tensorflow.RPCOptions.compression_algorithm"));
4486 } else {
4487 goto handle_unusual;
4488 }
4489 break;
4490 }
4491
4492 // int32 compression_level = 3;
4493 case 3: {
4494 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
4495
4496 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
4497 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
4498 input, &compression_level_)));
4499 } else {
4500 goto handle_unusual;
4501 }
4502 break;
4503 }
4504
4505 // bool cache_rpc_response = 4;
4506 case 4: {
4507 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {
4508
4509 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
4510 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
4511 input, &cache_rpc_response_)));
4512 } else {
4513 goto handle_unusual;
4514 }
4515 break;
4516 }
4517
4518 // bool disable_session_connection_sharing = 5;
4519 case 5: {
4520 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
4521
4522 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
4523 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
4524 input, &disable_session_connection_sharing_)));
4525 } else {
4526 goto handle_unusual;
4527 }
4528 break;
4529 }
4530
4531 // int32 num_channels_per_target = 6;
4532 case 6: {
4533 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (48 & 0xFF)) {
4534
4535 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
4536 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
4537 input, &num_channels_per_target_)));
4538 } else {
4539 goto handle_unusual;
4540 }
4541 break;
4542 }
4543
4544 default: {
4545 handle_unusual:
4546 if (tag == 0) {
4547 goto success;
4548 }
4549 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
4550 input, tag, _internal_metadata_.mutable_unknown_fields()));
4551 break;
4552 }
4553 }
4554 }
4555success:
4556 // @@protoc_insertion_point(parse_success:tensorflow.RPCOptions)
4557 return true;
4558failure:
4559 // @@protoc_insertion_point(parse_failure:tensorflow.RPCOptions)
4560 return false;
4561#undef DO_
4562}
4563#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
4564
4565void RPCOptions::SerializeWithCachedSizes(
4566 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
4567 // @@protoc_insertion_point(serialize_start:tensorflow.RPCOptions)
4568 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4569 (void) cached_has_bits;
4570
4571 // bool use_rpc_for_inprocess_master = 1;
4572 if (this->use_rpc_for_inprocess_master() != 0) {
4573 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(1, this->use_rpc_for_inprocess_master(), output);
4574 }
4575
4576 // string compression_algorithm = 2;
4577 if (this->compression_algorithm().size() > 0) {
4578 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
4579 this->compression_algorithm().data(), static_cast<int>(this->compression_algorithm().length()),
4580 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
4581 "tensorflow.RPCOptions.compression_algorithm");
4582 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
4583 2, this->compression_algorithm(), output);
4584 }
4585
4586 // int32 compression_level = 3;
4587 if (this->compression_level() != 0) {
4588 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(3, this->compression_level(), output);
4589 }
4590
4591 // bool cache_rpc_response = 4;
4592 if (this->cache_rpc_response() != 0) {
4593 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(4, this->cache_rpc_response(), output);
4594 }
4595
4596 // bool disable_session_connection_sharing = 5;
4597 if (this->disable_session_connection_sharing() != 0) {
4598 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(5, this->disable_session_connection_sharing(), output);
4599 }
4600
4601 // int32 num_channels_per_target = 6;
4602 if (this->num_channels_per_target() != 0) {
4603 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(6, this->num_channels_per_target(), output);
4604 }
4605
4606 if (_internal_metadata_.have_unknown_fields()) {
4607 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
4608 _internal_metadata_.unknown_fields(), output);
4609 }
4610 // @@protoc_insertion_point(serialize_end:tensorflow.RPCOptions)
4611}
4612
4613::PROTOBUF_NAMESPACE_ID::uint8* RPCOptions::InternalSerializeWithCachedSizesToArray(
4614 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
4615 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RPCOptions)
4616 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4617 (void) cached_has_bits;
4618
4619 // bool use_rpc_for_inprocess_master = 1;
4620 if (this->use_rpc_for_inprocess_master() != 0) {
4621 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(1, this->use_rpc_for_inprocess_master(), target);
4622 }
4623
4624 // string compression_algorithm = 2;
4625 if (this->compression_algorithm().size() > 0) {
4626 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
4627 this->compression_algorithm().data(), static_cast<int>(this->compression_algorithm().length()),
4628 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
4629 "tensorflow.RPCOptions.compression_algorithm");
4630 target =
4631 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
4632 2, this->compression_algorithm(), target);
4633 }
4634
4635 // int32 compression_level = 3;
4636 if (this->compression_level() != 0) {
4637 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(3, this->compression_level(), target);
4638 }
4639
4640 // bool cache_rpc_response = 4;
4641 if (this->cache_rpc_response() != 0) {
4642 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(4, this->cache_rpc_response(), target);
4643 }
4644
4645 // bool disable_session_connection_sharing = 5;
4646 if (this->disable_session_connection_sharing() != 0) {
4647 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(5, this->disable_session_connection_sharing(), target);
4648 }
4649
4650 // int32 num_channels_per_target = 6;
4651 if (this->num_channels_per_target() != 0) {
4652 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(6, this->num_channels_per_target(), target);
4653 }
4654
4655 if (_internal_metadata_.have_unknown_fields()) {
4656 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
4657 _internal_metadata_.unknown_fields(), target);
4658 }
4659 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RPCOptions)
4660 return target;
4661}
4662
4663size_t RPCOptions::ByteSizeLong() const {
4664// @@protoc_insertion_point(message_byte_size_start:tensorflow.RPCOptions)
4665 size_t total_size = 0;
4666
4667 if (_internal_metadata_.have_unknown_fields()) {
4668 total_size +=
4669 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
4670 _internal_metadata_.unknown_fields());
4671 }
4672 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4673 // Prevent compiler warnings about cached_has_bits being unused
4674 (void) cached_has_bits;
4675
4676 // string compression_algorithm = 2;
4677 if (this->compression_algorithm().size() > 0) {
4678 total_size += 1 +
4679 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
4680 this->compression_algorithm());
4681 }
4682
4683 // int32 compression_level = 3;
4684 if (this->compression_level() != 0) {
4685 total_size += 1 +
4686 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
4687 this->compression_level());
4688 }
4689
4690 // bool use_rpc_for_inprocess_master = 1;
4691 if (this->use_rpc_for_inprocess_master() != 0) {
4692 total_size += 1 + 1;
4693 }
4694
4695 // bool cache_rpc_response = 4;
4696 if (this->cache_rpc_response() != 0) {
4697 total_size += 1 + 1;
4698 }
4699
4700 // bool disable_session_connection_sharing = 5;
4701 if (this->disable_session_connection_sharing() != 0) {
4702 total_size += 1 + 1;
4703 }
4704
4705 // int32 num_channels_per_target = 6;
4706 if (this->num_channels_per_target() != 0) {
4707 total_size += 1 +
4708 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
4709 this->num_channels_per_target());
4710 }
4711
4712 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
4713 SetCachedSize(cached_size);
4714 return total_size;
4715}
4716
4717void RPCOptions::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
4718// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.RPCOptions)
4719 GOOGLE_DCHECK_NE(&from, this);
4720 const RPCOptions* source =
4721 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<RPCOptions>(
4722 &from);
4723 if (source == nullptr) {
4724 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.RPCOptions)
4725 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
4726 } else {
4727 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.RPCOptions)
4728 MergeFrom(*source);
4729 }
4730}
4731
4732void RPCOptions::MergeFrom(const RPCOptions& from) {
4733// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RPCOptions)
4734 GOOGLE_DCHECK_NE(&from, this);
4735 _internal_metadata_.MergeFrom(from._internal_metadata_);
4736 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4737 (void) cached_has_bits;
4738
4739 if (from.compression_algorithm().size() > 0) {
4740 set_compression_algorithm(from.compression_algorithm());
4741 }
4742 if (from.compression_level() != 0) {
4743 set_compression_level(from.compression_level());
4744 }
4745 if (from.use_rpc_for_inprocess_master() != 0) {
4746 set_use_rpc_for_inprocess_master(from.use_rpc_for_inprocess_master());
4747 }
4748 if (from.cache_rpc_response() != 0) {
4749 set_cache_rpc_response(from.cache_rpc_response());
4750 }
4751 if (from.disable_session_connection_sharing() != 0) {
4752 set_disable_session_connection_sharing(from.disable_session_connection_sharing());
4753 }
4754 if (from.num_channels_per_target() != 0) {
4755 set_num_channels_per_target(from.num_channels_per_target());
4756 }
4757}
4758
4759void RPCOptions::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
4760// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.RPCOptions)
4761 if (&from == this) return;
4762 Clear();
4763 MergeFrom(from);
4764}
4765
4766void RPCOptions::CopyFrom(const RPCOptions& from) {
4767// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RPCOptions)
4768 if (&from == this) return;
4769 Clear();
4770 MergeFrom(from);
4771}
4772
4773bool RPCOptions::IsInitialized() const {
4774 return true;
4775}
4776
4777void RPCOptions::InternalSwap(RPCOptions* other) {
4778 using std::swap;
4779 _internal_metadata_.Swap(&other->_internal_metadata_);
4780 compression_algorithm_.Swap(&other->compression_algorithm_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
4781 GetArenaNoVirtual());
4782 swap(compression_level_, other->compression_level_);
4783 swap(use_rpc_for_inprocess_master_, other->use_rpc_for_inprocess_master_);
4784 swap(cache_rpc_response_, other->cache_rpc_response_);
4785 swap(disable_session_connection_sharing_, other->disable_session_connection_sharing_);
4786 swap(num_channels_per_target_, other->num_channels_per_target_);
4787}
4788
4789::PROTOBUF_NAMESPACE_ID::Metadata RPCOptions::GetMetadata() const {
4790 return GetMetadataStatic();
4791}
4792
4793
4794// ===================================================================
4795
4796void SessionMetadata::InitAsDefaultInstance() {
4797}
4798class SessionMetadata::_Internal {
4799 public:
4800};
4801
4802SessionMetadata::SessionMetadata()
4803 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
4804 SharedCtor();
4805 // @@protoc_insertion_point(constructor:tensorflow.SessionMetadata)
4806}
4807SessionMetadata::SessionMetadata(::PROTOBUF_NAMESPACE_ID::Arena* arena)
4808 : ::PROTOBUF_NAMESPACE_ID::Message(),
4809 _internal_metadata_(arena) {
4810 SharedCtor();
4811 RegisterArenaDtor(arena);
4812 // @@protoc_insertion_point(arena_constructor:tensorflow.SessionMetadata)
4813}
4814SessionMetadata::SessionMetadata(const SessionMetadata& from)
4815 : ::PROTOBUF_NAMESPACE_ID::Message(),
4816 _internal_metadata_(nullptr) {
4817 _internal_metadata_.MergeFrom(from._internal_metadata_);
4818 name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
4819 if (!from.name().empty()) {
4820 name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.name(),
4821 GetArenaNoVirtual());
4822 }
4823 version_ = from.version_;
4824 // @@protoc_insertion_point(copy_constructor:tensorflow.SessionMetadata)
4825}
4826
4827void SessionMetadata::SharedCtor() {
4828 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_SessionMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
4829 name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
4830 version_ = PROTOBUF_LONGLONG(0);
4831}
4832
4833SessionMetadata::~SessionMetadata() {
4834 // @@protoc_insertion_point(destructor:tensorflow.SessionMetadata)
4835 SharedDtor();
4836}
4837
4838void SessionMetadata::SharedDtor() {
4839 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
4840 name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
4841}
4842
4843void SessionMetadata::ArenaDtor(void* object) {
4844 SessionMetadata* _this = reinterpret_cast< SessionMetadata* >(object);
4845 (void)_this;
4846}
4847void SessionMetadata::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
4848}
4849void SessionMetadata::SetCachedSize(int size) const {
4850 _cached_size_.Set(size);
4851}
4852const SessionMetadata& SessionMetadata::default_instance() {
4853 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_SessionMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
4854 return *internal_default_instance();
4855}
4856
4857
4858void SessionMetadata::Clear() {
4859// @@protoc_insertion_point(message_clear_start:tensorflow.SessionMetadata)
4860 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4861 // Prevent compiler warnings about cached_has_bits being unused
4862 (void) cached_has_bits;
4863
4864 name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
4865 version_ = PROTOBUF_LONGLONG(0);
4866 _internal_metadata_.Clear();
4867}
4868
4869#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
4870const char* SessionMetadata::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
4871#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
4872 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
4873 while (!ctx->Done(&ptr)) {
4874 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
4875 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
4876 CHK_(ptr);
4877 switch (tag >> 3) {
4878 // string name = 1;
4879 case 1:
4880 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
4881 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_name(), ptr, ctx, "tensorflow.SessionMetadata.name");
4882 CHK_(ptr);
4883 } else goto handle_unusual;
4884 continue;
4885 // int64 version = 2;
4886 case 2:
4887 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
4888 version_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
4889 CHK_(ptr);
4890 } else goto handle_unusual;
4891 continue;
4892 default: {
4893 handle_unusual:
4894 if ((tag & 7) == 4 || tag == 0) {
4895 ctx->SetLastTag(tag);
4896 goto success;
4897 }
4898 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
4899 CHK_(ptr != nullptr);
4900 continue;
4901 }
4902 } // switch
4903 } // while
4904success:
4905 return ptr;
4906failure:
4907 ptr = nullptr;
4908 goto success;
4909#undef CHK_
4910}
4911#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
4912bool SessionMetadata::MergePartialFromCodedStream(
4913 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
4914#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
4915 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
4916 // @@protoc_insertion_point(parse_start:tensorflow.SessionMetadata)
4917 for (;;) {
4918 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
4919 tag = p.first;
4920 if (!p.second) goto handle_unusual;
4921 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
4922 // string name = 1;
4923 case 1: {
4924 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
4925 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
4926 input, this->mutable_name()));
4927 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
4928 this->name().data(), static_cast<int>(this->name().length()),
4929 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
4930 "tensorflow.SessionMetadata.name"));
4931 } else {
4932 goto handle_unusual;
4933 }
4934 break;
4935 }
4936
4937 // int64 version = 2;
4938 case 2: {
4939 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {
4940
4941 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
4942 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
4943 input, &version_)));
4944 } else {
4945 goto handle_unusual;
4946 }
4947 break;
4948 }
4949
4950 default: {
4951 handle_unusual:
4952 if (tag == 0) {
4953 goto success;
4954 }
4955 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
4956 input, tag, _internal_metadata_.mutable_unknown_fields()));
4957 break;
4958 }
4959 }
4960 }
4961success:
4962 // @@protoc_insertion_point(parse_success:tensorflow.SessionMetadata)
4963 return true;
4964failure:
4965 // @@protoc_insertion_point(parse_failure:tensorflow.SessionMetadata)
4966 return false;
4967#undef DO_
4968}
4969#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
4970
4971void SessionMetadata::SerializeWithCachedSizes(
4972 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
4973 // @@protoc_insertion_point(serialize_start:tensorflow.SessionMetadata)
4974 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
4975 (void) cached_has_bits;
4976
4977 // string name = 1;
4978 if (this->name().size() > 0) {
4979 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
4980 this->name().data(), static_cast<int>(this->name().length()),
4981 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
4982 "tensorflow.SessionMetadata.name");
4983 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
4984 1, this->name(), output);
4985 }
4986
4987 // int64 version = 2;
4988 if (this->version() != 0) {
4989 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(2, this->version(), output);
4990 }
4991
4992 if (_internal_metadata_.have_unknown_fields()) {
4993 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
4994 _internal_metadata_.unknown_fields(), output);
4995 }
4996 // @@protoc_insertion_point(serialize_end:tensorflow.SessionMetadata)
4997}
4998
4999::PROTOBUF_NAMESPACE_ID::uint8* SessionMetadata::InternalSerializeWithCachedSizesToArray(
5000 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
5001 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.SessionMetadata)
5002 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
5003 (void) cached_has_bits;
5004
5005 // string name = 1;
5006 if (this->name().size() > 0) {
5007 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
5008 this->name().data(), static_cast<int>(this->name().length()),
5009 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
5010 "tensorflow.SessionMetadata.name");
5011 target =
5012 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
5013 1, this->name(), target);
5014 }
5015
5016 // int64 version = 2;
5017 if (this->version() != 0) {
5018 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(2, this->version(), target);
5019 }
5020
5021 if (_internal_metadata_.have_unknown_fields()) {
5022 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
5023 _internal_metadata_.unknown_fields(), target);
5024 }
5025 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.SessionMetadata)
5026 return target;
5027}
5028
5029size_t SessionMetadata::ByteSizeLong() const {
5030// @@protoc_insertion_point(message_byte_size_start:tensorflow.SessionMetadata)
5031 size_t total_size = 0;
5032
5033 if (_internal_metadata_.have_unknown_fields()) {
5034 total_size +=
5035 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
5036 _internal_metadata_.unknown_fields());
5037 }
5038 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
5039 // Prevent compiler warnings about cached_has_bits being unused
5040 (void) cached_has_bits;
5041
5042 // string name = 1;
5043 if (this->name().size() > 0) {
5044 total_size += 1 +
5045 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
5046 this->name());
5047 }
5048
5049 // int64 version = 2;
5050 if (this->version() != 0) {
5051 total_size += 1 +
5052 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
5053 this->version());
5054 }
5055
5056 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
5057 SetCachedSize(cached_size);
5058 return total_size;
5059}
5060
5061void SessionMetadata::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
5062// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.SessionMetadata)
5063 GOOGLE_DCHECK_NE(&from, this);
5064 const SessionMetadata* source =
5065 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<SessionMetadata>(
5066 &from);
5067 if (source == nullptr) {
5068 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.SessionMetadata)
5069 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
5070 } else {
5071 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.SessionMetadata)
5072 MergeFrom(*source);
5073 }
5074}
5075
5076void SessionMetadata::MergeFrom(const SessionMetadata& from) {
5077// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.SessionMetadata)
5078 GOOGLE_DCHECK_NE(&from, this);
5079 _internal_metadata_.MergeFrom(from._internal_metadata_);
5080 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
5081 (void) cached_has_bits;
5082
5083 if (from.name().size() > 0) {
5084 set_name(from.name());
5085 }
5086 if (from.version() != 0) {
5087 set_version(from.version());
5088 }
5089}
5090
5091void SessionMetadata::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
5092// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.SessionMetadata)
5093 if (&from == this) return;
5094 Clear();
5095 MergeFrom(from);
5096}
5097
5098void SessionMetadata::CopyFrom(const SessionMetadata& from) {
5099// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.SessionMetadata)
5100 if (&from == this) return;
5101 Clear();
5102 MergeFrom(from);
5103}
5104
5105bool SessionMetadata::IsInitialized() const {
5106 return true;
5107}
5108
5109void SessionMetadata::InternalSwap(SessionMetadata* other) {
5110 using std::swap;
5111 _internal_metadata_.Swap(&other->_internal_metadata_);
5112 name_.Swap(&other->name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
5113 GetArenaNoVirtual());
5114 swap(version_, other->version_);
5115}
5116
5117::PROTOBUF_NAMESPACE_ID::Metadata SessionMetadata::GetMetadata() const {
5118 return GetMetadataStatic();
5119}
5120
5121
5122// ===================================================================
5123
5124ConfigProto_DeviceCountEntry_DoNotUse::ConfigProto_DeviceCountEntry_DoNotUse() {}
5125ConfigProto_DeviceCountEntry_DoNotUse::ConfigProto_DeviceCountEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
5126 : SuperType(arena) {}
5127void ConfigProto_DeviceCountEntry_DoNotUse::MergeFrom(const ConfigProto_DeviceCountEntry_DoNotUse& other) {
5128 MergeFromInternal(other);
5129}
5130::PROTOBUF_NAMESPACE_ID::Metadata ConfigProto_DeviceCountEntry_DoNotUse::GetMetadata() const {
5131 return GetMetadataStatic();
5132}
5133void ConfigProto_DeviceCountEntry_DoNotUse::MergeFrom(
5134 const ::PROTOBUF_NAMESPACE_ID::Message& other) {
5135 ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom(other);
5136}
5137
5138
5139// ===================================================================
5140
5141void ConfigProto_Experimental::InitAsDefaultInstance() {
5142 ::tensorflow::_ConfigProto_Experimental_default_instance_._instance.get_mutable()->session_metadata_ = const_cast< ::tensorflow::SessionMetadata*>(
5143 ::tensorflow::SessionMetadata::internal_default_instance());
5144 ::tensorflow::_ConfigProto_Experimental_default_instance_._instance.get_mutable()->coordination_config_ = const_cast< ::tensorflow::CoordinationServiceConfig*>(
5145 ::tensorflow::CoordinationServiceConfig::internal_default_instance());
5146}
5147class ConfigProto_Experimental::_Internal {
5148 public:
5149 static const ::tensorflow::SessionMetadata& session_metadata(const ConfigProto_Experimental* msg);
5150 static const ::tensorflow::CoordinationServiceConfig& coordination_config(const ConfigProto_Experimental* msg);
5151};
5152
5153const ::tensorflow::SessionMetadata&
5154ConfigProto_Experimental::_Internal::session_metadata(const ConfigProto_Experimental* msg) {
5155 return *msg->session_metadata_;
5156}
5157const ::tensorflow::CoordinationServiceConfig&
5158ConfigProto_Experimental::_Internal::coordination_config(const ConfigProto_Experimental* msg) {
5159 return *msg->coordination_config_;
5160}
5161void ConfigProto_Experimental::unsafe_arena_set_allocated_session_metadata(
5162 ::tensorflow::SessionMetadata* session_metadata) {
5163 if (GetArenaNoVirtual() == nullptr) {
5164 delete session_metadata_;
5165 }
5166 session_metadata_ = session_metadata;
5167 if (session_metadata) {
5168
5169 } else {
5170
5171 }
5172 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.Experimental.session_metadata)
5173}
5174void ConfigProto_Experimental::unsafe_arena_set_allocated_coordination_config(
5175 ::tensorflow::CoordinationServiceConfig* coordination_config) {
5176 if (GetArenaNoVirtual() == nullptr) {
5177 delete coordination_config_;
5178 }
5179 coordination_config_ = coordination_config;
5180 if (coordination_config) {
5181
5182 } else {
5183
5184 }
5185 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.Experimental.coordination_config)
5186}
5187void ConfigProto_Experimental::clear_coordination_config() {
5188 if (GetArenaNoVirtual() == nullptr && coordination_config_ != nullptr) {
5189 delete coordination_config_;
5190 }
5191 coordination_config_ = nullptr;
5192}
5193ConfigProto_Experimental::ConfigProto_Experimental()
5194 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
5195 SharedCtor();
5196 // @@protoc_insertion_point(constructor:tensorflow.ConfigProto.Experimental)
5197}
5198ConfigProto_Experimental::ConfigProto_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena)
5199 : ::PROTOBUF_NAMESPACE_ID::Message(),
5200 _internal_metadata_(arena) {
5201 SharedCtor();
5202 RegisterArenaDtor(arena);
5203 // @@protoc_insertion_point(arena_constructor:tensorflow.ConfigProto.Experimental)
5204}
5205ConfigProto_Experimental::ConfigProto_Experimental(const ConfigProto_Experimental& from)
5206 : ::PROTOBUF_NAMESPACE_ID::Message(),
5207 _internal_metadata_(nullptr) {
5208 _internal_metadata_.MergeFrom(from._internal_metadata_);
5209 collective_group_leader_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
5210 if (!from.collective_group_leader().empty()) {
5211 collective_group_leader_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.collective_group_leader(),
5212 GetArenaNoVirtual());
5213 }
5214 executor_type_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
5215 if (!from.executor_type().empty()) {
5216 executor_type_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.executor_type(),
5217 GetArenaNoVirtual());
5218 }
5219 if (from.has_session_metadata()) {
5220 session_metadata_ = new ::tensorflow::SessionMetadata(*from.session_metadata_);
5221 } else {
5222 session_metadata_ = nullptr;
5223 }
5224 if (from.has_coordination_config()) {
5225 coordination_config_ = new ::tensorflow::CoordinationServiceConfig(*from.coordination_config_);
5226 } else {
5227 coordination_config_ = nullptr;
5228 }
5229 ::memcpy(&recv_buf_max_chunk_, &from.recv_buf_max_chunk_,
5230 static_cast<size_t>(reinterpret_cast<char*>(&xla_prefer_single_graph_cluster_) -
5231 reinterpret_cast<char*>(&recv_buf_max_chunk_)) + sizeof(xla_prefer_single_graph_cluster_));
5232 // @@protoc_insertion_point(copy_constructor:tensorflow.ConfigProto.Experimental)
5233}
5234
5235void ConfigProto_Experimental::SharedCtor() {
5236 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_ConfigProto_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
5237 collective_group_leader_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
5238 executor_type_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
5239 ::memset(&session_metadata_, 0, static_cast<size_t>(
5240 reinterpret_cast<char*>(&xla_prefer_single_graph_cluster_) -
5241 reinterpret_cast<char*>(&session_metadata_)) + sizeof(xla_prefer_single_graph_cluster_));
5242}
5243
5244ConfigProto_Experimental::~ConfigProto_Experimental() {
5245 // @@protoc_insertion_point(destructor:tensorflow.ConfigProto.Experimental)
5246 SharedDtor();
5247}
5248
5249void ConfigProto_Experimental::SharedDtor() {
5250 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
5251 collective_group_leader_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
5252 executor_type_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
5253 if (this != internal_default_instance()) delete session_metadata_;
5254 if (this != internal_default_instance()) delete coordination_config_;
5255}
5256
5257void ConfigProto_Experimental::ArenaDtor(void* object) {
5258 ConfigProto_Experimental* _this = reinterpret_cast< ConfigProto_Experimental* >(object);
5259 (void)_this;
5260}
5261void ConfigProto_Experimental::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
5262}
5263void ConfigProto_Experimental::SetCachedSize(int size) const {
5264 _cached_size_.Set(size);
5265}
5266const ConfigProto_Experimental& ConfigProto_Experimental::default_instance() {
5267 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_ConfigProto_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
5268 return *internal_default_instance();
5269}
5270
5271
5272void ConfigProto_Experimental::Clear() {
5273// @@protoc_insertion_point(message_clear_start:tensorflow.ConfigProto.Experimental)
5274 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
5275 // Prevent compiler warnings about cached_has_bits being unused
5276 (void) cached_has_bits;
5277
5278 collective_group_leader_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
5279 executor_type_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
5280 if (GetArenaNoVirtual() == nullptr && session_metadata_ != nullptr) {
5281 delete session_metadata_;
5282 }
5283 session_metadata_ = nullptr;
5284 if (GetArenaNoVirtual() == nullptr && coordination_config_ != nullptr) {
5285 delete coordination_config_;
5286 }
5287 coordination_config_ = nullptr;
5288 ::memset(&recv_buf_max_chunk_, 0, static_cast<size_t>(
5289 reinterpret_cast<char*>(&xla_prefer_single_graph_cluster_) -
5290 reinterpret_cast<char*>(&recv_buf_max_chunk_)) + sizeof(xla_prefer_single_graph_cluster_));
5291 _internal_metadata_.Clear();
5292}
5293
5294#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
5295const char* ConfigProto_Experimental::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
5296#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
5297 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
5298 while (!ctx->Done(&ptr)) {
5299 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
5300 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
5301 CHK_(ptr);
5302 switch (tag >> 3) {
5303 // string collective_group_leader = 1;
5304 case 1:
5305 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
5306 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_collective_group_leader(), ptr, ctx, "tensorflow.ConfigProto.Experimental.collective_group_leader");
5307 CHK_(ptr);
5308 } else goto handle_unusual;
5309 continue;
5310 // string executor_type = 3;
5311 case 3:
5312 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
5313 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_executor_type(), ptr, ctx, "tensorflow.ConfigProto.Experimental.executor_type");
5314 CHK_(ptr);
5315 } else goto handle_unusual;
5316 continue;
5317 // int32 recv_buf_max_chunk = 4;
5318 case 4:
5319 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
5320 recv_buf_max_chunk_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5321 CHK_(ptr);
5322 } else goto handle_unusual;
5323 continue;
5324 // bool use_numa_affinity = 5;
5325 case 5:
5326 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
5327 use_numa_affinity_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5328 CHK_(ptr);
5329 } else goto handle_unusual;
5330 continue;
5331 // bool collective_deterministic_sequential_execution = 6;
5332 case 6:
5333 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 48)) {
5334 collective_deterministic_sequential_execution_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5335 CHK_(ptr);
5336 } else goto handle_unusual;
5337 continue;
5338 // bool collective_nccl = 7;
5339 case 7:
5340 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 56)) {
5341 collective_nccl_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5342 CHK_(ptr);
5343 } else goto handle_unusual;
5344 continue;
5345 // bool share_session_state_in_clusterspec_propagation = 8;
5346 case 8:
5347 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 64)) {
5348 share_session_state_in_clusterspec_propagation_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5349 CHK_(ptr);
5350 } else goto handle_unusual;
5351 continue;
5352 // bool disable_thread_spinning = 9;
5353 case 9:
5354 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 72)) {
5355 disable_thread_spinning_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5356 CHK_(ptr);
5357 } else goto handle_unusual;
5358 continue;
5359 // bool share_cluster_devices_in_session = 10;
5360 case 10:
5361 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 80)) {
5362 share_cluster_devices_in_session_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5363 CHK_(ptr);
5364 } else goto handle_unusual;
5365 continue;
5366 // .tensorflow.SessionMetadata session_metadata = 11;
5367 case 11:
5368 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 90)) {
5369 ptr = ctx->ParseMessage(mutable_session_metadata(), ptr);
5370 CHK_(ptr);
5371 } else goto handle_unusual;
5372 continue;
5373 // bool optimize_for_static_graph = 12;
5374 case 12:
5375 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 96)) {
5376 optimize_for_static_graph_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5377 CHK_(ptr);
5378 } else goto handle_unusual;
5379 continue;
5380 // bool enable_mlir_bridge = 13;
5381 case 13:
5382 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 104)) {
5383 enable_mlir_bridge_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5384 CHK_(ptr);
5385 } else goto handle_unusual;
5386 continue;
5387 // bool disable_output_partition_graphs = 14;
5388 case 14:
5389 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 112)) {
5390 disable_output_partition_graphs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5391 CHK_(ptr);
5392 } else goto handle_unusual;
5393 continue;
5394 // int64 xla_fusion_autotuner_thresh = 15;
5395 case 15:
5396 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 120)) {
5397 xla_fusion_autotuner_thresh_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5398 CHK_(ptr);
5399 } else goto handle_unusual;
5400 continue;
5401 // bool enable_mlir_graph_optimization = 16;
5402 case 16:
5403 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 128)) {
5404 enable_mlir_graph_optimization_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5405 CHK_(ptr);
5406 } else goto handle_unusual;
5407 continue;
5408 // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
5409 case 17:
5410 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 136)) {
5411 ::PROTOBUF_NAMESPACE_ID::uint64 val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5412 CHK_(ptr);
5413 set_mlir_bridge_rollout(static_cast<::tensorflow::ConfigProto_Experimental_MlirBridgeRollout>(val));
5414 } else goto handle_unusual;
5415 continue;
5416 // bool use_tfrt = 18;
5417 case 18:
5418 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 144)) {
5419 use_tfrt_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5420 CHK_(ptr);
5421 } else goto handle_unusual;
5422 continue;
5423 // bool disable_functional_ops_lowering = 21;
5424 case 21:
5425 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 168)) {
5426 disable_functional_ops_lowering_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5427 CHK_(ptr);
5428 } else goto handle_unusual;
5429 continue;
5430 // bool xla_prefer_single_graph_cluster = 22;
5431 case 22:
5432 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 176)) {
5433 xla_prefer_single_graph_cluster_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
5434 CHK_(ptr);
5435 } else goto handle_unusual;
5436 continue;
5437 // .tensorflow.CoordinationServiceConfig coordination_config = 23;
5438 case 23:
5439 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 186)) {
5440 ptr = ctx->ParseMessage(mutable_coordination_config(), ptr);
5441 CHK_(ptr);
5442 } else goto handle_unusual;
5443 continue;
5444 default: {
5445 handle_unusual:
5446 if ((tag & 7) == 4 || tag == 0) {
5447 ctx->SetLastTag(tag);
5448 goto success;
5449 }
5450 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
5451 CHK_(ptr != nullptr);
5452 continue;
5453 }
5454 } // switch
5455 } // while
5456success:
5457 return ptr;
5458failure:
5459 ptr = nullptr;
5460 goto success;
5461#undef CHK_
5462}
5463#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
5464bool ConfigProto_Experimental::MergePartialFromCodedStream(
5465 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
5466#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
5467 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
5468 // @@protoc_insertion_point(parse_start:tensorflow.ConfigProto.Experimental)
5469 for (;;) {
5470 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(16383u);
5471 tag = p.first;
5472 if (!p.second) goto handle_unusual;
5473 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
5474 // string collective_group_leader = 1;
5475 case 1: {
5476 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
5477 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
5478 input, this->mutable_collective_group_leader()));
5479 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
5480 this->collective_group_leader().data(), static_cast<int>(this->collective_group_leader().length()),
5481 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
5482 "tensorflow.ConfigProto.Experimental.collective_group_leader"));
5483 } else {
5484 goto handle_unusual;
5485 }
5486 break;
5487 }
5488
5489 // string executor_type = 3;
5490 case 3: {
5491 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
5492 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
5493 input, this->mutable_executor_type()));
5494 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
5495 this->executor_type().data(), static_cast<int>(this->executor_type().length()),
5496 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
5497 "tensorflow.ConfigProto.Experimental.executor_type"));
5498 } else {
5499 goto handle_unusual;
5500 }
5501 break;
5502 }
5503
5504 // int32 recv_buf_max_chunk = 4;
5505 case 4: {
5506 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {
5507
5508 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5509 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
5510 input, &recv_buf_max_chunk_)));
5511 } else {
5512 goto handle_unusual;
5513 }
5514 break;
5515 }
5516
5517 // bool use_numa_affinity = 5;
5518 case 5: {
5519 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
5520
5521 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5522 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5523 input, &use_numa_affinity_)));
5524 } else {
5525 goto handle_unusual;
5526 }
5527 break;
5528 }
5529
5530 // bool collective_deterministic_sequential_execution = 6;
5531 case 6: {
5532 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (48 & 0xFF)) {
5533
5534 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5535 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5536 input, &collective_deterministic_sequential_execution_)));
5537 } else {
5538 goto handle_unusual;
5539 }
5540 break;
5541 }
5542
5543 // bool collective_nccl = 7;
5544 case 7: {
5545 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (56 & 0xFF)) {
5546
5547 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5548 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5549 input, &collective_nccl_)));
5550 } else {
5551 goto handle_unusual;
5552 }
5553 break;
5554 }
5555
5556 // bool share_session_state_in_clusterspec_propagation = 8;
5557 case 8: {
5558 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (64 & 0xFF)) {
5559
5560 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5561 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5562 input, &share_session_state_in_clusterspec_propagation_)));
5563 } else {
5564 goto handle_unusual;
5565 }
5566 break;
5567 }
5568
5569 // bool disable_thread_spinning = 9;
5570 case 9: {
5571 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (72 & 0xFF)) {
5572
5573 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5574 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5575 input, &disable_thread_spinning_)));
5576 } else {
5577 goto handle_unusual;
5578 }
5579 break;
5580 }
5581
5582 // bool share_cluster_devices_in_session = 10;
5583 case 10: {
5584 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (80 & 0xFF)) {
5585
5586 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5587 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5588 input, &share_cluster_devices_in_session_)));
5589 } else {
5590 goto handle_unusual;
5591 }
5592 break;
5593 }
5594
5595 // .tensorflow.SessionMetadata session_metadata = 11;
5596 case 11: {
5597 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (90 & 0xFF)) {
5598 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
5599 input, mutable_session_metadata()));
5600 } else {
5601 goto handle_unusual;
5602 }
5603 break;
5604 }
5605
5606 // bool optimize_for_static_graph = 12;
5607 case 12: {
5608 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (96 & 0xFF)) {
5609
5610 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5611 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5612 input, &optimize_for_static_graph_)));
5613 } else {
5614 goto handle_unusual;
5615 }
5616 break;
5617 }
5618
5619 // bool enable_mlir_bridge = 13;
5620 case 13: {
5621 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (104 & 0xFF)) {
5622
5623 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5624 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5625 input, &enable_mlir_bridge_)));
5626 } else {
5627 goto handle_unusual;
5628 }
5629 break;
5630 }
5631
5632 // bool disable_output_partition_graphs = 14;
5633 case 14: {
5634 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (112 & 0xFF)) {
5635
5636 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5637 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5638 input, &disable_output_partition_graphs_)));
5639 } else {
5640 goto handle_unusual;
5641 }
5642 break;
5643 }
5644
5645 // int64 xla_fusion_autotuner_thresh = 15;
5646 case 15: {
5647 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (120 & 0xFF)) {
5648
5649 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5650 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
5651 input, &xla_fusion_autotuner_thresh_)));
5652 } else {
5653 goto handle_unusual;
5654 }
5655 break;
5656 }
5657
5658 // bool enable_mlir_graph_optimization = 16;
5659 case 16: {
5660 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (128 & 0xFF)) {
5661
5662 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5663 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5664 input, &enable_mlir_graph_optimization_)));
5665 } else {
5666 goto handle_unusual;
5667 }
5668 break;
5669 }
5670
5671 // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
5672 case 17: {
5673 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (136 & 0xFF)) {
5674 int value = 0;
5675 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5676 int, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_ENUM>(
5677 input, &value)));
5678 set_mlir_bridge_rollout(static_cast< ::tensorflow::ConfigProto_Experimental_MlirBridgeRollout >(value));
5679 } else {
5680 goto handle_unusual;
5681 }
5682 break;
5683 }
5684
5685 // bool use_tfrt = 18;
5686 case 18: {
5687 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (144 & 0xFF)) {
5688
5689 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5690 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5691 input, &use_tfrt_)));
5692 } else {
5693 goto handle_unusual;
5694 }
5695 break;
5696 }
5697
5698 // bool disable_functional_ops_lowering = 21;
5699 case 21: {
5700 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (168 & 0xFF)) {
5701
5702 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5703 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5704 input, &disable_functional_ops_lowering_)));
5705 } else {
5706 goto handle_unusual;
5707 }
5708 break;
5709 }
5710
5711 // bool xla_prefer_single_graph_cluster = 22;
5712 case 22: {
5713 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (176 & 0xFF)) {
5714
5715 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
5716 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
5717 input, &xla_prefer_single_graph_cluster_)));
5718 } else {
5719 goto handle_unusual;
5720 }
5721 break;
5722 }
5723
5724 // .tensorflow.CoordinationServiceConfig coordination_config = 23;
5725 case 23: {
5726 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (186 & 0xFF)) {
5727 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
5728 input, mutable_coordination_config()));
5729 } else {
5730 goto handle_unusual;
5731 }
5732 break;
5733 }
5734
5735 default: {
5736 handle_unusual:
5737 if (tag == 0) {
5738 goto success;
5739 }
5740 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
5741 input, tag, _internal_metadata_.mutable_unknown_fields()));
5742 break;
5743 }
5744 }
5745 }
5746success:
5747 // @@protoc_insertion_point(parse_success:tensorflow.ConfigProto.Experimental)
5748 return true;
5749failure:
5750 // @@protoc_insertion_point(parse_failure:tensorflow.ConfigProto.Experimental)
5751 return false;
5752#undef DO_
5753}
5754#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
5755
5756void ConfigProto_Experimental::SerializeWithCachedSizes(
5757 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
5758 // @@protoc_insertion_point(serialize_start:tensorflow.ConfigProto.Experimental)
5759 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
5760 (void) cached_has_bits;
5761
5762 // string collective_group_leader = 1;
5763 if (this->collective_group_leader().size() > 0) {
5764 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
5765 this->collective_group_leader().data(), static_cast<int>(this->collective_group_leader().length()),
5766 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
5767 "tensorflow.ConfigProto.Experimental.collective_group_leader");
5768 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
5769 1, this->collective_group_leader(), output);
5770 }
5771
5772 // string executor_type = 3;
5773 if (this->executor_type().size() > 0) {
5774 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
5775 this->executor_type().data(), static_cast<int>(this->executor_type().length()),
5776 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
5777 "tensorflow.ConfigProto.Experimental.executor_type");
5778 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
5779 3, this->executor_type(), output);
5780 }
5781
5782 // int32 recv_buf_max_chunk = 4;
5783 if (this->recv_buf_max_chunk() != 0) {
5784 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(4, this->recv_buf_max_chunk(), output);
5785 }
5786
5787 // bool use_numa_affinity = 5;
5788 if (this->use_numa_affinity() != 0) {
5789 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(5, this->use_numa_affinity(), output);
5790 }
5791
5792 // bool collective_deterministic_sequential_execution = 6;
5793 if (this->collective_deterministic_sequential_execution() != 0) {
5794 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(6, this->collective_deterministic_sequential_execution(), output);
5795 }
5796
5797 // bool collective_nccl = 7;
5798 if (this->collective_nccl() != 0) {
5799 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(7, this->collective_nccl(), output);
5800 }
5801
5802 // bool share_session_state_in_clusterspec_propagation = 8;
5803 if (this->share_session_state_in_clusterspec_propagation() != 0) {
5804 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(8, this->share_session_state_in_clusterspec_propagation(), output);
5805 }
5806
5807 // bool disable_thread_spinning = 9;
5808 if (this->disable_thread_spinning() != 0) {
5809 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(9, this->disable_thread_spinning(), output);
5810 }
5811
5812 // bool share_cluster_devices_in_session = 10;
5813 if (this->share_cluster_devices_in_session() != 0) {
5814 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(10, this->share_cluster_devices_in_session(), output);
5815 }
5816
5817 // .tensorflow.SessionMetadata session_metadata = 11;
5818 if (this->has_session_metadata()) {
5819 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
5820 11, _Internal::session_metadata(this), output);
5821 }
5822
5823 // bool optimize_for_static_graph = 12;
5824 if (this->optimize_for_static_graph() != 0) {
5825 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(12, this->optimize_for_static_graph(), output);
5826 }
5827
5828 // bool enable_mlir_bridge = 13;
5829 if (this->enable_mlir_bridge() != 0) {
5830 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(13, this->enable_mlir_bridge(), output);
5831 }
5832
5833 // bool disable_output_partition_graphs = 14;
5834 if (this->disable_output_partition_graphs() != 0) {
5835 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(14, this->disable_output_partition_graphs(), output);
5836 }
5837
5838 // int64 xla_fusion_autotuner_thresh = 15;
5839 if (this->xla_fusion_autotuner_thresh() != 0) {
5840 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(15, this->xla_fusion_autotuner_thresh(), output);
5841 }
5842
5843 // bool enable_mlir_graph_optimization = 16;
5844 if (this->enable_mlir_graph_optimization() != 0) {
5845 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(16, this->enable_mlir_graph_optimization(), output);
5846 }
5847
5848 // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
5849 if (this->mlir_bridge_rollout() != 0) {
5850 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnum(
5851 17, this->mlir_bridge_rollout(), output);
5852 }
5853
5854 // bool use_tfrt = 18;
5855 if (this->use_tfrt() != 0) {
5856 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(18, this->use_tfrt(), output);
5857 }
5858
5859 // bool disable_functional_ops_lowering = 21;
5860 if (this->disable_functional_ops_lowering() != 0) {
5861 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(21, this->disable_functional_ops_lowering(), output);
5862 }
5863
5864 // bool xla_prefer_single_graph_cluster = 22;
5865 if (this->xla_prefer_single_graph_cluster() != 0) {
5866 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(22, this->xla_prefer_single_graph_cluster(), output);
5867 }
5868
5869 // .tensorflow.CoordinationServiceConfig coordination_config = 23;
5870 if (this->has_coordination_config()) {
5871 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
5872 23, _Internal::coordination_config(this), output);
5873 }
5874
5875 if (_internal_metadata_.have_unknown_fields()) {
5876 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
5877 _internal_metadata_.unknown_fields(), output);
5878 }
5879 // @@protoc_insertion_point(serialize_end:tensorflow.ConfigProto.Experimental)
5880}
5881
5882::PROTOBUF_NAMESPACE_ID::uint8* ConfigProto_Experimental::InternalSerializeWithCachedSizesToArray(
5883 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
5884 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.ConfigProto.Experimental)
5885 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
5886 (void) cached_has_bits;
5887
5888 // string collective_group_leader = 1;
5889 if (this->collective_group_leader().size() > 0) {
5890 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
5891 this->collective_group_leader().data(), static_cast<int>(this->collective_group_leader().length()),
5892 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
5893 "tensorflow.ConfigProto.Experimental.collective_group_leader");
5894 target =
5895 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
5896 1, this->collective_group_leader(), target);
5897 }
5898
5899 // string executor_type = 3;
5900 if (this->executor_type().size() > 0) {
5901 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
5902 this->executor_type().data(), static_cast<int>(this->executor_type().length()),
5903 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
5904 "tensorflow.ConfigProto.Experimental.executor_type");
5905 target =
5906 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
5907 3, this->executor_type(), target);
5908 }
5909
5910 // int32 recv_buf_max_chunk = 4;
5911 if (this->recv_buf_max_chunk() != 0) {
5912 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(4, this->recv_buf_max_chunk(), target);
5913 }
5914
5915 // bool use_numa_affinity = 5;
5916 if (this->use_numa_affinity() != 0) {
5917 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(5, this->use_numa_affinity(), target);
5918 }
5919
5920 // bool collective_deterministic_sequential_execution = 6;
5921 if (this->collective_deterministic_sequential_execution() != 0) {
5922 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(6, this->collective_deterministic_sequential_execution(), target);
5923 }
5924
5925 // bool collective_nccl = 7;
5926 if (this->collective_nccl() != 0) {
5927 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(7, this->collective_nccl(), target);
5928 }
5929
5930 // bool share_session_state_in_clusterspec_propagation = 8;
5931 if (this->share_session_state_in_clusterspec_propagation() != 0) {
5932 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(8, this->share_session_state_in_clusterspec_propagation(), target);
5933 }
5934
5935 // bool disable_thread_spinning = 9;
5936 if (this->disable_thread_spinning() != 0) {
5937 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(9, this->disable_thread_spinning(), target);
5938 }
5939
5940 // bool share_cluster_devices_in_session = 10;
5941 if (this->share_cluster_devices_in_session() != 0) {
5942 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(10, this->share_cluster_devices_in_session(), target);
5943 }
5944
5945 // .tensorflow.SessionMetadata session_metadata = 11;
5946 if (this->has_session_metadata()) {
5947 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
5948 InternalWriteMessageToArray(
5949 11, _Internal::session_metadata(this), target);
5950 }
5951
5952 // bool optimize_for_static_graph = 12;
5953 if (this->optimize_for_static_graph() != 0) {
5954 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(12, this->optimize_for_static_graph(), target);
5955 }
5956
5957 // bool enable_mlir_bridge = 13;
5958 if (this->enable_mlir_bridge() != 0) {
5959 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(13, this->enable_mlir_bridge(), target);
5960 }
5961
5962 // bool disable_output_partition_graphs = 14;
5963 if (this->disable_output_partition_graphs() != 0) {
5964 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(14, this->disable_output_partition_graphs(), target);
5965 }
5966
5967 // int64 xla_fusion_autotuner_thresh = 15;
5968 if (this->xla_fusion_autotuner_thresh() != 0) {
5969 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(15, this->xla_fusion_autotuner_thresh(), target);
5970 }
5971
5972 // bool enable_mlir_graph_optimization = 16;
5973 if (this->enable_mlir_graph_optimization() != 0) {
5974 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(16, this->enable_mlir_graph_optimization(), target);
5975 }
5976
5977 // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
5978 if (this->mlir_bridge_rollout() != 0) {
5979 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray(
5980 17, this->mlir_bridge_rollout(), target);
5981 }
5982
5983 // bool use_tfrt = 18;
5984 if (this->use_tfrt() != 0) {
5985 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(18, this->use_tfrt(), target);
5986 }
5987
5988 // bool disable_functional_ops_lowering = 21;
5989 if (this->disable_functional_ops_lowering() != 0) {
5990 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(21, this->disable_functional_ops_lowering(), target);
5991 }
5992
5993 // bool xla_prefer_single_graph_cluster = 22;
5994 if (this->xla_prefer_single_graph_cluster() != 0) {
5995 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(22, this->xla_prefer_single_graph_cluster(), target);
5996 }
5997
5998 // .tensorflow.CoordinationServiceConfig coordination_config = 23;
5999 if (this->has_coordination_config()) {
6000 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
6001 InternalWriteMessageToArray(
6002 23, _Internal::coordination_config(this), target);
6003 }
6004
6005 if (_internal_metadata_.have_unknown_fields()) {
6006 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
6007 _internal_metadata_.unknown_fields(), target);
6008 }
6009 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.ConfigProto.Experimental)
6010 return target;
6011}
6012
6013size_t ConfigProto_Experimental::ByteSizeLong() const {
6014// @@protoc_insertion_point(message_byte_size_start:tensorflow.ConfigProto.Experimental)
6015 size_t total_size = 0;
6016
6017 if (_internal_metadata_.have_unknown_fields()) {
6018 total_size +=
6019 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
6020 _internal_metadata_.unknown_fields());
6021 }
6022 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
6023 // Prevent compiler warnings about cached_has_bits being unused
6024 (void) cached_has_bits;
6025
6026 // string collective_group_leader = 1;
6027 if (this->collective_group_leader().size() > 0) {
6028 total_size += 1 +
6029 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
6030 this->collective_group_leader());
6031 }
6032
6033 // string executor_type = 3;
6034 if (this->executor_type().size() > 0) {
6035 total_size += 1 +
6036 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
6037 this->executor_type());
6038 }
6039
6040 // .tensorflow.SessionMetadata session_metadata = 11;
6041 if (this->has_session_metadata()) {
6042 total_size += 1 +
6043 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
6044 *session_metadata_);
6045 }
6046
6047 // .tensorflow.CoordinationServiceConfig coordination_config = 23;
6048 if (this->has_coordination_config()) {
6049 total_size += 2 +
6050 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
6051 *coordination_config_);
6052 }
6053
6054 // int32 recv_buf_max_chunk = 4;
6055 if (this->recv_buf_max_chunk() != 0) {
6056 total_size += 1 +
6057 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
6058 this->recv_buf_max_chunk());
6059 }
6060
6061 // bool use_numa_affinity = 5;
6062 if (this->use_numa_affinity() != 0) {
6063 total_size += 1 + 1;
6064 }
6065
6066 // bool collective_deterministic_sequential_execution = 6;
6067 if (this->collective_deterministic_sequential_execution() != 0) {
6068 total_size += 1 + 1;
6069 }
6070
6071 // bool collective_nccl = 7;
6072 if (this->collective_nccl() != 0) {
6073 total_size += 1 + 1;
6074 }
6075
6076 // bool share_session_state_in_clusterspec_propagation = 8;
6077 if (this->share_session_state_in_clusterspec_propagation() != 0) {
6078 total_size += 1 + 1;
6079 }
6080
6081 // bool disable_thread_spinning = 9;
6082 if (this->disable_thread_spinning() != 0) {
6083 total_size += 1 + 1;
6084 }
6085
6086 // bool share_cluster_devices_in_session = 10;
6087 if (this->share_cluster_devices_in_session() != 0) {
6088 total_size += 1 + 1;
6089 }
6090
6091 // bool optimize_for_static_graph = 12;
6092 if (this->optimize_for_static_graph() != 0) {
6093 total_size += 1 + 1;
6094 }
6095
6096 // bool enable_mlir_bridge = 13;
6097 if (this->enable_mlir_bridge() != 0) {
6098 total_size += 1 + 1;
6099 }
6100
6101 // .tensorflow.ConfigProto.Experimental.MlirBridgeRollout mlir_bridge_rollout = 17;
6102 if (this->mlir_bridge_rollout() != 0) {
6103 total_size += 2 +
6104 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->mlir_bridge_rollout());
6105 }
6106
6107 // int64 xla_fusion_autotuner_thresh = 15;
6108 if (this->xla_fusion_autotuner_thresh() != 0) {
6109 total_size += 1 +
6110 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
6111 this->xla_fusion_autotuner_thresh());
6112 }
6113
6114 // bool enable_mlir_graph_optimization = 16;
6115 if (this->enable_mlir_graph_optimization() != 0) {
6116 total_size += 2 + 1;
6117 }
6118
6119 // bool disable_output_partition_graphs = 14;
6120 if (this->disable_output_partition_graphs() != 0) {
6121 total_size += 1 + 1;
6122 }
6123
6124 // bool use_tfrt = 18;
6125 if (this->use_tfrt() != 0) {
6126 total_size += 2 + 1;
6127 }
6128
6129 // bool disable_functional_ops_lowering = 21;
6130 if (this->disable_functional_ops_lowering() != 0) {
6131 total_size += 2 + 1;
6132 }
6133
6134 // bool xla_prefer_single_graph_cluster = 22;
6135 if (this->xla_prefer_single_graph_cluster() != 0) {
6136 total_size += 2 + 1;
6137 }
6138
6139 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
6140 SetCachedSize(cached_size);
6141 return total_size;
6142}
6143
6144void ConfigProto_Experimental::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
6145// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.ConfigProto.Experimental)
6146 GOOGLE_DCHECK_NE(&from, this);
6147 const ConfigProto_Experimental* source =
6148 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<ConfigProto_Experimental>(
6149 &from);
6150 if (source == nullptr) {
6151 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.ConfigProto.Experimental)
6152 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
6153 } else {
6154 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.ConfigProto.Experimental)
6155 MergeFrom(*source);
6156 }
6157}
6158
6159void ConfigProto_Experimental::MergeFrom(const ConfigProto_Experimental& from) {
6160// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.ConfigProto.Experimental)
6161 GOOGLE_DCHECK_NE(&from, this);
6162 _internal_metadata_.MergeFrom(from._internal_metadata_);
6163 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
6164 (void) cached_has_bits;
6165
6166 if (from.collective_group_leader().size() > 0) {
6167 set_collective_group_leader(from.collective_group_leader());
6168 }
6169 if (from.executor_type().size() > 0) {
6170 set_executor_type(from.executor_type());
6171 }
6172 if (from.has_session_metadata()) {
6173 mutable_session_metadata()->::tensorflow::SessionMetadata::MergeFrom(from.session_metadata());
6174 }
6175 if (from.has_coordination_config()) {
6176 mutable_coordination_config()->::tensorflow::CoordinationServiceConfig::MergeFrom(from.coordination_config());
6177 }
6178 if (from.recv_buf_max_chunk() != 0) {
6179 set_recv_buf_max_chunk(from.recv_buf_max_chunk());
6180 }
6181 if (from.use_numa_affinity() != 0) {
6182 set_use_numa_affinity(from.use_numa_affinity());
6183 }
6184 if (from.collective_deterministic_sequential_execution() != 0) {
6185 set_collective_deterministic_sequential_execution(from.collective_deterministic_sequential_execution());
6186 }
6187 if (from.collective_nccl() != 0) {
6188 set_collective_nccl(from.collective_nccl());
6189 }
6190 if (from.share_session_state_in_clusterspec_propagation() != 0) {
6191 set_share_session_state_in_clusterspec_propagation(from.share_session_state_in_clusterspec_propagation());
6192 }
6193 if (from.disable_thread_spinning() != 0) {
6194 set_disable_thread_spinning(from.disable_thread_spinning());
6195 }
6196 if (from.share_cluster_devices_in_session() != 0) {
6197 set_share_cluster_devices_in_session(from.share_cluster_devices_in_session());
6198 }
6199 if (from.optimize_for_static_graph() != 0) {
6200 set_optimize_for_static_graph(from.optimize_for_static_graph());
6201 }
6202 if (from.enable_mlir_bridge() != 0) {
6203 set_enable_mlir_bridge(from.enable_mlir_bridge());
6204 }
6205 if (from.mlir_bridge_rollout() != 0) {
6206 set_mlir_bridge_rollout(from.mlir_bridge_rollout());
6207 }
6208 if (from.xla_fusion_autotuner_thresh() != 0) {
6209 set_xla_fusion_autotuner_thresh(from.xla_fusion_autotuner_thresh());
6210 }
6211 if (from.enable_mlir_graph_optimization() != 0) {
6212 set_enable_mlir_graph_optimization(from.enable_mlir_graph_optimization());
6213 }
6214 if (from.disable_output_partition_graphs() != 0) {
6215 set_disable_output_partition_graphs(from.disable_output_partition_graphs());
6216 }
6217 if (from.use_tfrt() != 0) {
6218 set_use_tfrt(from.use_tfrt());
6219 }
6220 if (from.disable_functional_ops_lowering() != 0) {
6221 set_disable_functional_ops_lowering(from.disable_functional_ops_lowering());
6222 }
6223 if (from.xla_prefer_single_graph_cluster() != 0) {
6224 set_xla_prefer_single_graph_cluster(from.xla_prefer_single_graph_cluster());
6225 }
6226}
6227
6228void ConfigProto_Experimental::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
6229// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.ConfigProto.Experimental)
6230 if (&from == this) return;
6231 Clear();
6232 MergeFrom(from);
6233}
6234
6235void ConfigProto_Experimental::CopyFrom(const ConfigProto_Experimental& from) {
6236// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.ConfigProto.Experimental)
6237 if (&from == this) return;
6238 Clear();
6239 MergeFrom(from);
6240}
6241
6242bool ConfigProto_Experimental::IsInitialized() const {
6243 return true;
6244}
6245
6246void ConfigProto_Experimental::InternalSwap(ConfigProto_Experimental* other) {
6247 using std::swap;
6248 _internal_metadata_.Swap(&other->_internal_metadata_);
6249 collective_group_leader_.Swap(&other->collective_group_leader_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
6250 GetArenaNoVirtual());
6251 executor_type_.Swap(&other->executor_type_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
6252 GetArenaNoVirtual());
6253 swap(session_metadata_, other->session_metadata_);
6254 swap(coordination_config_, other->coordination_config_);
6255 swap(recv_buf_max_chunk_, other->recv_buf_max_chunk_);
6256 swap(use_numa_affinity_, other->use_numa_affinity_);
6257 swap(collective_deterministic_sequential_execution_, other->collective_deterministic_sequential_execution_);
6258 swap(collective_nccl_, other->collective_nccl_);
6259 swap(share_session_state_in_clusterspec_propagation_, other->share_session_state_in_clusterspec_propagation_);
6260 swap(disable_thread_spinning_, other->disable_thread_spinning_);
6261 swap(share_cluster_devices_in_session_, other->share_cluster_devices_in_session_);
6262 swap(optimize_for_static_graph_, other->optimize_for_static_graph_);
6263 swap(enable_mlir_bridge_, other->enable_mlir_bridge_);
6264 swap(mlir_bridge_rollout_, other->mlir_bridge_rollout_);
6265 swap(xla_fusion_autotuner_thresh_, other->xla_fusion_autotuner_thresh_);
6266 swap(enable_mlir_graph_optimization_, other->enable_mlir_graph_optimization_);
6267 swap(disable_output_partition_graphs_, other->disable_output_partition_graphs_);
6268 swap(use_tfrt_, other->use_tfrt_);
6269 swap(disable_functional_ops_lowering_, other->disable_functional_ops_lowering_);
6270 swap(xla_prefer_single_graph_cluster_, other->xla_prefer_single_graph_cluster_);
6271}
6272
6273::PROTOBUF_NAMESPACE_ID::Metadata ConfigProto_Experimental::GetMetadata() const {
6274 return GetMetadataStatic();
6275}
6276
6277
6278// ===================================================================
6279
6280void ConfigProto::InitAsDefaultInstance() {
6281 ::tensorflow::_ConfigProto_default_instance_._instance.get_mutable()->gpu_options_ = const_cast< ::tensorflow::GPUOptions*>(
6282 ::tensorflow::GPUOptions::internal_default_instance());
6283 ::tensorflow::_ConfigProto_default_instance_._instance.get_mutable()->graph_options_ = const_cast< ::tensorflow::GraphOptions*>(
6284 ::tensorflow::GraphOptions::internal_default_instance());
6285 ::tensorflow::_ConfigProto_default_instance_._instance.get_mutable()->rpc_options_ = const_cast< ::tensorflow::RPCOptions*>(
6286 ::tensorflow::RPCOptions::internal_default_instance());
6287 ::tensorflow::_ConfigProto_default_instance_._instance.get_mutable()->cluster_def_ = const_cast< ::tensorflow::ClusterDef*>(
6288 ::tensorflow::ClusterDef::internal_default_instance());
6289 ::tensorflow::_ConfigProto_default_instance_._instance.get_mutable()->experimental_ = const_cast< ::tensorflow::ConfigProto_Experimental*>(
6290 ::tensorflow::ConfigProto_Experimental::internal_default_instance());
6291}
6292class ConfigProto::_Internal {
6293 public:
6294 static const ::tensorflow::GPUOptions& gpu_options(const ConfigProto* msg);
6295 static const ::tensorflow::GraphOptions& graph_options(const ConfigProto* msg);
6296 static const ::tensorflow::RPCOptions& rpc_options(const ConfigProto* msg);
6297 static const ::tensorflow::ClusterDef& cluster_def(const ConfigProto* msg);
6298 static const ::tensorflow::ConfigProto_Experimental& experimental(const ConfigProto* msg);
6299};
6300
6301const ::tensorflow::GPUOptions&
6302ConfigProto::_Internal::gpu_options(const ConfigProto* msg) {
6303 return *msg->gpu_options_;
6304}
6305const ::tensorflow::GraphOptions&
6306ConfigProto::_Internal::graph_options(const ConfigProto* msg) {
6307 return *msg->graph_options_;
6308}
6309const ::tensorflow::RPCOptions&
6310ConfigProto::_Internal::rpc_options(const ConfigProto* msg) {
6311 return *msg->rpc_options_;
6312}
6313const ::tensorflow::ClusterDef&
6314ConfigProto::_Internal::cluster_def(const ConfigProto* msg) {
6315 return *msg->cluster_def_;
6316}
6317const ::tensorflow::ConfigProto_Experimental&
6318ConfigProto::_Internal::experimental(const ConfigProto* msg) {
6319 return *msg->experimental_;
6320}
6321void ConfigProto::unsafe_arena_set_allocated_gpu_options(
6322 ::tensorflow::GPUOptions* gpu_options) {
6323 if (GetArenaNoVirtual() == nullptr) {
6324 delete gpu_options_;
6325 }
6326 gpu_options_ = gpu_options;
6327 if (gpu_options) {
6328
6329 } else {
6330
6331 }
6332 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.gpu_options)
6333}
6334void ConfigProto::unsafe_arena_set_allocated_graph_options(
6335 ::tensorflow::GraphOptions* graph_options) {
6336 if (GetArenaNoVirtual() == nullptr) {
6337 delete graph_options_;
6338 }
6339 graph_options_ = graph_options;
6340 if (graph_options) {
6341
6342 } else {
6343
6344 }
6345 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.graph_options)
6346}
6347void ConfigProto::unsafe_arena_set_allocated_rpc_options(
6348 ::tensorflow::RPCOptions* rpc_options) {
6349 if (GetArenaNoVirtual() == nullptr) {
6350 delete rpc_options_;
6351 }
6352 rpc_options_ = rpc_options;
6353 if (rpc_options) {
6354
6355 } else {
6356
6357 }
6358 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.rpc_options)
6359}
6360void ConfigProto::unsafe_arena_set_allocated_cluster_def(
6361 ::tensorflow::ClusterDef* cluster_def) {
6362 if (GetArenaNoVirtual() == nullptr) {
6363 delete cluster_def_;
6364 }
6365 cluster_def_ = cluster_def;
6366 if (cluster_def) {
6367
6368 } else {
6369
6370 }
6371 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.cluster_def)
6372}
6373void ConfigProto::clear_cluster_def() {
6374 if (GetArenaNoVirtual() == nullptr && cluster_def_ != nullptr) {
6375 delete cluster_def_;
6376 }
6377 cluster_def_ = nullptr;
6378}
6379void ConfigProto::unsafe_arena_set_allocated_experimental(
6380 ::tensorflow::ConfigProto_Experimental* experimental) {
6381 if (GetArenaNoVirtual() == nullptr) {
6382 delete experimental_;
6383 }
6384 experimental_ = experimental;
6385 if (experimental) {
6386
6387 } else {
6388
6389 }
6390 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.ConfigProto.experimental)
6391}
6392ConfigProto::ConfigProto()
6393 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
6394 SharedCtor();
6395 // @@protoc_insertion_point(constructor:tensorflow.ConfigProto)
6396}
6397ConfigProto::ConfigProto(::PROTOBUF_NAMESPACE_ID::Arena* arena)
6398 : ::PROTOBUF_NAMESPACE_ID::Message(),
6399 _internal_metadata_(arena),
6400 device_count_(arena),
6401 device_filters_(arena),
6402 session_inter_op_thread_pool_(arena) {
6403 SharedCtor();
6404 RegisterArenaDtor(arena);
6405 // @@protoc_insertion_point(arena_constructor:tensorflow.ConfigProto)
6406}
6407ConfigProto::ConfigProto(const ConfigProto& from)
6408 : ::PROTOBUF_NAMESPACE_ID::Message(),
6409 _internal_metadata_(nullptr),
6410 device_filters_(from.device_filters_),
6411 session_inter_op_thread_pool_(from.session_inter_op_thread_pool_) {
6412 _internal_metadata_.MergeFrom(from._internal_metadata_);
6413 device_count_.MergeFrom(from.device_count_);
6414 if (from.has_gpu_options()) {
6415 gpu_options_ = new ::tensorflow::GPUOptions(*from.gpu_options_);
6416 } else {
6417 gpu_options_ = nullptr;
6418 }
6419 if (from.has_graph_options()) {
6420 graph_options_ = new ::tensorflow::GraphOptions(*from.graph_options_);
6421 } else {
6422 graph_options_ = nullptr;
6423 }
6424 if (from.has_rpc_options()) {
6425 rpc_options_ = new ::tensorflow::RPCOptions(*from.rpc_options_);
6426 } else {
6427 rpc_options_ = nullptr;
6428 }
6429 if (from.has_cluster_def()) {
6430 cluster_def_ = new ::tensorflow::ClusterDef(*from.cluster_def_);
6431 } else {
6432 cluster_def_ = nullptr;
6433 }
6434 if (from.has_experimental()) {
6435 experimental_ = new ::tensorflow::ConfigProto_Experimental(*from.experimental_);
6436 } else {
6437 experimental_ = nullptr;
6438 }
6439 ::memcpy(&intra_op_parallelism_threads_, &from.intra_op_parallelism_threads_,
6440 static_cast<size_t>(reinterpret_cast<char*>(&share_cluster_devices_in_session_) -
6441 reinterpret_cast<char*>(&intra_op_parallelism_threads_)) + sizeof(share_cluster_devices_in_session_));
6442 // @@protoc_insertion_point(copy_constructor:tensorflow.ConfigProto)
6443}
6444
6445void ConfigProto::SharedCtor() {
6446 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_ConfigProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
6447 ::memset(&gpu_options_, 0, static_cast<size_t>(
6448 reinterpret_cast<char*>(&share_cluster_devices_in_session_) -
6449 reinterpret_cast<char*>(&gpu_options_)) + sizeof(share_cluster_devices_in_session_));
6450}
6451
6452ConfigProto::~ConfigProto() {
6453 // @@protoc_insertion_point(destructor:tensorflow.ConfigProto)
6454 SharedDtor();
6455}
6456
6457void ConfigProto::SharedDtor() {
6458 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
6459 if (this != internal_default_instance()) delete gpu_options_;
6460 if (this != internal_default_instance()) delete graph_options_;
6461 if (this != internal_default_instance()) delete rpc_options_;
6462 if (this != internal_default_instance()) delete cluster_def_;
6463 if (this != internal_default_instance()) delete experimental_;
6464}
6465
6466void ConfigProto::ArenaDtor(void* object) {
6467 ConfigProto* _this = reinterpret_cast< ConfigProto* >(object);
6468 (void)_this;
6469}
6470void ConfigProto::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
6471}
6472void ConfigProto::SetCachedSize(int size) const {
6473 _cached_size_.Set(size);
6474}
6475const ConfigProto& ConfigProto::default_instance() {
6476 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_ConfigProto_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
6477 return *internal_default_instance();
6478}
6479
6480
6481void ConfigProto::Clear() {
6482// @@protoc_insertion_point(message_clear_start:tensorflow.ConfigProto)
6483 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
6484 // Prevent compiler warnings about cached_has_bits being unused
6485 (void) cached_has_bits;
6486
6487 device_count_.Clear();
6488 device_filters_.Clear();
6489 session_inter_op_thread_pool_.Clear();
6490 if (GetArenaNoVirtual() == nullptr && gpu_options_ != nullptr) {
6491 delete gpu_options_;
6492 }
6493 gpu_options_ = nullptr;
6494 if (GetArenaNoVirtual() == nullptr && graph_options_ != nullptr) {
6495 delete graph_options_;
6496 }
6497 graph_options_ = nullptr;
6498 if (GetArenaNoVirtual() == nullptr && rpc_options_ != nullptr) {
6499 delete rpc_options_;
6500 }
6501 rpc_options_ = nullptr;
6502 if (GetArenaNoVirtual() == nullptr && cluster_def_ != nullptr) {
6503 delete cluster_def_;
6504 }
6505 cluster_def_ = nullptr;
6506 if (GetArenaNoVirtual() == nullptr && experimental_ != nullptr) {
6507 delete experimental_;
6508 }
6509 experimental_ = nullptr;
6510 ::memset(&intra_op_parallelism_threads_, 0, static_cast<size_t>(
6511 reinterpret_cast<char*>(&share_cluster_devices_in_session_) -
6512 reinterpret_cast<char*>(&intra_op_parallelism_threads_)) + sizeof(share_cluster_devices_in_session_));
6513 _internal_metadata_.Clear();
6514}
6515
6516#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
6517const char* ConfigProto::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
6518#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
6519 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
6520 while (!ctx->Done(&ptr)) {
6521 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
6522 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
6523 CHK_(ptr);
6524 switch (tag >> 3) {
6525 // map<string, int32> device_count = 1;
6526 case 1:
6527 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
6528 ptr -= 1;
6529 do {
6530 ptr += 1;
6531 ptr = ctx->ParseMessage(&device_count_, ptr);
6532 CHK_(ptr);
6533 if (!ctx->DataAvailable(ptr)) break;
6534 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 10);
6535 } else goto handle_unusual;
6536 continue;
6537 // int32 intra_op_parallelism_threads = 2;
6538 case 2:
6539 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
6540 intra_op_parallelism_threads_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
6541 CHK_(ptr);
6542 } else goto handle_unusual;
6543 continue;
6544 // int32 placement_period = 3;
6545 case 3:
6546 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
6547 placement_period_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
6548 CHK_(ptr);
6549 } else goto handle_unusual;
6550 continue;
6551 // repeated string device_filters = 4;
6552 case 4:
6553 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) {
6554 ptr -= 1;
6555 do {
6556 ptr += 1;
6557 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_device_filters(), ptr, ctx, "tensorflow.ConfigProto.device_filters");
6558 CHK_(ptr);
6559 if (!ctx->DataAvailable(ptr)) break;
6560 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 34);
6561 } else goto handle_unusual;
6562 continue;
6563 // int32 inter_op_parallelism_threads = 5;
6564 case 5:
6565 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
6566 inter_op_parallelism_threads_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
6567 CHK_(ptr);
6568 } else goto handle_unusual;
6569 continue;
6570 // .tensorflow.GPUOptions gpu_options = 6;
6571 case 6:
6572 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) {
6573 ptr = ctx->ParseMessage(mutable_gpu_options(), ptr);
6574 CHK_(ptr);
6575 } else goto handle_unusual;
6576 continue;
6577 // bool allow_soft_placement = 7;
6578 case 7:
6579 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 56)) {
6580 allow_soft_placement_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
6581 CHK_(ptr);
6582 } else goto handle_unusual;
6583 continue;
6584 // bool log_device_placement = 8;
6585 case 8:
6586 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 64)) {
6587 log_device_placement_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
6588 CHK_(ptr);
6589 } else goto handle_unusual;
6590 continue;
6591 // bool use_per_session_threads = 9;
6592 case 9:
6593 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 72)) {
6594 use_per_session_threads_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
6595 CHK_(ptr);
6596 } else goto handle_unusual;
6597 continue;
6598 // .tensorflow.GraphOptions graph_options = 10;
6599 case 10:
6600 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 82)) {
6601 ptr = ctx->ParseMessage(mutable_graph_options(), ptr);
6602 CHK_(ptr);
6603 } else goto handle_unusual;
6604 continue;
6605 // int64 operation_timeout_in_ms = 11;
6606 case 11:
6607 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 88)) {
6608 operation_timeout_in_ms_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
6609 CHK_(ptr);
6610 } else goto handle_unusual;
6611 continue;
6612 // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
6613 case 12:
6614 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 98)) {
6615 ptr -= 1;
6616 do {
6617 ptr += 1;
6618 ptr = ctx->ParseMessage(add_session_inter_op_thread_pool(), ptr);
6619 CHK_(ptr);
6620 if (!ctx->DataAvailable(ptr)) break;
6621 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 98);
6622 } else goto handle_unusual;
6623 continue;
6624 // .tensorflow.RPCOptions rpc_options = 13;
6625 case 13:
6626 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 106)) {
6627 ptr = ctx->ParseMessage(mutable_rpc_options(), ptr);
6628 CHK_(ptr);
6629 } else goto handle_unusual;
6630 continue;
6631 // .tensorflow.ClusterDef cluster_def = 14;
6632 case 14:
6633 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 114)) {
6634 ptr = ctx->ParseMessage(mutable_cluster_def(), ptr);
6635 CHK_(ptr);
6636 } else goto handle_unusual;
6637 continue;
6638 // bool isolate_session_state = 15;
6639 case 15:
6640 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 120)) {
6641 isolate_session_state_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
6642 CHK_(ptr);
6643 } else goto handle_unusual;
6644 continue;
6645 // .tensorflow.ConfigProto.Experimental experimental = 16;
6646 case 16:
6647 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 130)) {
6648 ptr = ctx->ParseMessage(mutable_experimental(), ptr);
6649 CHK_(ptr);
6650 } else goto handle_unusual;
6651 continue;
6652 // bool share_cluster_devices_in_session = 17;
6653 case 17:
6654 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 136)) {
6655 share_cluster_devices_in_session_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
6656 CHK_(ptr);
6657 } else goto handle_unusual;
6658 continue;
6659 default: {
6660 handle_unusual:
6661 if ((tag & 7) == 4 || tag == 0) {
6662 ctx->SetLastTag(tag);
6663 goto success;
6664 }
6665 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
6666 CHK_(ptr != nullptr);
6667 continue;
6668 }
6669 } // switch
6670 } // while
6671success:
6672 return ptr;
6673failure:
6674 ptr = nullptr;
6675 goto success;
6676#undef CHK_
6677}
6678#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
6679bool ConfigProto::MergePartialFromCodedStream(
6680 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
6681#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
6682 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
6683 // @@protoc_insertion_point(parse_start:tensorflow.ConfigProto)
6684 for (;;) {
6685 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(16383u);
6686 tag = p.first;
6687 if (!p.second) goto handle_unusual;
6688 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
6689 // map<string, int32> device_count = 1;
6690 case 1: {
6691 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
6692 ConfigProto_DeviceCountEntry_DoNotUse::Parser< ::PROTOBUF_NAMESPACE_ID::internal::MapField<
6693 ConfigProto_DeviceCountEntry_DoNotUse,
6694 std::string, ::PROTOBUF_NAMESPACE_ID::int32,
6695 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
6696 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32,
6697 0 >,
6698 ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 > > parser(&device_count_);
6699 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessageNoVirtual(
6700 input, &parser));
6701 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
6702 parser.key().data(), static_cast<int>(parser.key().length()),
6703 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
6704 "tensorflow.ConfigProto.DeviceCountEntry.key"));
6705 } else {
6706 goto handle_unusual;
6707 }
6708 break;
6709 }
6710
6711 // int32 intra_op_parallelism_threads = 2;
6712 case 2: {
6713 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {
6714
6715 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
6716 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
6717 input, &intra_op_parallelism_threads_)));
6718 } else {
6719 goto handle_unusual;
6720 }
6721 break;
6722 }
6723
6724 // int32 placement_period = 3;
6725 case 3: {
6726 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
6727
6728 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
6729 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
6730 input, &placement_period_)));
6731 } else {
6732 goto handle_unusual;
6733 }
6734 break;
6735 }
6736
6737 // repeated string device_filters = 4;
6738 case 4: {
6739 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) {
6740 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
6741 input, this->add_device_filters()));
6742 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
6743 this->device_filters(this->device_filters_size() - 1).data(),
6744 static_cast<int>(this->device_filters(this->device_filters_size() - 1).length()),
6745 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
6746 "tensorflow.ConfigProto.device_filters"));
6747 } else {
6748 goto handle_unusual;
6749 }
6750 break;
6751 }
6752
6753 // int32 inter_op_parallelism_threads = 5;
6754 case 5: {
6755 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
6756
6757 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
6758 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
6759 input, &inter_op_parallelism_threads_)));
6760 } else {
6761 goto handle_unusual;
6762 }
6763 break;
6764 }
6765
6766 // .tensorflow.GPUOptions gpu_options = 6;
6767 case 6: {
6768 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) {
6769 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
6770 input, mutable_gpu_options()));
6771 } else {
6772 goto handle_unusual;
6773 }
6774 break;
6775 }
6776
6777 // bool allow_soft_placement = 7;
6778 case 7: {
6779 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (56 & 0xFF)) {
6780
6781 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
6782 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
6783 input, &allow_soft_placement_)));
6784 } else {
6785 goto handle_unusual;
6786 }
6787 break;
6788 }
6789
6790 // bool log_device_placement = 8;
6791 case 8: {
6792 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (64 & 0xFF)) {
6793
6794 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
6795 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
6796 input, &log_device_placement_)));
6797 } else {
6798 goto handle_unusual;
6799 }
6800 break;
6801 }
6802
6803 // bool use_per_session_threads = 9;
6804 case 9: {
6805 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (72 & 0xFF)) {
6806
6807 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
6808 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
6809 input, &use_per_session_threads_)));
6810 } else {
6811 goto handle_unusual;
6812 }
6813 break;
6814 }
6815
6816 // .tensorflow.GraphOptions graph_options = 10;
6817 case 10: {
6818 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (82 & 0xFF)) {
6819 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
6820 input, mutable_graph_options()));
6821 } else {
6822 goto handle_unusual;
6823 }
6824 break;
6825 }
6826
6827 // int64 operation_timeout_in_ms = 11;
6828 case 11: {
6829 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (88 & 0xFF)) {
6830
6831 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
6832 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
6833 input, &operation_timeout_in_ms_)));
6834 } else {
6835 goto handle_unusual;
6836 }
6837 break;
6838 }
6839
6840 // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
6841 case 12: {
6842 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (98 & 0xFF)) {
6843 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
6844 input, add_session_inter_op_thread_pool()));
6845 } else {
6846 goto handle_unusual;
6847 }
6848 break;
6849 }
6850
6851 // .tensorflow.RPCOptions rpc_options = 13;
6852 case 13: {
6853 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (106 & 0xFF)) {
6854 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
6855 input, mutable_rpc_options()));
6856 } else {
6857 goto handle_unusual;
6858 }
6859 break;
6860 }
6861
6862 // .tensorflow.ClusterDef cluster_def = 14;
6863 case 14: {
6864 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (114 & 0xFF)) {
6865 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
6866 input, mutable_cluster_def()));
6867 } else {
6868 goto handle_unusual;
6869 }
6870 break;
6871 }
6872
6873 // bool isolate_session_state = 15;
6874 case 15: {
6875 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (120 & 0xFF)) {
6876
6877 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
6878 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
6879 input, &isolate_session_state_)));
6880 } else {
6881 goto handle_unusual;
6882 }
6883 break;
6884 }
6885
6886 // .tensorflow.ConfigProto.Experimental experimental = 16;
6887 case 16: {
6888 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (130 & 0xFF)) {
6889 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
6890 input, mutable_experimental()));
6891 } else {
6892 goto handle_unusual;
6893 }
6894 break;
6895 }
6896
6897 // bool share_cluster_devices_in_session = 17;
6898 case 17: {
6899 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (136 & 0xFF)) {
6900
6901 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
6902 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
6903 input, &share_cluster_devices_in_session_)));
6904 } else {
6905 goto handle_unusual;
6906 }
6907 break;
6908 }
6909
6910 default: {
6911 handle_unusual:
6912 if (tag == 0) {
6913 goto success;
6914 }
6915 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
6916 input, tag, _internal_metadata_.mutable_unknown_fields()));
6917 break;
6918 }
6919 }
6920 }
6921success:
6922 // @@protoc_insertion_point(parse_success:tensorflow.ConfigProto)
6923 return true;
6924failure:
6925 // @@protoc_insertion_point(parse_failure:tensorflow.ConfigProto)
6926 return false;
6927#undef DO_
6928}
6929#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
6930
6931void ConfigProto::SerializeWithCachedSizes(
6932 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
6933 // @@protoc_insertion_point(serialize_start:tensorflow.ConfigProto)
6934 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
6935 (void) cached_has_bits;
6936
6937 // map<string, int32> device_count = 1;
6938 if (!this->device_count().empty()) {
6939 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 >::const_pointer
6940 ConstPtr;
6941 typedef ConstPtr SortItem;
6942 typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst<SortItem> Less;
6943 struct Utf8Check {
6944 static void Check(ConstPtr p) {
6945 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
6946 p->first.data(), static_cast<int>(p->first.length()),
6947 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
6948 "tensorflow.ConfigProto.DeviceCountEntry.key");
6949 }
6950 };
6951
6952 if (output->IsSerializationDeterministic() &&
6953 this->device_count().size() > 1) {
6954 ::std::unique_ptr<SortItem[]> items(
6955 new SortItem[this->device_count().size()]);
6956 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 >::size_type size_type;
6957 size_type n = 0;
6958 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 >::const_iterator
6959 it = this->device_count().begin();
6960 it != this->device_count().end(); ++it, ++n) {
6961 items[static_cast<ptrdiff_t>(n)] = SortItem(&*it);
6962 }
6963 ::std::sort(&items[0], &items[static_cast<ptrdiff_t>(n)], Less());
6964 for (size_type i = 0; i < n; i++) {
6965 ConfigProto_DeviceCountEntry_DoNotUse::Funcs::SerializeToCodedStream(1, items[static_cast<ptrdiff_t>(i)]->first, items[static_cast<ptrdiff_t>(i)]->second, output);
6966 Utf8Check::Check(&(*items[static_cast<ptrdiff_t>(i)]));
6967 }
6968 } else {
6969 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 >::const_iterator
6970 it = this->device_count().begin();
6971 it != this->device_count().end(); ++it) {
6972 ConfigProto_DeviceCountEntry_DoNotUse::Funcs::SerializeToCodedStream(1, it->first, it->second, output);
6973 Utf8Check::Check(&(*it));
6974 }
6975 }
6976 }
6977
6978 // int32 intra_op_parallelism_threads = 2;
6979 if (this->intra_op_parallelism_threads() != 0) {
6980 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(2, this->intra_op_parallelism_threads(), output);
6981 }
6982
6983 // int32 placement_period = 3;
6984 if (this->placement_period() != 0) {
6985 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(3, this->placement_period(), output);
6986 }
6987
6988 // repeated string device_filters = 4;
6989 for (int i = 0, n = this->device_filters_size(); i < n; i++) {
6990 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
6991 this->device_filters(i).data(), static_cast<int>(this->device_filters(i).length()),
6992 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
6993 "tensorflow.ConfigProto.device_filters");
6994 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
6995 4, this->device_filters(i), output);
6996 }
6997
6998 // int32 inter_op_parallelism_threads = 5;
6999 if (this->inter_op_parallelism_threads() != 0) {
7000 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(5, this->inter_op_parallelism_threads(), output);
7001 }
7002
7003 // .tensorflow.GPUOptions gpu_options = 6;
7004 if (this->has_gpu_options()) {
7005 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
7006 6, _Internal::gpu_options(this), output);
7007 }
7008
7009 // bool allow_soft_placement = 7;
7010 if (this->allow_soft_placement() != 0) {
7011 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(7, this->allow_soft_placement(), output);
7012 }
7013
7014 // bool log_device_placement = 8;
7015 if (this->log_device_placement() != 0) {
7016 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(8, this->log_device_placement(), output);
7017 }
7018
7019 // bool use_per_session_threads = 9;
7020 if (this->use_per_session_threads() != 0) {
7021 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(9, this->use_per_session_threads(), output);
7022 }
7023
7024 // .tensorflow.GraphOptions graph_options = 10;
7025 if (this->has_graph_options()) {
7026 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
7027 10, _Internal::graph_options(this), output);
7028 }
7029
7030 // int64 operation_timeout_in_ms = 11;
7031 if (this->operation_timeout_in_ms() != 0) {
7032 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(11, this->operation_timeout_in_ms(), output);
7033 }
7034
7035 // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
7036 for (unsigned int i = 0,
7037 n = static_cast<unsigned int>(this->session_inter_op_thread_pool_size()); i < n; i++) {
7038 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
7039 12,
7040 this->session_inter_op_thread_pool(static_cast<int>(i)),
7041 output);
7042 }
7043
7044 // .tensorflow.RPCOptions rpc_options = 13;
7045 if (this->has_rpc_options()) {
7046 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
7047 13, _Internal::rpc_options(this), output);
7048 }
7049
7050 // .tensorflow.ClusterDef cluster_def = 14;
7051 if (this->has_cluster_def()) {
7052 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
7053 14, _Internal::cluster_def(this), output);
7054 }
7055
7056 // bool isolate_session_state = 15;
7057 if (this->isolate_session_state() != 0) {
7058 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(15, this->isolate_session_state(), output);
7059 }
7060
7061 // .tensorflow.ConfigProto.Experimental experimental = 16;
7062 if (this->has_experimental()) {
7063 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
7064 16, _Internal::experimental(this), output);
7065 }
7066
7067 // bool share_cluster_devices_in_session = 17;
7068 if (this->share_cluster_devices_in_session() != 0) {
7069 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(17, this->share_cluster_devices_in_session(), output);
7070 }
7071
7072 if (_internal_metadata_.have_unknown_fields()) {
7073 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
7074 _internal_metadata_.unknown_fields(), output);
7075 }
7076 // @@protoc_insertion_point(serialize_end:tensorflow.ConfigProto)
7077}
7078
7079::PROTOBUF_NAMESPACE_ID::uint8* ConfigProto::InternalSerializeWithCachedSizesToArray(
7080 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
7081 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.ConfigProto)
7082 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7083 (void) cached_has_bits;
7084
7085 // map<string, int32> device_count = 1;
7086 if (!this->device_count().empty()) {
7087 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 >::const_pointer
7088 ConstPtr;
7089 typedef ConstPtr SortItem;
7090 typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst<SortItem> Less;
7091 struct Utf8Check {
7092 static void Check(ConstPtr p) {
7093 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
7094 p->first.data(), static_cast<int>(p->first.length()),
7095 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
7096 "tensorflow.ConfigProto.DeviceCountEntry.key");
7097 }
7098 };
7099
7100 if (false &&
7101 this->device_count().size() > 1) {
7102 ::std::unique_ptr<SortItem[]> items(
7103 new SortItem[this->device_count().size()]);
7104 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 >::size_type size_type;
7105 size_type n = 0;
7106 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 >::const_iterator
7107 it = this->device_count().begin();
7108 it != this->device_count().end(); ++it, ++n) {
7109 items[static_cast<ptrdiff_t>(n)] = SortItem(&*it);
7110 }
7111 ::std::sort(&items[0], &items[static_cast<ptrdiff_t>(n)], Less());
7112 for (size_type i = 0; i < n; i++) {
7113 target = ConfigProto_DeviceCountEntry_DoNotUse::Funcs::SerializeToArray(1, items[static_cast<ptrdiff_t>(i)]->first, items[static_cast<ptrdiff_t>(i)]->second, target);
7114 Utf8Check::Check(&(*items[static_cast<ptrdiff_t>(i)]));
7115 }
7116 } else {
7117 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 >::const_iterator
7118 it = this->device_count().begin();
7119 it != this->device_count().end(); ++it) {
7120 target = ConfigProto_DeviceCountEntry_DoNotUse::Funcs::SerializeToArray(1, it->first, it->second, target);
7121 Utf8Check::Check(&(*it));
7122 }
7123 }
7124 }
7125
7126 // int32 intra_op_parallelism_threads = 2;
7127 if (this->intra_op_parallelism_threads() != 0) {
7128 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(2, this->intra_op_parallelism_threads(), target);
7129 }
7130
7131 // int32 placement_period = 3;
7132 if (this->placement_period() != 0) {
7133 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(3, this->placement_period(), target);
7134 }
7135
7136 // repeated string device_filters = 4;
7137 for (int i = 0, n = this->device_filters_size(); i < n; i++) {
7138 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
7139 this->device_filters(i).data(), static_cast<int>(this->device_filters(i).length()),
7140 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
7141 "tensorflow.ConfigProto.device_filters");
7142 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
7143 WriteStringToArray(4, this->device_filters(i), target);
7144 }
7145
7146 // int32 inter_op_parallelism_threads = 5;
7147 if (this->inter_op_parallelism_threads() != 0) {
7148 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(5, this->inter_op_parallelism_threads(), target);
7149 }
7150
7151 // .tensorflow.GPUOptions gpu_options = 6;
7152 if (this->has_gpu_options()) {
7153 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
7154 InternalWriteMessageToArray(
7155 6, _Internal::gpu_options(this), target);
7156 }
7157
7158 // bool allow_soft_placement = 7;
7159 if (this->allow_soft_placement() != 0) {
7160 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(7, this->allow_soft_placement(), target);
7161 }
7162
7163 // bool log_device_placement = 8;
7164 if (this->log_device_placement() != 0) {
7165 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(8, this->log_device_placement(), target);
7166 }
7167
7168 // bool use_per_session_threads = 9;
7169 if (this->use_per_session_threads() != 0) {
7170 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(9, this->use_per_session_threads(), target);
7171 }
7172
7173 // .tensorflow.GraphOptions graph_options = 10;
7174 if (this->has_graph_options()) {
7175 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
7176 InternalWriteMessageToArray(
7177 10, _Internal::graph_options(this), target);
7178 }
7179
7180 // int64 operation_timeout_in_ms = 11;
7181 if (this->operation_timeout_in_ms() != 0) {
7182 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(11, this->operation_timeout_in_ms(), target);
7183 }
7184
7185 // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
7186 for (unsigned int i = 0,
7187 n = static_cast<unsigned int>(this->session_inter_op_thread_pool_size()); i < n; i++) {
7188 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
7189 InternalWriteMessageToArray(
7190 12, this->session_inter_op_thread_pool(static_cast<int>(i)), target);
7191 }
7192
7193 // .tensorflow.RPCOptions rpc_options = 13;
7194 if (this->has_rpc_options()) {
7195 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
7196 InternalWriteMessageToArray(
7197 13, _Internal::rpc_options(this), target);
7198 }
7199
7200 // .tensorflow.ClusterDef cluster_def = 14;
7201 if (this->has_cluster_def()) {
7202 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
7203 InternalWriteMessageToArray(
7204 14, _Internal::cluster_def(this), target);
7205 }
7206
7207 // bool isolate_session_state = 15;
7208 if (this->isolate_session_state() != 0) {
7209 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(15, this->isolate_session_state(), target);
7210 }
7211
7212 // .tensorflow.ConfigProto.Experimental experimental = 16;
7213 if (this->has_experimental()) {
7214 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
7215 InternalWriteMessageToArray(
7216 16, _Internal::experimental(this), target);
7217 }
7218
7219 // bool share_cluster_devices_in_session = 17;
7220 if (this->share_cluster_devices_in_session() != 0) {
7221 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(17, this->share_cluster_devices_in_session(), target);
7222 }
7223
7224 if (_internal_metadata_.have_unknown_fields()) {
7225 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
7226 _internal_metadata_.unknown_fields(), target);
7227 }
7228 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.ConfigProto)
7229 return target;
7230}
7231
7232size_t ConfigProto::ByteSizeLong() const {
7233// @@protoc_insertion_point(message_byte_size_start:tensorflow.ConfigProto)
7234 size_t total_size = 0;
7235
7236 if (_internal_metadata_.have_unknown_fields()) {
7237 total_size +=
7238 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
7239 _internal_metadata_.unknown_fields());
7240 }
7241 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7242 // Prevent compiler warnings about cached_has_bits being unused
7243 (void) cached_has_bits;
7244
7245 // map<string, int32> device_count = 1;
7246 total_size += 1 *
7247 ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->device_count_size());
7248 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, ::PROTOBUF_NAMESPACE_ID::int32 >::const_iterator
7249 it = this->device_count().begin();
7250 it != this->device_count().end(); ++it) {
7251 total_size += ConfigProto_DeviceCountEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
7252 }
7253
7254 // repeated string device_filters = 4;
7255 total_size += 1 *
7256 ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->device_filters_size());
7257 for (int i = 0, n = this->device_filters_size(); i < n; i++) {
7258 total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
7259 this->device_filters(i));
7260 }
7261
7262 // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
7263 {
7264 unsigned int count = static_cast<unsigned int>(this->session_inter_op_thread_pool_size());
7265 total_size += 1UL * count;
7266 for (unsigned int i = 0; i < count; i++) {
7267 total_size +=
7268 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
7269 this->session_inter_op_thread_pool(static_cast<int>(i)));
7270 }
7271 }
7272
7273 // .tensorflow.GPUOptions gpu_options = 6;
7274 if (this->has_gpu_options()) {
7275 total_size += 1 +
7276 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
7277 *gpu_options_);
7278 }
7279
7280 // .tensorflow.GraphOptions graph_options = 10;
7281 if (this->has_graph_options()) {
7282 total_size += 1 +
7283 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
7284 *graph_options_);
7285 }
7286
7287 // .tensorflow.RPCOptions rpc_options = 13;
7288 if (this->has_rpc_options()) {
7289 total_size += 1 +
7290 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
7291 *rpc_options_);
7292 }
7293
7294 // .tensorflow.ClusterDef cluster_def = 14;
7295 if (this->has_cluster_def()) {
7296 total_size += 1 +
7297 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
7298 *cluster_def_);
7299 }
7300
7301 // .tensorflow.ConfigProto.Experimental experimental = 16;
7302 if (this->has_experimental()) {
7303 total_size += 2 +
7304 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
7305 *experimental_);
7306 }
7307
7308 // int32 intra_op_parallelism_threads = 2;
7309 if (this->intra_op_parallelism_threads() != 0) {
7310 total_size += 1 +
7311 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
7312 this->intra_op_parallelism_threads());
7313 }
7314
7315 // int32 placement_period = 3;
7316 if (this->placement_period() != 0) {
7317 total_size += 1 +
7318 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
7319 this->placement_period());
7320 }
7321
7322 // int32 inter_op_parallelism_threads = 5;
7323 if (this->inter_op_parallelism_threads() != 0) {
7324 total_size += 1 +
7325 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
7326 this->inter_op_parallelism_threads());
7327 }
7328
7329 // bool use_per_session_threads = 9;
7330 if (this->use_per_session_threads() != 0) {
7331 total_size += 1 + 1;
7332 }
7333
7334 // bool allow_soft_placement = 7;
7335 if (this->allow_soft_placement() != 0) {
7336 total_size += 1 + 1;
7337 }
7338
7339 // bool log_device_placement = 8;
7340 if (this->log_device_placement() != 0) {
7341 total_size += 1 + 1;
7342 }
7343
7344 // bool isolate_session_state = 15;
7345 if (this->isolate_session_state() != 0) {
7346 total_size += 1 + 1;
7347 }
7348
7349 // int64 operation_timeout_in_ms = 11;
7350 if (this->operation_timeout_in_ms() != 0) {
7351 total_size += 1 +
7352 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
7353 this->operation_timeout_in_ms());
7354 }
7355
7356 // bool share_cluster_devices_in_session = 17;
7357 if (this->share_cluster_devices_in_session() != 0) {
7358 total_size += 2 + 1;
7359 }
7360
7361 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
7362 SetCachedSize(cached_size);
7363 return total_size;
7364}
7365
7366void ConfigProto::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
7367// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.ConfigProto)
7368 GOOGLE_DCHECK_NE(&from, this);
7369 const ConfigProto* source =
7370 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<ConfigProto>(
7371 &from);
7372 if (source == nullptr) {
7373 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.ConfigProto)
7374 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
7375 } else {
7376 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.ConfigProto)
7377 MergeFrom(*source);
7378 }
7379}
7380
7381void ConfigProto::MergeFrom(const ConfigProto& from) {
7382// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.ConfigProto)
7383 GOOGLE_DCHECK_NE(&from, this);
7384 _internal_metadata_.MergeFrom(from._internal_metadata_);
7385 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7386 (void) cached_has_bits;
7387
7388 device_count_.MergeFrom(from.device_count_);
7389 device_filters_.MergeFrom(from.device_filters_);
7390 session_inter_op_thread_pool_.MergeFrom(from.session_inter_op_thread_pool_);
7391 if (from.has_gpu_options()) {
7392 mutable_gpu_options()->::tensorflow::GPUOptions::MergeFrom(from.gpu_options());
7393 }
7394 if (from.has_graph_options()) {
7395 mutable_graph_options()->::tensorflow::GraphOptions::MergeFrom(from.graph_options());
7396 }
7397 if (from.has_rpc_options()) {
7398 mutable_rpc_options()->::tensorflow::RPCOptions::MergeFrom(from.rpc_options());
7399 }
7400 if (from.has_cluster_def()) {
7401 mutable_cluster_def()->::tensorflow::ClusterDef::MergeFrom(from.cluster_def());
7402 }
7403 if (from.has_experimental()) {
7404 mutable_experimental()->::tensorflow::ConfigProto_Experimental::MergeFrom(from.experimental());
7405 }
7406 if (from.intra_op_parallelism_threads() != 0) {
7407 set_intra_op_parallelism_threads(from.intra_op_parallelism_threads());
7408 }
7409 if (from.placement_period() != 0) {
7410 set_placement_period(from.placement_period());
7411 }
7412 if (from.inter_op_parallelism_threads() != 0) {
7413 set_inter_op_parallelism_threads(from.inter_op_parallelism_threads());
7414 }
7415 if (from.use_per_session_threads() != 0) {
7416 set_use_per_session_threads(from.use_per_session_threads());
7417 }
7418 if (from.allow_soft_placement() != 0) {
7419 set_allow_soft_placement(from.allow_soft_placement());
7420 }
7421 if (from.log_device_placement() != 0) {
7422 set_log_device_placement(from.log_device_placement());
7423 }
7424 if (from.isolate_session_state() != 0) {
7425 set_isolate_session_state(from.isolate_session_state());
7426 }
7427 if (from.operation_timeout_in_ms() != 0) {
7428 set_operation_timeout_in_ms(from.operation_timeout_in_ms());
7429 }
7430 if (from.share_cluster_devices_in_session() != 0) {
7431 set_share_cluster_devices_in_session(from.share_cluster_devices_in_session());
7432 }
7433}
7434
7435void ConfigProto::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
7436// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.ConfigProto)
7437 if (&from == this) return;
7438 Clear();
7439 MergeFrom(from);
7440}
7441
7442void ConfigProto::CopyFrom(const ConfigProto& from) {
7443// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.ConfigProto)
7444 if (&from == this) return;
7445 Clear();
7446 MergeFrom(from);
7447}
7448
7449bool ConfigProto::IsInitialized() const {
7450 return true;
7451}
7452
7453void ConfigProto::InternalSwap(ConfigProto* other) {
7454 using std::swap;
7455 _internal_metadata_.Swap(&other->_internal_metadata_);
7456 device_count_.Swap(&other->device_count_);
7457 device_filters_.InternalSwap(CastToBase(&other->device_filters_));
7458 CastToBase(&session_inter_op_thread_pool_)->InternalSwap(CastToBase(&other->session_inter_op_thread_pool_));
7459 swap(gpu_options_, other->gpu_options_);
7460 swap(graph_options_, other->graph_options_);
7461 swap(rpc_options_, other->rpc_options_);
7462 swap(cluster_def_, other->cluster_def_);
7463 swap(experimental_, other->experimental_);
7464 swap(intra_op_parallelism_threads_, other->intra_op_parallelism_threads_);
7465 swap(placement_period_, other->placement_period_);
7466 swap(inter_op_parallelism_threads_, other->inter_op_parallelism_threads_);
7467 swap(use_per_session_threads_, other->use_per_session_threads_);
7468 swap(allow_soft_placement_, other->allow_soft_placement_);
7469 swap(log_device_placement_, other->log_device_placement_);
7470 swap(isolate_session_state_, other->isolate_session_state_);
7471 swap(operation_timeout_in_ms_, other->operation_timeout_in_ms_);
7472 swap(share_cluster_devices_in_session_, other->share_cluster_devices_in_session_);
7473}
7474
7475::PROTOBUF_NAMESPACE_ID::Metadata ConfigProto::GetMetadata() const {
7476 return GetMetadataStatic();
7477}
7478
7479
7480// ===================================================================
7481
7482void RunOptions_Experimental_RunHandlerPoolOptions::InitAsDefaultInstance() {
7483}
7484class RunOptions_Experimental_RunHandlerPoolOptions::_Internal {
7485 public:
7486};
7487
7488RunOptions_Experimental_RunHandlerPoolOptions::RunOptions_Experimental_RunHandlerPoolOptions()
7489 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
7490 SharedCtor();
7491 // @@protoc_insertion_point(constructor:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7492}
7493RunOptions_Experimental_RunHandlerPoolOptions::RunOptions_Experimental_RunHandlerPoolOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena)
7494 : ::PROTOBUF_NAMESPACE_ID::Message(),
7495 _internal_metadata_(arena) {
7496 SharedCtor();
7497 RegisterArenaDtor(arena);
7498 // @@protoc_insertion_point(arena_constructor:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7499}
7500RunOptions_Experimental_RunHandlerPoolOptions::RunOptions_Experimental_RunHandlerPoolOptions(const RunOptions_Experimental_RunHandlerPoolOptions& from)
7501 : ::PROTOBUF_NAMESPACE_ID::Message(),
7502 _internal_metadata_(nullptr) {
7503 _internal_metadata_.MergeFrom(from._internal_metadata_);
7504 priority_ = from.priority_;
7505 // @@protoc_insertion_point(copy_constructor:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7506}
7507
7508void RunOptions_Experimental_RunHandlerPoolOptions::SharedCtor() {
7509 priority_ = PROTOBUF_LONGLONG(0);
7510}
7511
7512RunOptions_Experimental_RunHandlerPoolOptions::~RunOptions_Experimental_RunHandlerPoolOptions() {
7513 // @@protoc_insertion_point(destructor:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7514 SharedDtor();
7515}
7516
7517void RunOptions_Experimental_RunHandlerPoolOptions::SharedDtor() {
7518 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
7519}
7520
7521void RunOptions_Experimental_RunHandlerPoolOptions::ArenaDtor(void* object) {
7522 RunOptions_Experimental_RunHandlerPoolOptions* _this = reinterpret_cast< RunOptions_Experimental_RunHandlerPoolOptions* >(object);
7523 (void)_this;
7524}
7525void RunOptions_Experimental_RunHandlerPoolOptions::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
7526}
7527void RunOptions_Experimental_RunHandlerPoolOptions::SetCachedSize(int size) const {
7528 _cached_size_.Set(size);
7529}
7530const RunOptions_Experimental_RunHandlerPoolOptions& RunOptions_Experimental_RunHandlerPoolOptions::default_instance() {
7531 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_RunOptions_Experimental_RunHandlerPoolOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
7532 return *internal_default_instance();
7533}
7534
7535
7536void RunOptions_Experimental_RunHandlerPoolOptions::Clear() {
7537// @@protoc_insertion_point(message_clear_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7538 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7539 // Prevent compiler warnings about cached_has_bits being unused
7540 (void) cached_has_bits;
7541
7542 priority_ = PROTOBUF_LONGLONG(0);
7543 _internal_metadata_.Clear();
7544}
7545
7546#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
7547const char* RunOptions_Experimental_RunHandlerPoolOptions::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
7548#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
7549 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
7550 while (!ctx->Done(&ptr)) {
7551 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
7552 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
7553 CHK_(ptr);
7554 switch (tag >> 3) {
7555 // int64 priority = 1;
7556 case 1:
7557 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
7558 priority_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
7559 CHK_(ptr);
7560 } else goto handle_unusual;
7561 continue;
7562 default: {
7563 handle_unusual:
7564 if ((tag & 7) == 4 || tag == 0) {
7565 ctx->SetLastTag(tag);
7566 goto success;
7567 }
7568 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
7569 CHK_(ptr != nullptr);
7570 continue;
7571 }
7572 } // switch
7573 } // while
7574success:
7575 return ptr;
7576failure:
7577 ptr = nullptr;
7578 goto success;
7579#undef CHK_
7580}
7581#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
7582bool RunOptions_Experimental_RunHandlerPoolOptions::MergePartialFromCodedStream(
7583 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
7584#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
7585 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
7586 // @@protoc_insertion_point(parse_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7587 for (;;) {
7588 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
7589 tag = p.first;
7590 if (!p.second) goto handle_unusual;
7591 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
7592 // int64 priority = 1;
7593 case 1: {
7594 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
7595
7596 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
7597 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
7598 input, &priority_)));
7599 } else {
7600 goto handle_unusual;
7601 }
7602 break;
7603 }
7604
7605 default: {
7606 handle_unusual:
7607 if (tag == 0) {
7608 goto success;
7609 }
7610 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
7611 input, tag, _internal_metadata_.mutable_unknown_fields()));
7612 break;
7613 }
7614 }
7615 }
7616success:
7617 // @@protoc_insertion_point(parse_success:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7618 return true;
7619failure:
7620 // @@protoc_insertion_point(parse_failure:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7621 return false;
7622#undef DO_
7623}
7624#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
7625
7626void RunOptions_Experimental_RunHandlerPoolOptions::SerializeWithCachedSizes(
7627 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
7628 // @@protoc_insertion_point(serialize_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7629 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7630 (void) cached_has_bits;
7631
7632 // int64 priority = 1;
7633 if (this->priority() != 0) {
7634 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->priority(), output);
7635 }
7636
7637 if (_internal_metadata_.have_unknown_fields()) {
7638 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
7639 _internal_metadata_.unknown_fields(), output);
7640 }
7641 // @@protoc_insertion_point(serialize_end:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7642}
7643
7644::PROTOBUF_NAMESPACE_ID::uint8* RunOptions_Experimental_RunHandlerPoolOptions::InternalSerializeWithCachedSizesToArray(
7645 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
7646 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7647 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7648 (void) cached_has_bits;
7649
7650 // int64 priority = 1;
7651 if (this->priority() != 0) {
7652 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->priority(), target);
7653 }
7654
7655 if (_internal_metadata_.have_unknown_fields()) {
7656 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
7657 _internal_metadata_.unknown_fields(), target);
7658 }
7659 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7660 return target;
7661}
7662
7663size_t RunOptions_Experimental_RunHandlerPoolOptions::ByteSizeLong() const {
7664// @@protoc_insertion_point(message_byte_size_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7665 size_t total_size = 0;
7666
7667 if (_internal_metadata_.have_unknown_fields()) {
7668 total_size +=
7669 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
7670 _internal_metadata_.unknown_fields());
7671 }
7672 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7673 // Prevent compiler warnings about cached_has_bits being unused
7674 (void) cached_has_bits;
7675
7676 // int64 priority = 1;
7677 if (this->priority() != 0) {
7678 total_size += 1 +
7679 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
7680 this->priority());
7681 }
7682
7683 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
7684 SetCachedSize(cached_size);
7685 return total_size;
7686}
7687
7688void RunOptions_Experimental_RunHandlerPoolOptions::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
7689// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7690 GOOGLE_DCHECK_NE(&from, this);
7691 const RunOptions_Experimental_RunHandlerPoolOptions* source =
7692 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<RunOptions_Experimental_RunHandlerPoolOptions>(
7693 &from);
7694 if (source == nullptr) {
7695 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7696 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
7697 } else {
7698 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7699 MergeFrom(*source);
7700 }
7701}
7702
7703void RunOptions_Experimental_RunHandlerPoolOptions::MergeFrom(const RunOptions_Experimental_RunHandlerPoolOptions& from) {
7704// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7705 GOOGLE_DCHECK_NE(&from, this);
7706 _internal_metadata_.MergeFrom(from._internal_metadata_);
7707 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7708 (void) cached_has_bits;
7709
7710 if (from.priority() != 0) {
7711 set_priority(from.priority());
7712 }
7713}
7714
7715void RunOptions_Experimental_RunHandlerPoolOptions::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
7716// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7717 if (&from == this) return;
7718 Clear();
7719 MergeFrom(from);
7720}
7721
7722void RunOptions_Experimental_RunHandlerPoolOptions::CopyFrom(const RunOptions_Experimental_RunHandlerPoolOptions& from) {
7723// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunOptions.Experimental.RunHandlerPoolOptions)
7724 if (&from == this) return;
7725 Clear();
7726 MergeFrom(from);
7727}
7728
7729bool RunOptions_Experimental_RunHandlerPoolOptions::IsInitialized() const {
7730 return true;
7731}
7732
7733void RunOptions_Experimental_RunHandlerPoolOptions::InternalSwap(RunOptions_Experimental_RunHandlerPoolOptions* other) {
7734 using std::swap;
7735 _internal_metadata_.Swap(&other->_internal_metadata_);
7736 swap(priority_, other->priority_);
7737}
7738
7739::PROTOBUF_NAMESPACE_ID::Metadata RunOptions_Experimental_RunHandlerPoolOptions::GetMetadata() const {
7740 return GetMetadataStatic();
7741}
7742
7743
7744// ===================================================================
7745
7746void RunOptions_Experimental::InitAsDefaultInstance() {
7747 ::tensorflow::_RunOptions_Experimental_default_instance_._instance.get_mutable()->run_handler_pool_options_ = const_cast< ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions*>(
7748 ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions::internal_default_instance());
7749}
7750class RunOptions_Experimental::_Internal {
7751 public:
7752 static const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions& run_handler_pool_options(const RunOptions_Experimental* msg);
7753};
7754
7755const ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions&
7756RunOptions_Experimental::_Internal::run_handler_pool_options(const RunOptions_Experimental* msg) {
7757 return *msg->run_handler_pool_options_;
7758}
7759void RunOptions_Experimental::unsafe_arena_set_allocated_run_handler_pool_options(
7760 ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* run_handler_pool_options) {
7761 if (GetArenaNoVirtual() == nullptr) {
7762 delete run_handler_pool_options_;
7763 }
7764 run_handler_pool_options_ = run_handler_pool_options;
7765 if (run_handler_pool_options) {
7766
7767 } else {
7768
7769 }
7770 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunOptions.Experimental.run_handler_pool_options)
7771}
7772RunOptions_Experimental::RunOptions_Experimental()
7773 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
7774 SharedCtor();
7775 // @@protoc_insertion_point(constructor:tensorflow.RunOptions.Experimental)
7776}
7777RunOptions_Experimental::RunOptions_Experimental(::PROTOBUF_NAMESPACE_ID::Arena* arena)
7778 : ::PROTOBUF_NAMESPACE_ID::Message(),
7779 _internal_metadata_(arena) {
7780 SharedCtor();
7781 RegisterArenaDtor(arena);
7782 // @@protoc_insertion_point(arena_constructor:tensorflow.RunOptions.Experimental)
7783}
7784RunOptions_Experimental::RunOptions_Experimental(const RunOptions_Experimental& from)
7785 : ::PROTOBUF_NAMESPACE_ID::Message(),
7786 _internal_metadata_(nullptr) {
7787 _internal_metadata_.MergeFrom(from._internal_metadata_);
7788 if (from.has_run_handler_pool_options()) {
7789 run_handler_pool_options_ = new ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions(*from.run_handler_pool_options_);
7790 } else {
7791 run_handler_pool_options_ = nullptr;
7792 }
7793 ::memcpy(&collective_graph_key_, &from.collective_graph_key_,
7794 static_cast<size_t>(reinterpret_cast<char*>(&use_run_handler_pool_) -
7795 reinterpret_cast<char*>(&collective_graph_key_)) + sizeof(use_run_handler_pool_));
7796 // @@protoc_insertion_point(copy_constructor:tensorflow.RunOptions.Experimental)
7797}
7798
7799void RunOptions_Experimental::SharedCtor() {
7800 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_RunOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
7801 ::memset(&run_handler_pool_options_, 0, static_cast<size_t>(
7802 reinterpret_cast<char*>(&use_run_handler_pool_) -
7803 reinterpret_cast<char*>(&run_handler_pool_options_)) + sizeof(use_run_handler_pool_));
7804}
7805
7806RunOptions_Experimental::~RunOptions_Experimental() {
7807 // @@protoc_insertion_point(destructor:tensorflow.RunOptions.Experimental)
7808 SharedDtor();
7809}
7810
7811void RunOptions_Experimental::SharedDtor() {
7812 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
7813 if (this != internal_default_instance()) delete run_handler_pool_options_;
7814}
7815
7816void RunOptions_Experimental::ArenaDtor(void* object) {
7817 RunOptions_Experimental* _this = reinterpret_cast< RunOptions_Experimental* >(object);
7818 (void)_this;
7819}
7820void RunOptions_Experimental::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
7821}
7822void RunOptions_Experimental::SetCachedSize(int size) const {
7823 _cached_size_.Set(size);
7824}
7825const RunOptions_Experimental& RunOptions_Experimental::default_instance() {
7826 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_RunOptions_Experimental_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
7827 return *internal_default_instance();
7828}
7829
7830
7831void RunOptions_Experimental::Clear() {
7832// @@protoc_insertion_point(message_clear_start:tensorflow.RunOptions.Experimental)
7833 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7834 // Prevent compiler warnings about cached_has_bits being unused
7835 (void) cached_has_bits;
7836
7837 if (GetArenaNoVirtual() == nullptr && run_handler_pool_options_ != nullptr) {
7838 delete run_handler_pool_options_;
7839 }
7840 run_handler_pool_options_ = nullptr;
7841 ::memset(&collective_graph_key_, 0, static_cast<size_t>(
7842 reinterpret_cast<char*>(&use_run_handler_pool_) -
7843 reinterpret_cast<char*>(&collective_graph_key_)) + sizeof(use_run_handler_pool_));
7844 _internal_metadata_.Clear();
7845}
7846
7847#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
7848const char* RunOptions_Experimental::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
7849#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
7850 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
7851 while (!ctx->Done(&ptr)) {
7852 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
7853 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
7854 CHK_(ptr);
7855 switch (tag >> 3) {
7856 // int64 collective_graph_key = 1;
7857 case 1:
7858 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
7859 collective_graph_key_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
7860 CHK_(ptr);
7861 } else goto handle_unusual;
7862 continue;
7863 // bool use_run_handler_pool = 2;
7864 case 2:
7865 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
7866 use_run_handler_pool_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
7867 CHK_(ptr);
7868 } else goto handle_unusual;
7869 continue;
7870 // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
7871 case 3:
7872 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
7873 ptr = ctx->ParseMessage(mutable_run_handler_pool_options(), ptr);
7874 CHK_(ptr);
7875 } else goto handle_unusual;
7876 continue;
7877 default: {
7878 handle_unusual:
7879 if ((tag & 7) == 4 || tag == 0) {
7880 ctx->SetLastTag(tag);
7881 goto success;
7882 }
7883 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
7884 CHK_(ptr != nullptr);
7885 continue;
7886 }
7887 } // switch
7888 } // while
7889success:
7890 return ptr;
7891failure:
7892 ptr = nullptr;
7893 goto success;
7894#undef CHK_
7895}
7896#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
7897bool RunOptions_Experimental::MergePartialFromCodedStream(
7898 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
7899#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
7900 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
7901 // @@protoc_insertion_point(parse_start:tensorflow.RunOptions.Experimental)
7902 for (;;) {
7903 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
7904 tag = p.first;
7905 if (!p.second) goto handle_unusual;
7906 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
7907 // int64 collective_graph_key = 1;
7908 case 1: {
7909 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
7910
7911 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
7912 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
7913 input, &collective_graph_key_)));
7914 } else {
7915 goto handle_unusual;
7916 }
7917 break;
7918 }
7919
7920 // bool use_run_handler_pool = 2;
7921 case 2: {
7922 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {
7923
7924 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
7925 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
7926 input, &use_run_handler_pool_)));
7927 } else {
7928 goto handle_unusual;
7929 }
7930 break;
7931 }
7932
7933 // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
7934 case 3: {
7935 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
7936 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
7937 input, mutable_run_handler_pool_options()));
7938 } else {
7939 goto handle_unusual;
7940 }
7941 break;
7942 }
7943
7944 default: {
7945 handle_unusual:
7946 if (tag == 0) {
7947 goto success;
7948 }
7949 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
7950 input, tag, _internal_metadata_.mutable_unknown_fields()));
7951 break;
7952 }
7953 }
7954 }
7955success:
7956 // @@protoc_insertion_point(parse_success:tensorflow.RunOptions.Experimental)
7957 return true;
7958failure:
7959 // @@protoc_insertion_point(parse_failure:tensorflow.RunOptions.Experimental)
7960 return false;
7961#undef DO_
7962}
7963#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
7964
7965void RunOptions_Experimental::SerializeWithCachedSizes(
7966 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
7967 // @@protoc_insertion_point(serialize_start:tensorflow.RunOptions.Experimental)
7968 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7969 (void) cached_has_bits;
7970
7971 // int64 collective_graph_key = 1;
7972 if (this->collective_graph_key() != 0) {
7973 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->collective_graph_key(), output);
7974 }
7975
7976 // bool use_run_handler_pool = 2;
7977 if (this->use_run_handler_pool() != 0) {
7978 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(2, this->use_run_handler_pool(), output);
7979 }
7980
7981 // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
7982 if (this->has_run_handler_pool_options()) {
7983 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
7984 3, _Internal::run_handler_pool_options(this), output);
7985 }
7986
7987 if (_internal_metadata_.have_unknown_fields()) {
7988 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
7989 _internal_metadata_.unknown_fields(), output);
7990 }
7991 // @@protoc_insertion_point(serialize_end:tensorflow.RunOptions.Experimental)
7992}
7993
7994::PROTOBUF_NAMESPACE_ID::uint8* RunOptions_Experimental::InternalSerializeWithCachedSizesToArray(
7995 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
7996 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunOptions.Experimental)
7997 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
7998 (void) cached_has_bits;
7999
8000 // int64 collective_graph_key = 1;
8001 if (this->collective_graph_key() != 0) {
8002 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->collective_graph_key(), target);
8003 }
8004
8005 // bool use_run_handler_pool = 2;
8006 if (this->use_run_handler_pool() != 0) {
8007 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(2, this->use_run_handler_pool(), target);
8008 }
8009
8010 // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
8011 if (this->has_run_handler_pool_options()) {
8012 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
8013 InternalWriteMessageToArray(
8014 3, _Internal::run_handler_pool_options(this), target);
8015 }
8016
8017 if (_internal_metadata_.have_unknown_fields()) {
8018 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
8019 _internal_metadata_.unknown_fields(), target);
8020 }
8021 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunOptions.Experimental)
8022 return target;
8023}
8024
8025size_t RunOptions_Experimental::ByteSizeLong() const {
8026// @@protoc_insertion_point(message_byte_size_start:tensorflow.RunOptions.Experimental)
8027 size_t total_size = 0;
8028
8029 if (_internal_metadata_.have_unknown_fields()) {
8030 total_size +=
8031 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
8032 _internal_metadata_.unknown_fields());
8033 }
8034 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
8035 // Prevent compiler warnings about cached_has_bits being unused
8036 (void) cached_has_bits;
8037
8038 // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;
8039 if (this->has_run_handler_pool_options()) {
8040 total_size += 1 +
8041 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
8042 *run_handler_pool_options_);
8043 }
8044
8045 // int64 collective_graph_key = 1;
8046 if (this->collective_graph_key() != 0) {
8047 total_size += 1 +
8048 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
8049 this->collective_graph_key());
8050 }
8051
8052 // bool use_run_handler_pool = 2;
8053 if (this->use_run_handler_pool() != 0) {
8054 total_size += 1 + 1;
8055 }
8056
8057 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
8058 SetCachedSize(cached_size);
8059 return total_size;
8060}
8061
8062void RunOptions_Experimental::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
8063// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.RunOptions.Experimental)
8064 GOOGLE_DCHECK_NE(&from, this);
8065 const RunOptions_Experimental* source =
8066 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<RunOptions_Experimental>(
8067 &from);
8068 if (source == nullptr) {
8069 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.RunOptions.Experimental)
8070 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
8071 } else {
8072 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.RunOptions.Experimental)
8073 MergeFrom(*source);
8074 }
8075}
8076
8077void RunOptions_Experimental::MergeFrom(const RunOptions_Experimental& from) {
8078// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunOptions.Experimental)
8079 GOOGLE_DCHECK_NE(&from, this);
8080 _internal_metadata_.MergeFrom(from._internal_metadata_);
8081 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
8082 (void) cached_has_bits;
8083
8084 if (from.has_run_handler_pool_options()) {
8085 mutable_run_handler_pool_options()->::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions::MergeFrom(from.run_handler_pool_options());
8086 }
8087 if (from.collective_graph_key() != 0) {
8088 set_collective_graph_key(from.collective_graph_key());
8089 }
8090 if (from.use_run_handler_pool() != 0) {
8091 set_use_run_handler_pool(from.use_run_handler_pool());
8092 }
8093}
8094
8095void RunOptions_Experimental::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
8096// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.RunOptions.Experimental)
8097 if (&from == this) return;
8098 Clear();
8099 MergeFrom(from);
8100}
8101
8102void RunOptions_Experimental::CopyFrom(const RunOptions_Experimental& from) {
8103// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunOptions.Experimental)
8104 if (&from == this) return;
8105 Clear();
8106 MergeFrom(from);
8107}
8108
8109bool RunOptions_Experimental::IsInitialized() const {
8110 return true;
8111}
8112
8113void RunOptions_Experimental::InternalSwap(RunOptions_Experimental* other) {
8114 using std::swap;
8115 _internal_metadata_.Swap(&other->_internal_metadata_);
8116 swap(run_handler_pool_options_, other->run_handler_pool_options_);
8117 swap(collective_graph_key_, other->collective_graph_key_);
8118 swap(use_run_handler_pool_, other->use_run_handler_pool_);
8119}
8120
8121::PROTOBUF_NAMESPACE_ID::Metadata RunOptions_Experimental::GetMetadata() const {
8122 return GetMetadataStatic();
8123}
8124
8125
8126// ===================================================================
8127
8128void RunOptions::InitAsDefaultInstance() {
8129 ::tensorflow::_RunOptions_default_instance_._instance.get_mutable()->debug_options_ = const_cast< ::tensorflow::DebugOptions*>(
8130 ::tensorflow::DebugOptions::internal_default_instance());
8131 ::tensorflow::_RunOptions_default_instance_._instance.get_mutable()->experimental_ = const_cast< ::tensorflow::RunOptions_Experimental*>(
8132 ::tensorflow::RunOptions_Experimental::internal_default_instance());
8133}
8134class RunOptions::_Internal {
8135 public:
8136 static const ::tensorflow::DebugOptions& debug_options(const RunOptions* msg);
8137 static const ::tensorflow::RunOptions_Experimental& experimental(const RunOptions* msg);
8138};
8139
8140const ::tensorflow::DebugOptions&
8141RunOptions::_Internal::debug_options(const RunOptions* msg) {
8142 return *msg->debug_options_;
8143}
8144const ::tensorflow::RunOptions_Experimental&
8145RunOptions::_Internal::experimental(const RunOptions* msg) {
8146 return *msg->experimental_;
8147}
8148void RunOptions::unsafe_arena_set_allocated_debug_options(
8149 ::tensorflow::DebugOptions* debug_options) {
8150 if (GetArenaNoVirtual() == nullptr) {
8151 delete debug_options_;
8152 }
8153 debug_options_ = debug_options;
8154 if (debug_options) {
8155
8156 } else {
8157
8158 }
8159 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunOptions.debug_options)
8160}
8161void RunOptions::clear_debug_options() {
8162 if (GetArenaNoVirtual() == nullptr && debug_options_ != nullptr) {
8163 delete debug_options_;
8164 }
8165 debug_options_ = nullptr;
8166}
8167void RunOptions::unsafe_arena_set_allocated_experimental(
8168 ::tensorflow::RunOptions_Experimental* experimental) {
8169 if (GetArenaNoVirtual() == nullptr) {
8170 delete experimental_;
8171 }
8172 experimental_ = experimental;
8173 if (experimental) {
8174
8175 } else {
8176
8177 }
8178 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunOptions.experimental)
8179}
8180RunOptions::RunOptions()
8181 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
8182 SharedCtor();
8183 // @@protoc_insertion_point(constructor:tensorflow.RunOptions)
8184}
8185RunOptions::RunOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena)
8186 : ::PROTOBUF_NAMESPACE_ID::Message(),
8187 _internal_metadata_(arena) {
8188 SharedCtor();
8189 RegisterArenaDtor(arena);
8190 // @@protoc_insertion_point(arena_constructor:tensorflow.RunOptions)
8191}
8192RunOptions::RunOptions(const RunOptions& from)
8193 : ::PROTOBUF_NAMESPACE_ID::Message(),
8194 _internal_metadata_(nullptr) {
8195 _internal_metadata_.MergeFrom(from._internal_metadata_);
8196 if (from.has_debug_options()) {
8197 debug_options_ = new ::tensorflow::DebugOptions(*from.debug_options_);
8198 } else {
8199 debug_options_ = nullptr;
8200 }
8201 if (from.has_experimental()) {
8202 experimental_ = new ::tensorflow::RunOptions_Experimental(*from.experimental_);
8203 } else {
8204 experimental_ = nullptr;
8205 }
8206 ::memcpy(&timeout_in_ms_, &from.timeout_in_ms_,
8207 static_cast<size_t>(reinterpret_cast<char*>(&report_tensor_allocations_upon_oom_) -
8208 reinterpret_cast<char*>(&timeout_in_ms_)) + sizeof(report_tensor_allocations_upon_oom_));
8209 // @@protoc_insertion_point(copy_constructor:tensorflow.RunOptions)
8210}
8211
8212void RunOptions::SharedCtor() {
8213 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_RunOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
8214 ::memset(&debug_options_, 0, static_cast<size_t>(
8215 reinterpret_cast<char*>(&report_tensor_allocations_upon_oom_) -
8216 reinterpret_cast<char*>(&debug_options_)) + sizeof(report_tensor_allocations_upon_oom_));
8217}
8218
8219RunOptions::~RunOptions() {
8220 // @@protoc_insertion_point(destructor:tensorflow.RunOptions)
8221 SharedDtor();
8222}
8223
8224void RunOptions::SharedDtor() {
8225 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
8226 if (this != internal_default_instance()) delete debug_options_;
8227 if (this != internal_default_instance()) delete experimental_;
8228}
8229
8230void RunOptions::ArenaDtor(void* object) {
8231 RunOptions* _this = reinterpret_cast< RunOptions* >(object);
8232 (void)_this;
8233}
8234void RunOptions::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
8235}
8236void RunOptions::SetCachedSize(int size) const {
8237 _cached_size_.Set(size);
8238}
8239const RunOptions& RunOptions::default_instance() {
8240 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_RunOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
8241 return *internal_default_instance();
8242}
8243
8244
8245void RunOptions::Clear() {
8246// @@protoc_insertion_point(message_clear_start:tensorflow.RunOptions)
8247 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
8248 // Prevent compiler warnings about cached_has_bits being unused
8249 (void) cached_has_bits;
8250
8251 if (GetArenaNoVirtual() == nullptr && debug_options_ != nullptr) {
8252 delete debug_options_;
8253 }
8254 debug_options_ = nullptr;
8255 if (GetArenaNoVirtual() == nullptr && experimental_ != nullptr) {
8256 delete experimental_;
8257 }
8258 experimental_ = nullptr;
8259 ::memset(&timeout_in_ms_, 0, static_cast<size_t>(
8260 reinterpret_cast<char*>(&report_tensor_allocations_upon_oom_) -
8261 reinterpret_cast<char*>(&timeout_in_ms_)) + sizeof(report_tensor_allocations_upon_oom_));
8262 _internal_metadata_.Clear();
8263}
8264
8265#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
8266const char* RunOptions::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
8267#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
8268 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
8269 while (!ctx->Done(&ptr)) {
8270 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
8271 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
8272 CHK_(ptr);
8273 switch (tag >> 3) {
8274 // .tensorflow.RunOptions.TraceLevel trace_level = 1;
8275 case 1:
8276 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
8277 ::PROTOBUF_NAMESPACE_ID::uint64 val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
8278 CHK_(ptr);
8279 set_trace_level(static_cast<::tensorflow::RunOptions_TraceLevel>(val));
8280 } else goto handle_unusual;
8281 continue;
8282 // int64 timeout_in_ms = 2;
8283 case 2:
8284 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
8285 timeout_in_ms_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
8286 CHK_(ptr);
8287 } else goto handle_unusual;
8288 continue;
8289 // int32 inter_op_thread_pool = 3;
8290 case 3:
8291 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
8292 inter_op_thread_pool_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
8293 CHK_(ptr);
8294 } else goto handle_unusual;
8295 continue;
8296 // bool output_partition_graphs = 5;
8297 case 5:
8298 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
8299 output_partition_graphs_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
8300 CHK_(ptr);
8301 } else goto handle_unusual;
8302 continue;
8303 // .tensorflow.DebugOptions debug_options = 6;
8304 case 6:
8305 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) {
8306 ptr = ctx->ParseMessage(mutable_debug_options(), ptr);
8307 CHK_(ptr);
8308 } else goto handle_unusual;
8309 continue;
8310 // bool report_tensor_allocations_upon_oom = 7;
8311 case 7:
8312 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 56)) {
8313 report_tensor_allocations_upon_oom_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
8314 CHK_(ptr);
8315 } else goto handle_unusual;
8316 continue;
8317 // .tensorflow.RunOptions.Experimental experimental = 8;
8318 case 8:
8319 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 66)) {
8320 ptr = ctx->ParseMessage(mutable_experimental(), ptr);
8321 CHK_(ptr);
8322 } else goto handle_unusual;
8323 continue;
8324 default: {
8325 handle_unusual:
8326 if ((tag & 7) == 4 || tag == 0) {
8327 ctx->SetLastTag(tag);
8328 goto success;
8329 }
8330 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
8331 CHK_(ptr != nullptr);
8332 continue;
8333 }
8334 } // switch
8335 } // while
8336success:
8337 return ptr;
8338failure:
8339 ptr = nullptr;
8340 goto success;
8341#undef CHK_
8342}
8343#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
8344bool RunOptions::MergePartialFromCodedStream(
8345 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
8346#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
8347 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
8348 // @@protoc_insertion_point(parse_start:tensorflow.RunOptions)
8349 for (;;) {
8350 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
8351 tag = p.first;
8352 if (!p.second) goto handle_unusual;
8353 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
8354 // .tensorflow.RunOptions.TraceLevel trace_level = 1;
8355 case 1: {
8356 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
8357 int value = 0;
8358 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
8359 int, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_ENUM>(
8360 input, &value)));
8361 set_trace_level(static_cast< ::tensorflow::RunOptions_TraceLevel >(value));
8362 } else {
8363 goto handle_unusual;
8364 }
8365 break;
8366 }
8367
8368 // int64 timeout_in_ms = 2;
8369 case 2: {
8370 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {
8371
8372 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
8373 ::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
8374 input, &timeout_in_ms_)));
8375 } else {
8376 goto handle_unusual;
8377 }
8378 break;
8379 }
8380
8381 // int32 inter_op_thread_pool = 3;
8382 case 3: {
8383 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
8384
8385 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
8386 ::PROTOBUF_NAMESPACE_ID::int32, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT32>(
8387 input, &inter_op_thread_pool_)));
8388 } else {
8389 goto handle_unusual;
8390 }
8391 break;
8392 }
8393
8394 // bool output_partition_graphs = 5;
8395 case 5: {
8396 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
8397
8398 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
8399 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
8400 input, &output_partition_graphs_)));
8401 } else {
8402 goto handle_unusual;
8403 }
8404 break;
8405 }
8406
8407 // .tensorflow.DebugOptions debug_options = 6;
8408 case 6: {
8409 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) {
8410 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
8411 input, mutable_debug_options()));
8412 } else {
8413 goto handle_unusual;
8414 }
8415 break;
8416 }
8417
8418 // bool report_tensor_allocations_upon_oom = 7;
8419 case 7: {
8420 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (56 & 0xFF)) {
8421
8422 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
8423 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
8424 input, &report_tensor_allocations_upon_oom_)));
8425 } else {
8426 goto handle_unusual;
8427 }
8428 break;
8429 }
8430
8431 // .tensorflow.RunOptions.Experimental experimental = 8;
8432 case 8: {
8433 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (66 & 0xFF)) {
8434 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
8435 input, mutable_experimental()));
8436 } else {
8437 goto handle_unusual;
8438 }
8439 break;
8440 }
8441
8442 default: {
8443 handle_unusual:
8444 if (tag == 0) {
8445 goto success;
8446 }
8447 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
8448 input, tag, _internal_metadata_.mutable_unknown_fields()));
8449 break;
8450 }
8451 }
8452 }
8453success:
8454 // @@protoc_insertion_point(parse_success:tensorflow.RunOptions)
8455 return true;
8456failure:
8457 // @@protoc_insertion_point(parse_failure:tensorflow.RunOptions)
8458 return false;
8459#undef DO_
8460}
8461#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
8462
8463void RunOptions::SerializeWithCachedSizes(
8464 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
8465 // @@protoc_insertion_point(serialize_start:tensorflow.RunOptions)
8466 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
8467 (void) cached_has_bits;
8468
8469 // .tensorflow.RunOptions.TraceLevel trace_level = 1;
8470 if (this->trace_level() != 0) {
8471 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnum(
8472 1, this->trace_level(), output);
8473 }
8474
8475 // int64 timeout_in_ms = 2;
8476 if (this->timeout_in_ms() != 0) {
8477 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(2, this->timeout_in_ms(), output);
8478 }
8479
8480 // int32 inter_op_thread_pool = 3;
8481 if (this->inter_op_thread_pool() != 0) {
8482 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32(3, this->inter_op_thread_pool(), output);
8483 }
8484
8485 // bool output_partition_graphs = 5;
8486 if (this->output_partition_graphs() != 0) {
8487 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(5, this->output_partition_graphs(), output);
8488 }
8489
8490 // .tensorflow.DebugOptions debug_options = 6;
8491 if (this->has_debug_options()) {
8492 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
8493 6, _Internal::debug_options(this), output);
8494 }
8495
8496 // bool report_tensor_allocations_upon_oom = 7;
8497 if (this->report_tensor_allocations_upon_oom() != 0) {
8498 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(7, this->report_tensor_allocations_upon_oom(), output);
8499 }
8500
8501 // .tensorflow.RunOptions.Experimental experimental = 8;
8502 if (this->has_experimental()) {
8503 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
8504 8, _Internal::experimental(this), output);
8505 }
8506
8507 if (_internal_metadata_.have_unknown_fields()) {
8508 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
8509 _internal_metadata_.unknown_fields(), output);
8510 }
8511 // @@protoc_insertion_point(serialize_end:tensorflow.RunOptions)
8512}
8513
8514::PROTOBUF_NAMESPACE_ID::uint8* RunOptions::InternalSerializeWithCachedSizesToArray(
8515 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
8516 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunOptions)
8517 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
8518 (void) cached_has_bits;
8519
8520 // .tensorflow.RunOptions.TraceLevel trace_level = 1;
8521 if (this->trace_level() != 0) {
8522 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray(
8523 1, this->trace_level(), target);
8524 }
8525
8526 // int64 timeout_in_ms = 2;
8527 if (this->timeout_in_ms() != 0) {
8528 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(2, this->timeout_in_ms(), target);
8529 }
8530
8531 // int32 inter_op_thread_pool = 3;
8532 if (this->inter_op_thread_pool() != 0) {
8533 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(3, this->inter_op_thread_pool(), target);
8534 }
8535
8536 // bool output_partition_graphs = 5;
8537 if (this->output_partition_graphs() != 0) {
8538 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(5, this->output_partition_graphs(), target);
8539 }
8540
8541 // .tensorflow.DebugOptions debug_options = 6;
8542 if (this->has_debug_options()) {
8543 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
8544 InternalWriteMessageToArray(
8545 6, _Internal::debug_options(this), target);
8546 }
8547
8548 // bool report_tensor_allocations_upon_oom = 7;
8549 if (this->report_tensor_allocations_upon_oom() != 0) {
8550 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(7, this->report_tensor_allocations_upon_oom(), target);
8551 }
8552
8553 // .tensorflow.RunOptions.Experimental experimental = 8;
8554 if (this->has_experimental()) {
8555 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
8556 InternalWriteMessageToArray(
8557 8, _Internal::experimental(this), target);
8558 }
8559
8560 if (_internal_metadata_.have_unknown_fields()) {
8561 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
8562 _internal_metadata_.unknown_fields(), target);
8563 }
8564 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunOptions)
8565 return target;
8566}
8567
8568size_t RunOptions::ByteSizeLong() const {
8569// @@protoc_insertion_point(message_byte_size_start:tensorflow.RunOptions)
8570 size_t total_size = 0;
8571
8572 if (_internal_metadata_.have_unknown_fields()) {
8573 total_size +=
8574 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
8575 _internal_metadata_.unknown_fields());
8576 }
8577 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
8578 // Prevent compiler warnings about cached_has_bits being unused
8579 (void) cached_has_bits;
8580
8581 // .tensorflow.DebugOptions debug_options = 6;
8582 if (this->has_debug_options()) {
8583 total_size += 1 +
8584 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
8585 *debug_options_);
8586 }
8587
8588 // .tensorflow.RunOptions.Experimental experimental = 8;
8589 if (this->has_experimental()) {
8590 total_size += 1 +
8591 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
8592 *experimental_);
8593 }
8594
8595 // int64 timeout_in_ms = 2;
8596 if (this->timeout_in_ms() != 0) {
8597 total_size += 1 +
8598 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
8599 this->timeout_in_ms());
8600 }
8601
8602 // .tensorflow.RunOptions.TraceLevel trace_level = 1;
8603 if (this->trace_level() != 0) {
8604 total_size += 1 +
8605 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->trace_level());
8606 }
8607
8608 // int32 inter_op_thread_pool = 3;
8609 if (this->inter_op_thread_pool() != 0) {
8610 total_size += 1 +
8611 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
8612 this->inter_op_thread_pool());
8613 }
8614
8615 // bool output_partition_graphs = 5;
8616 if (this->output_partition_graphs() != 0) {
8617 total_size += 1 + 1;
8618 }
8619
8620 // bool report_tensor_allocations_upon_oom = 7;
8621 if (this->report_tensor_allocations_upon_oom() != 0) {
8622 total_size += 1 + 1;
8623 }
8624
8625 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
8626 SetCachedSize(cached_size);
8627 return total_size;
8628}
8629
8630void RunOptions::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
8631// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.RunOptions)
8632 GOOGLE_DCHECK_NE(&from, this);
8633 const RunOptions* source =
8634 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<RunOptions>(
8635 &from);
8636 if (source == nullptr) {
8637 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.RunOptions)
8638 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
8639 } else {
8640 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.RunOptions)
8641 MergeFrom(*source);
8642 }
8643}
8644
8645void RunOptions::MergeFrom(const RunOptions& from) {
8646// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunOptions)
8647 GOOGLE_DCHECK_NE(&from, this);
8648 _internal_metadata_.MergeFrom(from._internal_metadata_);
8649 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
8650 (void) cached_has_bits;
8651
8652 if (from.has_debug_options()) {
8653 mutable_debug_options()->::tensorflow::DebugOptions::MergeFrom(from.debug_options());
8654 }
8655 if (from.has_experimental()) {
8656 mutable_experimental()->::tensorflow::RunOptions_Experimental::MergeFrom(from.experimental());
8657 }
8658 if (from.timeout_in_ms() != 0) {
8659 set_timeout_in_ms(from.timeout_in_ms());
8660 }
8661 if (from.trace_level() != 0) {
8662 set_trace_level(from.trace_level());
8663 }
8664 if (from.inter_op_thread_pool() != 0) {
8665 set_inter_op_thread_pool(from.inter_op_thread_pool());
8666 }
8667 if (from.output_partition_graphs() != 0) {
8668 set_output_partition_graphs(from.output_partition_graphs());
8669 }
8670 if (from.report_tensor_allocations_upon_oom() != 0) {
8671 set_report_tensor_allocations_upon_oom(from.report_tensor_allocations_upon_oom());
8672 }
8673}
8674
8675void RunOptions::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
8676// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.RunOptions)
8677 if (&from == this) return;
8678 Clear();
8679 MergeFrom(from);
8680}
8681
8682void RunOptions::CopyFrom(const RunOptions& from) {
8683// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunOptions)
8684 if (&from == this) return;
8685 Clear();
8686 MergeFrom(from);
8687}
8688
8689bool RunOptions::IsInitialized() const {
8690 return true;
8691}
8692
8693void RunOptions::InternalSwap(RunOptions* other) {
8694 using std::swap;
8695 _internal_metadata_.Swap(&other->_internal_metadata_);
8696 swap(debug_options_, other->debug_options_);
8697 swap(experimental_, other->experimental_);
8698 swap(timeout_in_ms_, other->timeout_in_ms_);
8699 swap(trace_level_, other->trace_level_);
8700 swap(inter_op_thread_pool_, other->inter_op_thread_pool_);
8701 swap(output_partition_graphs_, other->output_partition_graphs_);
8702 swap(report_tensor_allocations_upon_oom_, other->report_tensor_allocations_upon_oom_);
8703}
8704
8705::PROTOBUF_NAMESPACE_ID::Metadata RunOptions::GetMetadata() const {
8706 return GetMetadataStatic();
8707}
8708
8709
8710// ===================================================================
8711
8712void RunMetadata_FunctionGraphs::InitAsDefaultInstance() {
8713 ::tensorflow::_RunMetadata_FunctionGraphs_default_instance_._instance.get_mutable()->pre_optimization_graph_ = const_cast< ::tensorflow::GraphDef*>(
8714 ::tensorflow::GraphDef::internal_default_instance());
8715 ::tensorflow::_RunMetadata_FunctionGraphs_default_instance_._instance.get_mutable()->post_optimization_graph_ = const_cast< ::tensorflow::GraphDef*>(
8716 ::tensorflow::GraphDef::internal_default_instance());
8717}
8718class RunMetadata_FunctionGraphs::_Internal {
8719 public:
8720 static const ::tensorflow::GraphDef& pre_optimization_graph(const RunMetadata_FunctionGraphs* msg);
8721 static const ::tensorflow::GraphDef& post_optimization_graph(const RunMetadata_FunctionGraphs* msg);
8722};
8723
8724const ::tensorflow::GraphDef&
8725RunMetadata_FunctionGraphs::_Internal::pre_optimization_graph(const RunMetadata_FunctionGraphs* msg) {
8726 return *msg->pre_optimization_graph_;
8727}
8728const ::tensorflow::GraphDef&
8729RunMetadata_FunctionGraphs::_Internal::post_optimization_graph(const RunMetadata_FunctionGraphs* msg) {
8730 return *msg->post_optimization_graph_;
8731}
8732void RunMetadata_FunctionGraphs::clear_partition_graphs() {
8733 partition_graphs_.Clear();
8734}
8735void RunMetadata_FunctionGraphs::unsafe_arena_set_allocated_pre_optimization_graph(
8736 ::tensorflow::GraphDef* pre_optimization_graph) {
8737 if (GetArenaNoVirtual() == nullptr) {
8738 delete pre_optimization_graph_;
8739 }
8740 pre_optimization_graph_ = pre_optimization_graph;
8741 if (pre_optimization_graph) {
8742
8743 } else {
8744
8745 }
8746 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.FunctionGraphs.pre_optimization_graph)
8747}
8748void RunMetadata_FunctionGraphs::clear_pre_optimization_graph() {
8749 if (GetArenaNoVirtual() == nullptr && pre_optimization_graph_ != nullptr) {
8750 delete pre_optimization_graph_;
8751 }
8752 pre_optimization_graph_ = nullptr;
8753}
8754void RunMetadata_FunctionGraphs::unsafe_arena_set_allocated_post_optimization_graph(
8755 ::tensorflow::GraphDef* post_optimization_graph) {
8756 if (GetArenaNoVirtual() == nullptr) {
8757 delete post_optimization_graph_;
8758 }
8759 post_optimization_graph_ = post_optimization_graph;
8760 if (post_optimization_graph) {
8761
8762 } else {
8763
8764 }
8765 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.FunctionGraphs.post_optimization_graph)
8766}
8767void RunMetadata_FunctionGraphs::clear_post_optimization_graph() {
8768 if (GetArenaNoVirtual() == nullptr && post_optimization_graph_ != nullptr) {
8769 delete post_optimization_graph_;
8770 }
8771 post_optimization_graph_ = nullptr;
8772}
8773RunMetadata_FunctionGraphs::RunMetadata_FunctionGraphs()
8774 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
8775 SharedCtor();
8776 // @@protoc_insertion_point(constructor:tensorflow.RunMetadata.FunctionGraphs)
8777}
8778RunMetadata_FunctionGraphs::RunMetadata_FunctionGraphs(::PROTOBUF_NAMESPACE_ID::Arena* arena)
8779 : ::PROTOBUF_NAMESPACE_ID::Message(),
8780 _internal_metadata_(arena),
8781 partition_graphs_(arena) {
8782 SharedCtor();
8783 RegisterArenaDtor(arena);
8784 // @@protoc_insertion_point(arena_constructor:tensorflow.RunMetadata.FunctionGraphs)
8785}
8786RunMetadata_FunctionGraphs::RunMetadata_FunctionGraphs(const RunMetadata_FunctionGraphs& from)
8787 : ::PROTOBUF_NAMESPACE_ID::Message(),
8788 _internal_metadata_(nullptr),
8789 partition_graphs_(from.partition_graphs_) {
8790 _internal_metadata_.MergeFrom(from._internal_metadata_);
8791 if (from.has_pre_optimization_graph()) {
8792 pre_optimization_graph_ = new ::tensorflow::GraphDef(*from.pre_optimization_graph_);
8793 } else {
8794 pre_optimization_graph_ = nullptr;
8795 }
8796 if (from.has_post_optimization_graph()) {
8797 post_optimization_graph_ = new ::tensorflow::GraphDef(*from.post_optimization_graph_);
8798 } else {
8799 post_optimization_graph_ = nullptr;
8800 }
8801 // @@protoc_insertion_point(copy_constructor:tensorflow.RunMetadata.FunctionGraphs)
8802}
8803
8804void RunMetadata_FunctionGraphs::SharedCtor() {
8805 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_RunMetadata_FunctionGraphs_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
8806 ::memset(&pre_optimization_graph_, 0, static_cast<size_t>(
8807 reinterpret_cast<char*>(&post_optimization_graph_) -
8808 reinterpret_cast<char*>(&pre_optimization_graph_)) + sizeof(post_optimization_graph_));
8809}
8810
8811RunMetadata_FunctionGraphs::~RunMetadata_FunctionGraphs() {
8812 // @@protoc_insertion_point(destructor:tensorflow.RunMetadata.FunctionGraphs)
8813 SharedDtor();
8814}
8815
8816void RunMetadata_FunctionGraphs::SharedDtor() {
8817 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
8818 if (this != internal_default_instance()) delete pre_optimization_graph_;
8819 if (this != internal_default_instance()) delete post_optimization_graph_;
8820}
8821
8822void RunMetadata_FunctionGraphs::ArenaDtor(void* object) {
8823 RunMetadata_FunctionGraphs* _this = reinterpret_cast< RunMetadata_FunctionGraphs* >(object);
8824 (void)_this;
8825}
8826void RunMetadata_FunctionGraphs::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
8827}
8828void RunMetadata_FunctionGraphs::SetCachedSize(int size) const {
8829 _cached_size_.Set(size);
8830}
8831const RunMetadata_FunctionGraphs& RunMetadata_FunctionGraphs::default_instance() {
8832 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_RunMetadata_FunctionGraphs_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
8833 return *internal_default_instance();
8834}
8835
8836
8837void RunMetadata_FunctionGraphs::Clear() {
8838// @@protoc_insertion_point(message_clear_start:tensorflow.RunMetadata.FunctionGraphs)
8839 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
8840 // Prevent compiler warnings about cached_has_bits being unused
8841 (void) cached_has_bits;
8842
8843 partition_graphs_.Clear();
8844 if (GetArenaNoVirtual() == nullptr && pre_optimization_graph_ != nullptr) {
8845 delete pre_optimization_graph_;
8846 }
8847 pre_optimization_graph_ = nullptr;
8848 if (GetArenaNoVirtual() == nullptr && post_optimization_graph_ != nullptr) {
8849 delete post_optimization_graph_;
8850 }
8851 post_optimization_graph_ = nullptr;
8852 _internal_metadata_.Clear();
8853}
8854
8855#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
8856const char* RunMetadata_FunctionGraphs::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
8857#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
8858 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
8859 while (!ctx->Done(&ptr)) {
8860 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
8861 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
8862 CHK_(ptr);
8863 switch (tag >> 3) {
8864 // repeated .tensorflow.GraphDef partition_graphs = 1;
8865 case 1:
8866 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
8867 ptr -= 1;
8868 do {
8869 ptr += 1;
8870 ptr = ctx->ParseMessage(add_partition_graphs(), ptr);
8871 CHK_(ptr);
8872 if (!ctx->DataAvailable(ptr)) break;
8873 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 10);
8874 } else goto handle_unusual;
8875 continue;
8876 // .tensorflow.GraphDef pre_optimization_graph = 2;
8877 case 2:
8878 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
8879 ptr = ctx->ParseMessage(mutable_pre_optimization_graph(), ptr);
8880 CHK_(ptr);
8881 } else goto handle_unusual;
8882 continue;
8883 // .tensorflow.GraphDef post_optimization_graph = 3;
8884 case 3:
8885 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
8886 ptr = ctx->ParseMessage(mutable_post_optimization_graph(), ptr);
8887 CHK_(ptr);
8888 } else goto handle_unusual;
8889 continue;
8890 default: {
8891 handle_unusual:
8892 if ((tag & 7) == 4 || tag == 0) {
8893 ctx->SetLastTag(tag);
8894 goto success;
8895 }
8896 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
8897 CHK_(ptr != nullptr);
8898 continue;
8899 }
8900 } // switch
8901 } // while
8902success:
8903 return ptr;
8904failure:
8905 ptr = nullptr;
8906 goto success;
8907#undef CHK_
8908}
8909#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
8910bool RunMetadata_FunctionGraphs::MergePartialFromCodedStream(
8911 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
8912#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
8913 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
8914 // @@protoc_insertion_point(parse_start:tensorflow.RunMetadata.FunctionGraphs)
8915 for (;;) {
8916 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
8917 tag = p.first;
8918 if (!p.second) goto handle_unusual;
8919 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
8920 // repeated .tensorflow.GraphDef partition_graphs = 1;
8921 case 1: {
8922 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
8923 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
8924 input, add_partition_graphs()));
8925 } else {
8926 goto handle_unusual;
8927 }
8928 break;
8929 }
8930
8931 // .tensorflow.GraphDef pre_optimization_graph = 2;
8932 case 2: {
8933 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
8934 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
8935 input, mutable_pre_optimization_graph()));
8936 } else {
8937 goto handle_unusual;
8938 }
8939 break;
8940 }
8941
8942 // .tensorflow.GraphDef post_optimization_graph = 3;
8943 case 3: {
8944 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
8945 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
8946 input, mutable_post_optimization_graph()));
8947 } else {
8948 goto handle_unusual;
8949 }
8950 break;
8951 }
8952
8953 default: {
8954 handle_unusual:
8955 if (tag == 0) {
8956 goto success;
8957 }
8958 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
8959 input, tag, _internal_metadata_.mutable_unknown_fields()));
8960 break;
8961 }
8962 }
8963 }
8964success:
8965 // @@protoc_insertion_point(parse_success:tensorflow.RunMetadata.FunctionGraphs)
8966 return true;
8967failure:
8968 // @@protoc_insertion_point(parse_failure:tensorflow.RunMetadata.FunctionGraphs)
8969 return false;
8970#undef DO_
8971}
8972#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
8973
8974void RunMetadata_FunctionGraphs::SerializeWithCachedSizes(
8975 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
8976 // @@protoc_insertion_point(serialize_start:tensorflow.RunMetadata.FunctionGraphs)
8977 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
8978 (void) cached_has_bits;
8979
8980 // repeated .tensorflow.GraphDef partition_graphs = 1;
8981 for (unsigned int i = 0,
8982 n = static_cast<unsigned int>(this->partition_graphs_size()); i < n; i++) {
8983 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
8984 1,
8985 this->partition_graphs(static_cast<int>(i)),
8986 output);
8987 }
8988
8989 // .tensorflow.GraphDef pre_optimization_graph = 2;
8990 if (this->has_pre_optimization_graph()) {
8991 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
8992 2, _Internal::pre_optimization_graph(this), output);
8993 }
8994
8995 // .tensorflow.GraphDef post_optimization_graph = 3;
8996 if (this->has_post_optimization_graph()) {
8997 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
8998 3, _Internal::post_optimization_graph(this), output);
8999 }
9000
9001 if (_internal_metadata_.have_unknown_fields()) {
9002 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
9003 _internal_metadata_.unknown_fields(), output);
9004 }
9005 // @@protoc_insertion_point(serialize_end:tensorflow.RunMetadata.FunctionGraphs)
9006}
9007
9008::PROTOBUF_NAMESPACE_ID::uint8* RunMetadata_FunctionGraphs::InternalSerializeWithCachedSizesToArray(
9009 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
9010 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunMetadata.FunctionGraphs)
9011 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9012 (void) cached_has_bits;
9013
9014 // repeated .tensorflow.GraphDef partition_graphs = 1;
9015 for (unsigned int i = 0,
9016 n = static_cast<unsigned int>(this->partition_graphs_size()); i < n; i++) {
9017 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
9018 InternalWriteMessageToArray(
9019 1, this->partition_graphs(static_cast<int>(i)), target);
9020 }
9021
9022 // .tensorflow.GraphDef pre_optimization_graph = 2;
9023 if (this->has_pre_optimization_graph()) {
9024 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
9025 InternalWriteMessageToArray(
9026 2, _Internal::pre_optimization_graph(this), target);
9027 }
9028
9029 // .tensorflow.GraphDef post_optimization_graph = 3;
9030 if (this->has_post_optimization_graph()) {
9031 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
9032 InternalWriteMessageToArray(
9033 3, _Internal::post_optimization_graph(this), target);
9034 }
9035
9036 if (_internal_metadata_.have_unknown_fields()) {
9037 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
9038 _internal_metadata_.unknown_fields(), target);
9039 }
9040 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunMetadata.FunctionGraphs)
9041 return target;
9042}
9043
9044size_t RunMetadata_FunctionGraphs::ByteSizeLong() const {
9045// @@protoc_insertion_point(message_byte_size_start:tensorflow.RunMetadata.FunctionGraphs)
9046 size_t total_size = 0;
9047
9048 if (_internal_metadata_.have_unknown_fields()) {
9049 total_size +=
9050 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
9051 _internal_metadata_.unknown_fields());
9052 }
9053 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9054 // Prevent compiler warnings about cached_has_bits being unused
9055 (void) cached_has_bits;
9056
9057 // repeated .tensorflow.GraphDef partition_graphs = 1;
9058 {
9059 unsigned int count = static_cast<unsigned int>(this->partition_graphs_size());
9060 total_size += 1UL * count;
9061 for (unsigned int i = 0; i < count; i++) {
9062 total_size +=
9063 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
9064 this->partition_graphs(static_cast<int>(i)));
9065 }
9066 }
9067
9068 // .tensorflow.GraphDef pre_optimization_graph = 2;
9069 if (this->has_pre_optimization_graph()) {
9070 total_size += 1 +
9071 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
9072 *pre_optimization_graph_);
9073 }
9074
9075 // .tensorflow.GraphDef post_optimization_graph = 3;
9076 if (this->has_post_optimization_graph()) {
9077 total_size += 1 +
9078 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
9079 *post_optimization_graph_);
9080 }
9081
9082 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
9083 SetCachedSize(cached_size);
9084 return total_size;
9085}
9086
9087void RunMetadata_FunctionGraphs::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
9088// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.RunMetadata.FunctionGraphs)
9089 GOOGLE_DCHECK_NE(&from, this);
9090 const RunMetadata_FunctionGraphs* source =
9091 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<RunMetadata_FunctionGraphs>(
9092 &from);
9093 if (source == nullptr) {
9094 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.RunMetadata.FunctionGraphs)
9095 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
9096 } else {
9097 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.RunMetadata.FunctionGraphs)
9098 MergeFrom(*source);
9099 }
9100}
9101
9102void RunMetadata_FunctionGraphs::MergeFrom(const RunMetadata_FunctionGraphs& from) {
9103// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunMetadata.FunctionGraphs)
9104 GOOGLE_DCHECK_NE(&from, this);
9105 _internal_metadata_.MergeFrom(from._internal_metadata_);
9106 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9107 (void) cached_has_bits;
9108
9109 partition_graphs_.MergeFrom(from.partition_graphs_);
9110 if (from.has_pre_optimization_graph()) {
9111 mutable_pre_optimization_graph()->::tensorflow::GraphDef::MergeFrom(from.pre_optimization_graph());
9112 }
9113 if (from.has_post_optimization_graph()) {
9114 mutable_post_optimization_graph()->::tensorflow::GraphDef::MergeFrom(from.post_optimization_graph());
9115 }
9116}
9117
9118void RunMetadata_FunctionGraphs::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
9119// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.RunMetadata.FunctionGraphs)
9120 if (&from == this) return;
9121 Clear();
9122 MergeFrom(from);
9123}
9124
9125void RunMetadata_FunctionGraphs::CopyFrom(const RunMetadata_FunctionGraphs& from) {
9126// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunMetadata.FunctionGraphs)
9127 if (&from == this) return;
9128 Clear();
9129 MergeFrom(from);
9130}
9131
9132bool RunMetadata_FunctionGraphs::IsInitialized() const {
9133 return true;
9134}
9135
9136void RunMetadata_FunctionGraphs::InternalSwap(RunMetadata_FunctionGraphs* other) {
9137 using std::swap;
9138 _internal_metadata_.Swap(&other->_internal_metadata_);
9139 CastToBase(&partition_graphs_)->InternalSwap(CastToBase(&other->partition_graphs_));
9140 swap(pre_optimization_graph_, other->pre_optimization_graph_);
9141 swap(post_optimization_graph_, other->post_optimization_graph_);
9142}
9143
9144::PROTOBUF_NAMESPACE_ID::Metadata RunMetadata_FunctionGraphs::GetMetadata() const {
9145 return GetMetadataStatic();
9146}
9147
9148
9149// ===================================================================
9150
9151void RunMetadata::InitAsDefaultInstance() {
9152 ::tensorflow::_RunMetadata_default_instance_._instance.get_mutable()->step_stats_ = const_cast< ::tensorflow::StepStats*>(
9153 ::tensorflow::StepStats::internal_default_instance());
9154 ::tensorflow::_RunMetadata_default_instance_._instance.get_mutable()->cost_graph_ = const_cast< ::tensorflow::CostGraphDef*>(
9155 ::tensorflow::CostGraphDef::internal_default_instance());
9156 ::tensorflow::_RunMetadata_default_instance_._instance.get_mutable()->session_metadata_ = const_cast< ::tensorflow::SessionMetadata*>(
9157 ::tensorflow::SessionMetadata::internal_default_instance());
9158}
9159class RunMetadata::_Internal {
9160 public:
9161 static const ::tensorflow::StepStats& step_stats(const RunMetadata* msg);
9162 static const ::tensorflow::CostGraphDef& cost_graph(const RunMetadata* msg);
9163 static const ::tensorflow::SessionMetadata& session_metadata(const RunMetadata* msg);
9164};
9165
9166const ::tensorflow::StepStats&
9167RunMetadata::_Internal::step_stats(const RunMetadata* msg) {
9168 return *msg->step_stats_;
9169}
9170const ::tensorflow::CostGraphDef&
9171RunMetadata::_Internal::cost_graph(const RunMetadata* msg) {
9172 return *msg->cost_graph_;
9173}
9174const ::tensorflow::SessionMetadata&
9175RunMetadata::_Internal::session_metadata(const RunMetadata* msg) {
9176 return *msg->session_metadata_;
9177}
9178void RunMetadata::unsafe_arena_set_allocated_step_stats(
9179 ::tensorflow::StepStats* step_stats) {
9180 if (GetArenaNoVirtual() == nullptr) {
9181 delete step_stats_;
9182 }
9183 step_stats_ = step_stats;
9184 if (step_stats) {
9185
9186 } else {
9187
9188 }
9189 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.step_stats)
9190}
9191void RunMetadata::clear_step_stats() {
9192 if (GetArenaNoVirtual() == nullptr && step_stats_ != nullptr) {
9193 delete step_stats_;
9194 }
9195 step_stats_ = nullptr;
9196}
9197void RunMetadata::unsafe_arena_set_allocated_cost_graph(
9198 ::tensorflow::CostGraphDef* cost_graph) {
9199 if (GetArenaNoVirtual() == nullptr) {
9200 delete cost_graph_;
9201 }
9202 cost_graph_ = cost_graph;
9203 if (cost_graph) {
9204
9205 } else {
9206
9207 }
9208 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.cost_graph)
9209}
9210void RunMetadata::clear_cost_graph() {
9211 if (GetArenaNoVirtual() == nullptr && cost_graph_ != nullptr) {
9212 delete cost_graph_;
9213 }
9214 cost_graph_ = nullptr;
9215}
9216void RunMetadata::clear_partition_graphs() {
9217 partition_graphs_.Clear();
9218}
9219void RunMetadata::unsafe_arena_set_allocated_session_metadata(
9220 ::tensorflow::SessionMetadata* session_metadata) {
9221 if (GetArenaNoVirtual() == nullptr) {
9222 delete session_metadata_;
9223 }
9224 session_metadata_ = session_metadata;
9225 if (session_metadata) {
9226
9227 } else {
9228
9229 }
9230 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.RunMetadata.session_metadata)
9231}
9232RunMetadata::RunMetadata()
9233 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
9234 SharedCtor();
9235 // @@protoc_insertion_point(constructor:tensorflow.RunMetadata)
9236}
9237RunMetadata::RunMetadata(::PROTOBUF_NAMESPACE_ID::Arena* arena)
9238 : ::PROTOBUF_NAMESPACE_ID::Message(),
9239 _internal_metadata_(arena),
9240 partition_graphs_(arena),
9241 function_graphs_(arena) {
9242 SharedCtor();
9243 RegisterArenaDtor(arena);
9244 // @@protoc_insertion_point(arena_constructor:tensorflow.RunMetadata)
9245}
9246RunMetadata::RunMetadata(const RunMetadata& from)
9247 : ::PROTOBUF_NAMESPACE_ID::Message(),
9248 _internal_metadata_(nullptr),
9249 partition_graphs_(from.partition_graphs_),
9250 function_graphs_(from.function_graphs_) {
9251 _internal_metadata_.MergeFrom(from._internal_metadata_);
9252 if (from.has_step_stats()) {
9253 step_stats_ = new ::tensorflow::StepStats(*from.step_stats_);
9254 } else {
9255 step_stats_ = nullptr;
9256 }
9257 if (from.has_cost_graph()) {
9258 cost_graph_ = new ::tensorflow::CostGraphDef(*from.cost_graph_);
9259 } else {
9260 cost_graph_ = nullptr;
9261 }
9262 if (from.has_session_metadata()) {
9263 session_metadata_ = new ::tensorflow::SessionMetadata(*from.session_metadata_);
9264 } else {
9265 session_metadata_ = nullptr;
9266 }
9267 // @@protoc_insertion_point(copy_constructor:tensorflow.RunMetadata)
9268}
9269
9270void RunMetadata::SharedCtor() {
9271 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_RunMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
9272 ::memset(&step_stats_, 0, static_cast<size_t>(
9273 reinterpret_cast<char*>(&session_metadata_) -
9274 reinterpret_cast<char*>(&step_stats_)) + sizeof(session_metadata_));
9275}
9276
9277RunMetadata::~RunMetadata() {
9278 // @@protoc_insertion_point(destructor:tensorflow.RunMetadata)
9279 SharedDtor();
9280}
9281
9282void RunMetadata::SharedDtor() {
9283 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
9284 if (this != internal_default_instance()) delete step_stats_;
9285 if (this != internal_default_instance()) delete cost_graph_;
9286 if (this != internal_default_instance()) delete session_metadata_;
9287}
9288
9289void RunMetadata::ArenaDtor(void* object) {
9290 RunMetadata* _this = reinterpret_cast< RunMetadata* >(object);
9291 (void)_this;
9292}
9293void RunMetadata::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
9294}
9295void RunMetadata::SetCachedSize(int size) const {
9296 _cached_size_.Set(size);
9297}
9298const RunMetadata& RunMetadata::default_instance() {
9299 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_RunMetadata_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
9300 return *internal_default_instance();
9301}
9302
9303
9304void RunMetadata::Clear() {
9305// @@protoc_insertion_point(message_clear_start:tensorflow.RunMetadata)
9306 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9307 // Prevent compiler warnings about cached_has_bits being unused
9308 (void) cached_has_bits;
9309
9310 partition_graphs_.Clear();
9311 function_graphs_.Clear();
9312 if (GetArenaNoVirtual() == nullptr && step_stats_ != nullptr) {
9313 delete step_stats_;
9314 }
9315 step_stats_ = nullptr;
9316 if (GetArenaNoVirtual() == nullptr && cost_graph_ != nullptr) {
9317 delete cost_graph_;
9318 }
9319 cost_graph_ = nullptr;
9320 if (GetArenaNoVirtual() == nullptr && session_metadata_ != nullptr) {
9321 delete session_metadata_;
9322 }
9323 session_metadata_ = nullptr;
9324 _internal_metadata_.Clear();
9325}
9326
9327#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
9328const char* RunMetadata::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
9329#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
9330 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
9331 while (!ctx->Done(&ptr)) {
9332 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
9333 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
9334 CHK_(ptr);
9335 switch (tag >> 3) {
9336 // .tensorflow.StepStats step_stats = 1;
9337 case 1:
9338 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
9339 ptr = ctx->ParseMessage(mutable_step_stats(), ptr);
9340 CHK_(ptr);
9341 } else goto handle_unusual;
9342 continue;
9343 // .tensorflow.CostGraphDef cost_graph = 2;
9344 case 2:
9345 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
9346 ptr = ctx->ParseMessage(mutable_cost_graph(), ptr);
9347 CHK_(ptr);
9348 } else goto handle_unusual;
9349 continue;
9350 // repeated .tensorflow.GraphDef partition_graphs = 3;
9351 case 3:
9352 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
9353 ptr -= 1;
9354 do {
9355 ptr += 1;
9356 ptr = ctx->ParseMessage(add_partition_graphs(), ptr);
9357 CHK_(ptr);
9358 if (!ctx->DataAvailable(ptr)) break;
9359 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 26);
9360 } else goto handle_unusual;
9361 continue;
9362 // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
9363 case 4:
9364 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) {
9365 ptr -= 1;
9366 do {
9367 ptr += 1;
9368 ptr = ctx->ParseMessage(add_function_graphs(), ptr);
9369 CHK_(ptr);
9370 if (!ctx->DataAvailable(ptr)) break;
9371 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 34);
9372 } else goto handle_unusual;
9373 continue;
9374 // .tensorflow.SessionMetadata session_metadata = 5;
9375 case 5:
9376 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) {
9377 ptr = ctx->ParseMessage(mutable_session_metadata(), ptr);
9378 CHK_(ptr);
9379 } else goto handle_unusual;
9380 continue;
9381 default: {
9382 handle_unusual:
9383 if ((tag & 7) == 4 || tag == 0) {
9384 ctx->SetLastTag(tag);
9385 goto success;
9386 }
9387 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
9388 CHK_(ptr != nullptr);
9389 continue;
9390 }
9391 } // switch
9392 } // while
9393success:
9394 return ptr;
9395failure:
9396 ptr = nullptr;
9397 goto success;
9398#undef CHK_
9399}
9400#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
9401bool RunMetadata::MergePartialFromCodedStream(
9402 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
9403#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
9404 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
9405 // @@protoc_insertion_point(parse_start:tensorflow.RunMetadata)
9406 for (;;) {
9407 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
9408 tag = p.first;
9409 if (!p.second) goto handle_unusual;
9410 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
9411 // .tensorflow.StepStats step_stats = 1;
9412 case 1: {
9413 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
9414 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
9415 input, mutable_step_stats()));
9416 } else {
9417 goto handle_unusual;
9418 }
9419 break;
9420 }
9421
9422 // .tensorflow.CostGraphDef cost_graph = 2;
9423 case 2: {
9424 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
9425 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
9426 input, mutable_cost_graph()));
9427 } else {
9428 goto handle_unusual;
9429 }
9430 break;
9431 }
9432
9433 // repeated .tensorflow.GraphDef partition_graphs = 3;
9434 case 3: {
9435 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
9436 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
9437 input, add_partition_graphs()));
9438 } else {
9439 goto handle_unusual;
9440 }
9441 break;
9442 }
9443
9444 // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
9445 case 4: {
9446 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) {
9447 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
9448 input, add_function_graphs()));
9449 } else {
9450 goto handle_unusual;
9451 }
9452 break;
9453 }
9454
9455 // .tensorflow.SessionMetadata session_metadata = 5;
9456 case 5: {
9457 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (42 & 0xFF)) {
9458 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
9459 input, mutable_session_metadata()));
9460 } else {
9461 goto handle_unusual;
9462 }
9463 break;
9464 }
9465
9466 default: {
9467 handle_unusual:
9468 if (tag == 0) {
9469 goto success;
9470 }
9471 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
9472 input, tag, _internal_metadata_.mutable_unknown_fields()));
9473 break;
9474 }
9475 }
9476 }
9477success:
9478 // @@protoc_insertion_point(parse_success:tensorflow.RunMetadata)
9479 return true;
9480failure:
9481 // @@protoc_insertion_point(parse_failure:tensorflow.RunMetadata)
9482 return false;
9483#undef DO_
9484}
9485#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
9486
9487void RunMetadata::SerializeWithCachedSizes(
9488 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
9489 // @@protoc_insertion_point(serialize_start:tensorflow.RunMetadata)
9490 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9491 (void) cached_has_bits;
9492
9493 // .tensorflow.StepStats step_stats = 1;
9494 if (this->has_step_stats()) {
9495 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
9496 1, _Internal::step_stats(this), output);
9497 }
9498
9499 // .tensorflow.CostGraphDef cost_graph = 2;
9500 if (this->has_cost_graph()) {
9501 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
9502 2, _Internal::cost_graph(this), output);
9503 }
9504
9505 // repeated .tensorflow.GraphDef partition_graphs = 3;
9506 for (unsigned int i = 0,
9507 n = static_cast<unsigned int>(this->partition_graphs_size()); i < n; i++) {
9508 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
9509 3,
9510 this->partition_graphs(static_cast<int>(i)),
9511 output);
9512 }
9513
9514 // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
9515 for (unsigned int i = 0,
9516 n = static_cast<unsigned int>(this->function_graphs_size()); i < n; i++) {
9517 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
9518 4,
9519 this->function_graphs(static_cast<int>(i)),
9520 output);
9521 }
9522
9523 // .tensorflow.SessionMetadata session_metadata = 5;
9524 if (this->has_session_metadata()) {
9525 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
9526 5, _Internal::session_metadata(this), output);
9527 }
9528
9529 if (_internal_metadata_.have_unknown_fields()) {
9530 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
9531 _internal_metadata_.unknown_fields(), output);
9532 }
9533 // @@protoc_insertion_point(serialize_end:tensorflow.RunMetadata)
9534}
9535
9536::PROTOBUF_NAMESPACE_ID::uint8* RunMetadata::InternalSerializeWithCachedSizesToArray(
9537 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
9538 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.RunMetadata)
9539 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9540 (void) cached_has_bits;
9541
9542 // .tensorflow.StepStats step_stats = 1;
9543 if (this->has_step_stats()) {
9544 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
9545 InternalWriteMessageToArray(
9546 1, _Internal::step_stats(this), target);
9547 }
9548
9549 // .tensorflow.CostGraphDef cost_graph = 2;
9550 if (this->has_cost_graph()) {
9551 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
9552 InternalWriteMessageToArray(
9553 2, _Internal::cost_graph(this), target);
9554 }
9555
9556 // repeated .tensorflow.GraphDef partition_graphs = 3;
9557 for (unsigned int i = 0,
9558 n = static_cast<unsigned int>(this->partition_graphs_size()); i < n; i++) {
9559 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
9560 InternalWriteMessageToArray(
9561 3, this->partition_graphs(static_cast<int>(i)), target);
9562 }
9563
9564 // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
9565 for (unsigned int i = 0,
9566 n = static_cast<unsigned int>(this->function_graphs_size()); i < n; i++) {
9567 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
9568 InternalWriteMessageToArray(
9569 4, this->function_graphs(static_cast<int>(i)), target);
9570 }
9571
9572 // .tensorflow.SessionMetadata session_metadata = 5;
9573 if (this->has_session_metadata()) {
9574 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
9575 InternalWriteMessageToArray(
9576 5, _Internal::session_metadata(this), target);
9577 }
9578
9579 if (_internal_metadata_.have_unknown_fields()) {
9580 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
9581 _internal_metadata_.unknown_fields(), target);
9582 }
9583 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.RunMetadata)
9584 return target;
9585}
9586
9587size_t RunMetadata::ByteSizeLong() const {
9588// @@protoc_insertion_point(message_byte_size_start:tensorflow.RunMetadata)
9589 size_t total_size = 0;
9590
9591 if (_internal_metadata_.have_unknown_fields()) {
9592 total_size +=
9593 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
9594 _internal_metadata_.unknown_fields());
9595 }
9596 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9597 // Prevent compiler warnings about cached_has_bits being unused
9598 (void) cached_has_bits;
9599
9600 // repeated .tensorflow.GraphDef partition_graphs = 3;
9601 {
9602 unsigned int count = static_cast<unsigned int>(this->partition_graphs_size());
9603 total_size += 1UL * count;
9604 for (unsigned int i = 0; i < count; i++) {
9605 total_size +=
9606 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
9607 this->partition_graphs(static_cast<int>(i)));
9608 }
9609 }
9610
9611 // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;
9612 {
9613 unsigned int count = static_cast<unsigned int>(this->function_graphs_size());
9614 total_size += 1UL * count;
9615 for (unsigned int i = 0; i < count; i++) {
9616 total_size +=
9617 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
9618 this->function_graphs(static_cast<int>(i)));
9619 }
9620 }
9621
9622 // .tensorflow.StepStats step_stats = 1;
9623 if (this->has_step_stats()) {
9624 total_size += 1 +
9625 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
9626 *step_stats_);
9627 }
9628
9629 // .tensorflow.CostGraphDef cost_graph = 2;
9630 if (this->has_cost_graph()) {
9631 total_size += 1 +
9632 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
9633 *cost_graph_);
9634 }
9635
9636 // .tensorflow.SessionMetadata session_metadata = 5;
9637 if (this->has_session_metadata()) {
9638 total_size += 1 +
9639 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
9640 *session_metadata_);
9641 }
9642
9643 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
9644 SetCachedSize(cached_size);
9645 return total_size;
9646}
9647
9648void RunMetadata::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
9649// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.RunMetadata)
9650 GOOGLE_DCHECK_NE(&from, this);
9651 const RunMetadata* source =
9652 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<RunMetadata>(
9653 &from);
9654 if (source == nullptr) {
9655 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.RunMetadata)
9656 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
9657 } else {
9658 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.RunMetadata)
9659 MergeFrom(*source);
9660 }
9661}
9662
9663void RunMetadata::MergeFrom(const RunMetadata& from) {
9664// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.RunMetadata)
9665 GOOGLE_DCHECK_NE(&from, this);
9666 _internal_metadata_.MergeFrom(from._internal_metadata_);
9667 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9668 (void) cached_has_bits;
9669
9670 partition_graphs_.MergeFrom(from.partition_graphs_);
9671 function_graphs_.MergeFrom(from.function_graphs_);
9672 if (from.has_step_stats()) {
9673 mutable_step_stats()->::tensorflow::StepStats::MergeFrom(from.step_stats());
9674 }
9675 if (from.has_cost_graph()) {
9676 mutable_cost_graph()->::tensorflow::CostGraphDef::MergeFrom(from.cost_graph());
9677 }
9678 if (from.has_session_metadata()) {
9679 mutable_session_metadata()->::tensorflow::SessionMetadata::MergeFrom(from.session_metadata());
9680 }
9681}
9682
9683void RunMetadata::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
9684// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.RunMetadata)
9685 if (&from == this) return;
9686 Clear();
9687 MergeFrom(from);
9688}
9689
9690void RunMetadata::CopyFrom(const RunMetadata& from) {
9691// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.RunMetadata)
9692 if (&from == this) return;
9693 Clear();
9694 MergeFrom(from);
9695}
9696
9697bool RunMetadata::IsInitialized() const {
9698 return true;
9699}
9700
9701void RunMetadata::InternalSwap(RunMetadata* other) {
9702 using std::swap;
9703 _internal_metadata_.Swap(&other->_internal_metadata_);
9704 CastToBase(&partition_graphs_)->InternalSwap(CastToBase(&other->partition_graphs_));
9705 CastToBase(&function_graphs_)->InternalSwap(CastToBase(&other->function_graphs_));
9706 swap(step_stats_, other->step_stats_);
9707 swap(cost_graph_, other->cost_graph_);
9708 swap(session_metadata_, other->session_metadata_);
9709}
9710
9711::PROTOBUF_NAMESPACE_ID::Metadata RunMetadata::GetMetadata() const {
9712 return GetMetadataStatic();
9713}
9714
9715
9716// ===================================================================
9717
9718void TensorConnection::InitAsDefaultInstance() {
9719}
9720class TensorConnection::_Internal {
9721 public:
9722};
9723
9724TensorConnection::TensorConnection()
9725 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
9726 SharedCtor();
9727 // @@protoc_insertion_point(constructor:tensorflow.TensorConnection)
9728}
9729TensorConnection::TensorConnection(::PROTOBUF_NAMESPACE_ID::Arena* arena)
9730 : ::PROTOBUF_NAMESPACE_ID::Message(),
9731 _internal_metadata_(arena) {
9732 SharedCtor();
9733 RegisterArenaDtor(arena);
9734 // @@protoc_insertion_point(arena_constructor:tensorflow.TensorConnection)
9735}
9736TensorConnection::TensorConnection(const TensorConnection& from)
9737 : ::PROTOBUF_NAMESPACE_ID::Message(),
9738 _internal_metadata_(nullptr) {
9739 _internal_metadata_.MergeFrom(from._internal_metadata_);
9740 from_tensor_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
9741 if (!from.from_tensor().empty()) {
9742 from_tensor_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.from_tensor(),
9743 GetArenaNoVirtual());
9744 }
9745 to_tensor_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
9746 if (!from.to_tensor().empty()) {
9747 to_tensor_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.to_tensor(),
9748 GetArenaNoVirtual());
9749 }
9750 // @@protoc_insertion_point(copy_constructor:tensorflow.TensorConnection)
9751}
9752
9753void TensorConnection::SharedCtor() {
9754 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_TensorConnection_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
9755 from_tensor_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
9756 to_tensor_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
9757}
9758
9759TensorConnection::~TensorConnection() {
9760 // @@protoc_insertion_point(destructor:tensorflow.TensorConnection)
9761 SharedDtor();
9762}
9763
9764void TensorConnection::SharedDtor() {
9765 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
9766 from_tensor_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
9767 to_tensor_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
9768}
9769
9770void TensorConnection::ArenaDtor(void* object) {
9771 TensorConnection* _this = reinterpret_cast< TensorConnection* >(object);
9772 (void)_this;
9773}
9774void TensorConnection::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
9775}
9776void TensorConnection::SetCachedSize(int size) const {
9777 _cached_size_.Set(size);
9778}
9779const TensorConnection& TensorConnection::default_instance() {
9780 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_TensorConnection_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
9781 return *internal_default_instance();
9782}
9783
9784
9785void TensorConnection::Clear() {
9786// @@protoc_insertion_point(message_clear_start:tensorflow.TensorConnection)
9787 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9788 // Prevent compiler warnings about cached_has_bits being unused
9789 (void) cached_has_bits;
9790
9791 from_tensor_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
9792 to_tensor_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());
9793 _internal_metadata_.Clear();
9794}
9795
9796#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
9797const char* TensorConnection::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
9798#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
9799 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
9800 while (!ctx->Done(&ptr)) {
9801 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
9802 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
9803 CHK_(ptr);
9804 switch (tag >> 3) {
9805 // string from_tensor = 1;
9806 case 1:
9807 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
9808 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_from_tensor(), ptr, ctx, "tensorflow.TensorConnection.from_tensor");
9809 CHK_(ptr);
9810 } else goto handle_unusual;
9811 continue;
9812 // string to_tensor = 2;
9813 case 2:
9814 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
9815 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_to_tensor(), ptr, ctx, "tensorflow.TensorConnection.to_tensor");
9816 CHK_(ptr);
9817 } else goto handle_unusual;
9818 continue;
9819 default: {
9820 handle_unusual:
9821 if ((tag & 7) == 4 || tag == 0) {
9822 ctx->SetLastTag(tag);
9823 goto success;
9824 }
9825 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
9826 CHK_(ptr != nullptr);
9827 continue;
9828 }
9829 } // switch
9830 } // while
9831success:
9832 return ptr;
9833failure:
9834 ptr = nullptr;
9835 goto success;
9836#undef CHK_
9837}
9838#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
9839bool TensorConnection::MergePartialFromCodedStream(
9840 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
9841#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
9842 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
9843 // @@protoc_insertion_point(parse_start:tensorflow.TensorConnection)
9844 for (;;) {
9845 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
9846 tag = p.first;
9847 if (!p.second) goto handle_unusual;
9848 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
9849 // string from_tensor = 1;
9850 case 1: {
9851 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
9852 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
9853 input, this->mutable_from_tensor()));
9854 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
9855 this->from_tensor().data(), static_cast<int>(this->from_tensor().length()),
9856 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
9857 "tensorflow.TensorConnection.from_tensor"));
9858 } else {
9859 goto handle_unusual;
9860 }
9861 break;
9862 }
9863
9864 // string to_tensor = 2;
9865 case 2: {
9866 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
9867 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
9868 input, this->mutable_to_tensor()));
9869 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
9870 this->to_tensor().data(), static_cast<int>(this->to_tensor().length()),
9871 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
9872 "tensorflow.TensorConnection.to_tensor"));
9873 } else {
9874 goto handle_unusual;
9875 }
9876 break;
9877 }
9878
9879 default: {
9880 handle_unusual:
9881 if (tag == 0) {
9882 goto success;
9883 }
9884 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
9885 input, tag, _internal_metadata_.mutable_unknown_fields()));
9886 break;
9887 }
9888 }
9889 }
9890success:
9891 // @@protoc_insertion_point(parse_success:tensorflow.TensorConnection)
9892 return true;
9893failure:
9894 // @@protoc_insertion_point(parse_failure:tensorflow.TensorConnection)
9895 return false;
9896#undef DO_
9897}
9898#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
9899
9900void TensorConnection::SerializeWithCachedSizes(
9901 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
9902 // @@protoc_insertion_point(serialize_start:tensorflow.TensorConnection)
9903 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9904 (void) cached_has_bits;
9905
9906 // string from_tensor = 1;
9907 if (this->from_tensor().size() > 0) {
9908 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
9909 this->from_tensor().data(), static_cast<int>(this->from_tensor().length()),
9910 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
9911 "tensorflow.TensorConnection.from_tensor");
9912 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
9913 1, this->from_tensor(), output);
9914 }
9915
9916 // string to_tensor = 2;
9917 if (this->to_tensor().size() > 0) {
9918 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
9919 this->to_tensor().data(), static_cast<int>(this->to_tensor().length()),
9920 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
9921 "tensorflow.TensorConnection.to_tensor");
9922 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
9923 2, this->to_tensor(), output);
9924 }
9925
9926 if (_internal_metadata_.have_unknown_fields()) {
9927 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
9928 _internal_metadata_.unknown_fields(), output);
9929 }
9930 // @@protoc_insertion_point(serialize_end:tensorflow.TensorConnection)
9931}
9932
9933::PROTOBUF_NAMESPACE_ID::uint8* TensorConnection::InternalSerializeWithCachedSizesToArray(
9934 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
9935 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.TensorConnection)
9936 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9937 (void) cached_has_bits;
9938
9939 // string from_tensor = 1;
9940 if (this->from_tensor().size() > 0) {
9941 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
9942 this->from_tensor().data(), static_cast<int>(this->from_tensor().length()),
9943 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
9944 "tensorflow.TensorConnection.from_tensor");
9945 target =
9946 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
9947 1, this->from_tensor(), target);
9948 }
9949
9950 // string to_tensor = 2;
9951 if (this->to_tensor().size() > 0) {
9952 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
9953 this->to_tensor().data(), static_cast<int>(this->to_tensor().length()),
9954 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
9955 "tensorflow.TensorConnection.to_tensor");
9956 target =
9957 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
9958 2, this->to_tensor(), target);
9959 }
9960
9961 if (_internal_metadata_.have_unknown_fields()) {
9962 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
9963 _internal_metadata_.unknown_fields(), target);
9964 }
9965 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.TensorConnection)
9966 return target;
9967}
9968
9969size_t TensorConnection::ByteSizeLong() const {
9970// @@protoc_insertion_point(message_byte_size_start:tensorflow.TensorConnection)
9971 size_t total_size = 0;
9972
9973 if (_internal_metadata_.have_unknown_fields()) {
9974 total_size +=
9975 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
9976 _internal_metadata_.unknown_fields());
9977 }
9978 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
9979 // Prevent compiler warnings about cached_has_bits being unused
9980 (void) cached_has_bits;
9981
9982 // string from_tensor = 1;
9983 if (this->from_tensor().size() > 0) {
9984 total_size += 1 +
9985 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
9986 this->from_tensor());
9987 }
9988
9989 // string to_tensor = 2;
9990 if (this->to_tensor().size() > 0) {
9991 total_size += 1 +
9992 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
9993 this->to_tensor());
9994 }
9995
9996 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
9997 SetCachedSize(cached_size);
9998 return total_size;
9999}
10000
10001void TensorConnection::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
10002// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.TensorConnection)
10003 GOOGLE_DCHECK_NE(&from, this);
10004 const TensorConnection* source =
10005 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<TensorConnection>(
10006 &from);
10007 if (source == nullptr) {
10008 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.TensorConnection)
10009 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
10010 } else {
10011 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.TensorConnection)
10012 MergeFrom(*source);
10013 }
10014}
10015
10016void TensorConnection::MergeFrom(const TensorConnection& from) {
10017// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.TensorConnection)
10018 GOOGLE_DCHECK_NE(&from, this);
10019 _internal_metadata_.MergeFrom(from._internal_metadata_);
10020 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
10021 (void) cached_has_bits;
10022
10023 if (from.from_tensor().size() > 0) {
10024 set_from_tensor(from.from_tensor());
10025 }
10026 if (from.to_tensor().size() > 0) {
10027 set_to_tensor(from.to_tensor());
10028 }
10029}
10030
10031void TensorConnection::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
10032// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.TensorConnection)
10033 if (&from == this) return;
10034 Clear();
10035 MergeFrom(from);
10036}
10037
10038void TensorConnection::CopyFrom(const TensorConnection& from) {
10039// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.TensorConnection)
10040 if (&from == this) return;
10041 Clear();
10042 MergeFrom(from);
10043}
10044
10045bool TensorConnection::IsInitialized() const {
10046 return true;
10047}
10048
10049void TensorConnection::InternalSwap(TensorConnection* other) {
10050 using std::swap;
10051 _internal_metadata_.Swap(&other->_internal_metadata_);
10052 from_tensor_.Swap(&other->from_tensor_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
10053 GetArenaNoVirtual());
10054 to_tensor_.Swap(&other->to_tensor_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
10055 GetArenaNoVirtual());
10056}
10057
10058::PROTOBUF_NAMESPACE_ID::Metadata TensorConnection::GetMetadata() const {
10059 return GetMetadataStatic();
10060}
10061
10062
10063// ===================================================================
10064
10065CallableOptions_FeedDevicesEntry_DoNotUse::CallableOptions_FeedDevicesEntry_DoNotUse() {}
10066CallableOptions_FeedDevicesEntry_DoNotUse::CallableOptions_FeedDevicesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
10067 : SuperType(arena) {}
10068void CallableOptions_FeedDevicesEntry_DoNotUse::MergeFrom(const CallableOptions_FeedDevicesEntry_DoNotUse& other) {
10069 MergeFromInternal(other);
10070}
10071::PROTOBUF_NAMESPACE_ID::Metadata CallableOptions_FeedDevicesEntry_DoNotUse::GetMetadata() const {
10072 return GetMetadataStatic();
10073}
10074void CallableOptions_FeedDevicesEntry_DoNotUse::MergeFrom(
10075 const ::PROTOBUF_NAMESPACE_ID::Message& other) {
10076 ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom(other);
10077}
10078
10079
10080// ===================================================================
10081
10082CallableOptions_FetchDevicesEntry_DoNotUse::CallableOptions_FetchDevicesEntry_DoNotUse() {}
10083CallableOptions_FetchDevicesEntry_DoNotUse::CallableOptions_FetchDevicesEntry_DoNotUse(::PROTOBUF_NAMESPACE_ID::Arena* arena)
10084 : SuperType(arena) {}
10085void CallableOptions_FetchDevicesEntry_DoNotUse::MergeFrom(const CallableOptions_FetchDevicesEntry_DoNotUse& other) {
10086 MergeFromInternal(other);
10087}
10088::PROTOBUF_NAMESPACE_ID::Metadata CallableOptions_FetchDevicesEntry_DoNotUse::GetMetadata() const {
10089 return GetMetadataStatic();
10090}
10091void CallableOptions_FetchDevicesEntry_DoNotUse::MergeFrom(
10092 const ::PROTOBUF_NAMESPACE_ID::Message& other) {
10093 ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom(other);
10094}
10095
10096
10097// ===================================================================
10098
10099void CallableOptions::InitAsDefaultInstance() {
10100 ::tensorflow::_CallableOptions_default_instance_._instance.get_mutable()->run_options_ = const_cast< ::tensorflow::RunOptions*>(
10101 ::tensorflow::RunOptions::internal_default_instance());
10102}
10103class CallableOptions::_Internal {
10104 public:
10105 static const ::tensorflow::RunOptions& run_options(const CallableOptions* msg);
10106};
10107
10108const ::tensorflow::RunOptions&
10109CallableOptions::_Internal::run_options(const CallableOptions* msg) {
10110 return *msg->run_options_;
10111}
10112void CallableOptions::unsafe_arena_set_allocated_run_options(
10113 ::tensorflow::RunOptions* run_options) {
10114 if (GetArenaNoVirtual() == nullptr) {
10115 delete run_options_;
10116 }
10117 run_options_ = run_options;
10118 if (run_options) {
10119
10120 } else {
10121
10122 }
10123 // @@protoc_insertion_point(field_unsafe_arena_set_allocated:tensorflow.CallableOptions.run_options)
10124}
10125CallableOptions::CallableOptions()
10126 : ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
10127 SharedCtor();
10128 // @@protoc_insertion_point(constructor:tensorflow.CallableOptions)
10129}
10130CallableOptions::CallableOptions(::PROTOBUF_NAMESPACE_ID::Arena* arena)
10131 : ::PROTOBUF_NAMESPACE_ID::Message(),
10132 _internal_metadata_(arena),
10133 feed_(arena),
10134 fetch_(arena),
10135 target_(arena),
10136 tensor_connection_(arena),
10137 feed_devices_(arena),
10138 fetch_devices_(arena) {
10139 SharedCtor();
10140 RegisterArenaDtor(arena);
10141 // @@protoc_insertion_point(arena_constructor:tensorflow.CallableOptions)
10142}
10143CallableOptions::CallableOptions(const CallableOptions& from)
10144 : ::PROTOBUF_NAMESPACE_ID::Message(),
10145 _internal_metadata_(nullptr),
10146 feed_(from.feed_),
10147 fetch_(from.fetch_),
10148 target_(from.target_),
10149 tensor_connection_(from.tensor_connection_) {
10150 _internal_metadata_.MergeFrom(from._internal_metadata_);
10151 feed_devices_.MergeFrom(from.feed_devices_);
10152 fetch_devices_.MergeFrom(from.fetch_devices_);
10153 if (from.has_run_options()) {
10154 run_options_ = new ::tensorflow::RunOptions(*from.run_options_);
10155 } else {
10156 run_options_ = nullptr;
10157 }
10158 fetch_skip_sync_ = from.fetch_skip_sync_;
10159 // @@protoc_insertion_point(copy_constructor:tensorflow.CallableOptions)
10160}
10161
10162void CallableOptions::SharedCtor() {
10163 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_CallableOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
10164 ::memset(&run_options_, 0, static_cast<size_t>(
10165 reinterpret_cast<char*>(&fetch_skip_sync_) -
10166 reinterpret_cast<char*>(&run_options_)) + sizeof(fetch_skip_sync_));
10167}
10168
10169CallableOptions::~CallableOptions() {
10170 // @@protoc_insertion_point(destructor:tensorflow.CallableOptions)
10171 SharedDtor();
10172}
10173
10174void CallableOptions::SharedDtor() {
10175 GOOGLE_DCHECK(GetArenaNoVirtual() == nullptr);
10176 if (this != internal_default_instance()) delete run_options_;
10177}
10178
10179void CallableOptions::ArenaDtor(void* object) {
10180 CallableOptions* _this = reinterpret_cast< CallableOptions* >(object);
10181 (void)_this;
10182}
10183void CallableOptions::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {
10184}
10185void CallableOptions::SetCachedSize(int size) const {
10186 _cached_size_.Set(size);
10187}
10188const CallableOptions& CallableOptions::default_instance() {
10189 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_CallableOptions_tensorflow_2fcore_2fprotobuf_2fconfig_2eproto.base);
10190 return *internal_default_instance();
10191}
10192
10193
10194void CallableOptions::Clear() {
10195// @@protoc_insertion_point(message_clear_start:tensorflow.CallableOptions)
10196 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
10197 // Prevent compiler warnings about cached_has_bits being unused
10198 (void) cached_has_bits;
10199
10200 feed_.Clear();
10201 fetch_.Clear();
10202 target_.Clear();
10203 tensor_connection_.Clear();
10204 feed_devices_.Clear();
10205 fetch_devices_.Clear();
10206 if (GetArenaNoVirtual() == nullptr && run_options_ != nullptr) {
10207 delete run_options_;
10208 }
10209 run_options_ = nullptr;
10210 fetch_skip_sync_ = false;
10211 _internal_metadata_.Clear();
10212}
10213
10214#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
10215const char* CallableOptions::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
10216#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
10217 ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArenaNoVirtual(); (void)arena;
10218 while (!ctx->Done(&ptr)) {
10219 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
10220 ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
10221 CHK_(ptr);
10222 switch (tag >> 3) {
10223 // repeated string feed = 1;
10224 case 1:
10225 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
10226 ptr -= 1;
10227 do {
10228 ptr += 1;
10229 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_feed(), ptr, ctx, "tensorflow.CallableOptions.feed");
10230 CHK_(ptr);
10231 if (!ctx->DataAvailable(ptr)) break;
10232 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 10);
10233 } else goto handle_unusual;
10234 continue;
10235 // repeated string fetch = 2;
10236 case 2:
10237 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
10238 ptr -= 1;
10239 do {
10240 ptr += 1;
10241 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_fetch(), ptr, ctx, "tensorflow.CallableOptions.fetch");
10242 CHK_(ptr);
10243 if (!ctx->DataAvailable(ptr)) break;
10244 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 18);
10245 } else goto handle_unusual;
10246 continue;
10247 // repeated string target = 3;
10248 case 3:
10249 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
10250 ptr -= 1;
10251 do {
10252 ptr += 1;
10253 ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_target(), ptr, ctx, "tensorflow.CallableOptions.target");
10254 CHK_(ptr);
10255 if (!ctx->DataAvailable(ptr)) break;
10256 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 26);
10257 } else goto handle_unusual;
10258 continue;
10259 // .tensorflow.RunOptions run_options = 4;
10260 case 4:
10261 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) {
10262 ptr = ctx->ParseMessage(mutable_run_options(), ptr);
10263 CHK_(ptr);
10264 } else goto handle_unusual;
10265 continue;
10266 // repeated .tensorflow.TensorConnection tensor_connection = 5;
10267 case 5:
10268 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) {
10269 ptr -= 1;
10270 do {
10271 ptr += 1;
10272 ptr = ctx->ParseMessage(add_tensor_connection(), ptr);
10273 CHK_(ptr);
10274 if (!ctx->DataAvailable(ptr)) break;
10275 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 42);
10276 } else goto handle_unusual;
10277 continue;
10278 // map<string, string> feed_devices = 6;
10279 case 6:
10280 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) {
10281 ptr -= 1;
10282 do {
10283 ptr += 1;
10284 ptr = ctx->ParseMessage(&feed_devices_, ptr);
10285 CHK_(ptr);
10286 if (!ctx->DataAvailable(ptr)) break;
10287 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 50);
10288 } else goto handle_unusual;
10289 continue;
10290 // map<string, string> fetch_devices = 7;
10291 case 7:
10292 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 58)) {
10293 ptr -= 1;
10294 do {
10295 ptr += 1;
10296 ptr = ctx->ParseMessage(&fetch_devices_, ptr);
10297 CHK_(ptr);
10298 if (!ctx->DataAvailable(ptr)) break;
10299 } while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 58);
10300 } else goto handle_unusual;
10301 continue;
10302 // bool fetch_skip_sync = 8;
10303 case 8:
10304 if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 64)) {
10305 fetch_skip_sync_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
10306 CHK_(ptr);
10307 } else goto handle_unusual;
10308 continue;
10309 default: {
10310 handle_unusual:
10311 if ((tag & 7) == 4 || tag == 0) {
10312 ctx->SetLastTag(tag);
10313 goto success;
10314 }
10315 ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
10316 CHK_(ptr != nullptr);
10317 continue;
10318 }
10319 } // switch
10320 } // while
10321success:
10322 return ptr;
10323failure:
10324 ptr = nullptr;
10325 goto success;
10326#undef CHK_
10327}
10328#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
10329bool CallableOptions::MergePartialFromCodedStream(
10330 ::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
10331#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
10332 ::PROTOBUF_NAMESPACE_ID::uint32 tag;
10333 // @@protoc_insertion_point(parse_start:tensorflow.CallableOptions)
10334 for (;;) {
10335 ::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
10336 tag = p.first;
10337 if (!p.second) goto handle_unusual;
10338 switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
10339 // repeated string feed = 1;
10340 case 1: {
10341 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
10342 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
10343 input, this->add_feed()));
10344 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10345 this->feed(this->feed_size() - 1).data(),
10346 static_cast<int>(this->feed(this->feed_size() - 1).length()),
10347 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
10348 "tensorflow.CallableOptions.feed"));
10349 } else {
10350 goto handle_unusual;
10351 }
10352 break;
10353 }
10354
10355 // repeated string fetch = 2;
10356 case 2: {
10357 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
10358 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
10359 input, this->add_fetch()));
10360 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10361 this->fetch(this->fetch_size() - 1).data(),
10362 static_cast<int>(this->fetch(this->fetch_size() - 1).length()),
10363 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
10364 "tensorflow.CallableOptions.fetch"));
10365 } else {
10366 goto handle_unusual;
10367 }
10368 break;
10369 }
10370
10371 // repeated string target = 3;
10372 case 3: {
10373 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
10374 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
10375 input, this->add_target()));
10376 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10377 this->target(this->target_size() - 1).data(),
10378 static_cast<int>(this->target(this->target_size() - 1).length()),
10379 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
10380 "tensorflow.CallableOptions.target"));
10381 } else {
10382 goto handle_unusual;
10383 }
10384 break;
10385 }
10386
10387 // .tensorflow.RunOptions run_options = 4;
10388 case 4: {
10389 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) {
10390 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
10391 input, mutable_run_options()));
10392 } else {
10393 goto handle_unusual;
10394 }
10395 break;
10396 }
10397
10398 // repeated .tensorflow.TensorConnection tensor_connection = 5;
10399 case 5: {
10400 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (42 & 0xFF)) {
10401 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
10402 input, add_tensor_connection()));
10403 } else {
10404 goto handle_unusual;
10405 }
10406 break;
10407 }
10408
10409 // map<string, string> feed_devices = 6;
10410 case 6: {
10411 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) {
10412 CallableOptions_FeedDevicesEntry_DoNotUse::Parser< ::PROTOBUF_NAMESPACE_ID::internal::MapField<
10413 CallableOptions_FeedDevicesEntry_DoNotUse,
10414 std::string, std::string,
10415 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
10416 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
10417 0 >,
10418 ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string > > parser(&feed_devices_);
10419 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessageNoVirtual(
10420 input, &parser));
10421 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10422 parser.key().data(), static_cast<int>(parser.key().length()),
10423 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
10424 "tensorflow.CallableOptions.FeedDevicesEntry.key"));
10425 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10426 parser.value().data(), static_cast<int>(parser.value().length()),
10427 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
10428 "tensorflow.CallableOptions.FeedDevicesEntry.value"));
10429 } else {
10430 goto handle_unusual;
10431 }
10432 break;
10433 }
10434
10435 // map<string, string> fetch_devices = 7;
10436 case 7: {
10437 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (58 & 0xFF)) {
10438 CallableOptions_FetchDevicesEntry_DoNotUse::Parser< ::PROTOBUF_NAMESPACE_ID::internal::MapField<
10439 CallableOptions_FetchDevicesEntry_DoNotUse,
10440 std::string, std::string,
10441 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
10442 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_STRING,
10443 0 >,
10444 ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string > > parser(&fetch_devices_);
10445 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessageNoVirtual(
10446 input, &parser));
10447 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10448 parser.key().data(), static_cast<int>(parser.key().length()),
10449 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
10450 "tensorflow.CallableOptions.FetchDevicesEntry.key"));
10451 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10452 parser.value().data(), static_cast<int>(parser.value().length()),
10453 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
10454 "tensorflow.CallableOptions.FetchDevicesEntry.value"));
10455 } else {
10456 goto handle_unusual;
10457 }
10458 break;
10459 }
10460
10461 // bool fetch_skip_sync = 8;
10462 case 8: {
10463 if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (64 & 0xFF)) {
10464
10465 DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
10466 bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
10467 input, &fetch_skip_sync_)));
10468 } else {
10469 goto handle_unusual;
10470 }
10471 break;
10472 }
10473
10474 default: {
10475 handle_unusual:
10476 if (tag == 0) {
10477 goto success;
10478 }
10479 DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
10480 input, tag, _internal_metadata_.mutable_unknown_fields()));
10481 break;
10482 }
10483 }
10484 }
10485success:
10486 // @@protoc_insertion_point(parse_success:tensorflow.CallableOptions)
10487 return true;
10488failure:
10489 // @@protoc_insertion_point(parse_failure:tensorflow.CallableOptions)
10490 return false;
10491#undef DO_
10492}
10493#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
10494
10495void CallableOptions::SerializeWithCachedSizes(
10496 ::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
10497 // @@protoc_insertion_point(serialize_start:tensorflow.CallableOptions)
10498 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
10499 (void) cached_has_bits;
10500
10501 // repeated string feed = 1;
10502 for (int i = 0, n = this->feed_size(); i < n; i++) {
10503 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10504 this->feed(i).data(), static_cast<int>(this->feed(i).length()),
10505 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10506 "tensorflow.CallableOptions.feed");
10507 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
10508 1, this->feed(i), output);
10509 }
10510
10511 // repeated string fetch = 2;
10512 for (int i = 0, n = this->fetch_size(); i < n; i++) {
10513 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10514 this->fetch(i).data(), static_cast<int>(this->fetch(i).length()),
10515 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10516 "tensorflow.CallableOptions.fetch");
10517 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
10518 2, this->fetch(i), output);
10519 }
10520
10521 // repeated string target = 3;
10522 for (int i = 0, n = this->target_size(); i < n; i++) {
10523 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10524 this->target(i).data(), static_cast<int>(this->target(i).length()),
10525 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10526 "tensorflow.CallableOptions.target");
10527 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
10528 3, this->target(i), output);
10529 }
10530
10531 // .tensorflow.RunOptions run_options = 4;
10532 if (this->has_run_options()) {
10533 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
10534 4, _Internal::run_options(this), output);
10535 }
10536
10537 // repeated .tensorflow.TensorConnection tensor_connection = 5;
10538 for (unsigned int i = 0,
10539 n = static_cast<unsigned int>(this->tensor_connection_size()); i < n; i++) {
10540 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
10541 5,
10542 this->tensor_connection(static_cast<int>(i)),
10543 output);
10544 }
10545
10546 // map<string, string> feed_devices = 6;
10547 if (!this->feed_devices().empty()) {
10548 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_pointer
10549 ConstPtr;
10550 typedef ConstPtr SortItem;
10551 typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst<SortItem> Less;
10552 struct Utf8Check {
10553 static void Check(ConstPtr p) {
10554 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10555 p->first.data(), static_cast<int>(p->first.length()),
10556 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10557 "tensorflow.CallableOptions.FeedDevicesEntry.key");
10558 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10559 p->second.data(), static_cast<int>(p->second.length()),
10560 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10561 "tensorflow.CallableOptions.FeedDevicesEntry.value");
10562 }
10563 };
10564
10565 if (output->IsSerializationDeterministic() &&
10566 this->feed_devices().size() > 1) {
10567 ::std::unique_ptr<SortItem[]> items(
10568 new SortItem[this->feed_devices().size()]);
10569 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::size_type size_type;
10570 size_type n = 0;
10571 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10572 it = this->feed_devices().begin();
10573 it != this->feed_devices().end(); ++it, ++n) {
10574 items[static_cast<ptrdiff_t>(n)] = SortItem(&*it);
10575 }
10576 ::std::sort(&items[0], &items[static_cast<ptrdiff_t>(n)], Less());
10577 for (size_type i = 0; i < n; i++) {
10578 CallableOptions_FeedDevicesEntry_DoNotUse::Funcs::SerializeToCodedStream(6, items[static_cast<ptrdiff_t>(i)]->first, items[static_cast<ptrdiff_t>(i)]->second, output);
10579 Utf8Check::Check(&(*items[static_cast<ptrdiff_t>(i)]));
10580 }
10581 } else {
10582 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10583 it = this->feed_devices().begin();
10584 it != this->feed_devices().end(); ++it) {
10585 CallableOptions_FeedDevicesEntry_DoNotUse::Funcs::SerializeToCodedStream(6, it->first, it->second, output);
10586 Utf8Check::Check(&(*it));
10587 }
10588 }
10589 }
10590
10591 // map<string, string> fetch_devices = 7;
10592 if (!this->fetch_devices().empty()) {
10593 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_pointer
10594 ConstPtr;
10595 typedef ConstPtr SortItem;
10596 typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst<SortItem> Less;
10597 struct Utf8Check {
10598 static void Check(ConstPtr p) {
10599 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10600 p->first.data(), static_cast<int>(p->first.length()),
10601 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10602 "tensorflow.CallableOptions.FetchDevicesEntry.key");
10603 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10604 p->second.data(), static_cast<int>(p->second.length()),
10605 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10606 "tensorflow.CallableOptions.FetchDevicesEntry.value");
10607 }
10608 };
10609
10610 if (output->IsSerializationDeterministic() &&
10611 this->fetch_devices().size() > 1) {
10612 ::std::unique_ptr<SortItem[]> items(
10613 new SortItem[this->fetch_devices().size()]);
10614 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::size_type size_type;
10615 size_type n = 0;
10616 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10617 it = this->fetch_devices().begin();
10618 it != this->fetch_devices().end(); ++it, ++n) {
10619 items[static_cast<ptrdiff_t>(n)] = SortItem(&*it);
10620 }
10621 ::std::sort(&items[0], &items[static_cast<ptrdiff_t>(n)], Less());
10622 for (size_type i = 0; i < n; i++) {
10623 CallableOptions_FetchDevicesEntry_DoNotUse::Funcs::SerializeToCodedStream(7, items[static_cast<ptrdiff_t>(i)]->first, items[static_cast<ptrdiff_t>(i)]->second, output);
10624 Utf8Check::Check(&(*items[static_cast<ptrdiff_t>(i)]));
10625 }
10626 } else {
10627 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10628 it = this->fetch_devices().begin();
10629 it != this->fetch_devices().end(); ++it) {
10630 CallableOptions_FetchDevicesEntry_DoNotUse::Funcs::SerializeToCodedStream(7, it->first, it->second, output);
10631 Utf8Check::Check(&(*it));
10632 }
10633 }
10634 }
10635
10636 // bool fetch_skip_sync = 8;
10637 if (this->fetch_skip_sync() != 0) {
10638 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(8, this->fetch_skip_sync(), output);
10639 }
10640
10641 if (_internal_metadata_.have_unknown_fields()) {
10642 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
10643 _internal_metadata_.unknown_fields(), output);
10644 }
10645 // @@protoc_insertion_point(serialize_end:tensorflow.CallableOptions)
10646}
10647
10648::PROTOBUF_NAMESPACE_ID::uint8* CallableOptions::InternalSerializeWithCachedSizesToArray(
10649 ::PROTOBUF_NAMESPACE_ID::uint8* target) const {
10650 // @@protoc_insertion_point(serialize_to_array_start:tensorflow.CallableOptions)
10651 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
10652 (void) cached_has_bits;
10653
10654 // repeated string feed = 1;
10655 for (int i = 0, n = this->feed_size(); i < n; i++) {
10656 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10657 this->feed(i).data(), static_cast<int>(this->feed(i).length()),
10658 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10659 "tensorflow.CallableOptions.feed");
10660 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
10661 WriteStringToArray(1, this->feed(i), target);
10662 }
10663
10664 // repeated string fetch = 2;
10665 for (int i = 0, n = this->fetch_size(); i < n; i++) {
10666 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10667 this->fetch(i).data(), static_cast<int>(this->fetch(i).length()),
10668 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10669 "tensorflow.CallableOptions.fetch");
10670 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
10671 WriteStringToArray(2, this->fetch(i), target);
10672 }
10673
10674 // repeated string target = 3;
10675 for (int i = 0, n = this->target_size(); i < n; i++) {
10676 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10677 this->target(i).data(), static_cast<int>(this->target(i).length()),
10678 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10679 "tensorflow.CallableOptions.target");
10680 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
10681 WriteStringToArray(3, this->target(i), target);
10682 }
10683
10684 // .tensorflow.RunOptions run_options = 4;
10685 if (this->has_run_options()) {
10686 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
10687 InternalWriteMessageToArray(
10688 4, _Internal::run_options(this), target);
10689 }
10690
10691 // repeated .tensorflow.TensorConnection tensor_connection = 5;
10692 for (unsigned int i = 0,
10693 n = static_cast<unsigned int>(this->tensor_connection_size()); i < n; i++) {
10694 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
10695 InternalWriteMessageToArray(
10696 5, this->tensor_connection(static_cast<int>(i)), target);
10697 }
10698
10699 // map<string, string> feed_devices = 6;
10700 if (!this->feed_devices().empty()) {
10701 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_pointer
10702 ConstPtr;
10703 typedef ConstPtr SortItem;
10704 typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst<SortItem> Less;
10705 struct Utf8Check {
10706 static void Check(ConstPtr p) {
10707 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10708 p->first.data(), static_cast<int>(p->first.length()),
10709 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10710 "tensorflow.CallableOptions.FeedDevicesEntry.key");
10711 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10712 p->second.data(), static_cast<int>(p->second.length()),
10713 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10714 "tensorflow.CallableOptions.FeedDevicesEntry.value");
10715 }
10716 };
10717
10718 if (false &&
10719 this->feed_devices().size() > 1) {
10720 ::std::unique_ptr<SortItem[]> items(
10721 new SortItem[this->feed_devices().size()]);
10722 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::size_type size_type;
10723 size_type n = 0;
10724 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10725 it = this->feed_devices().begin();
10726 it != this->feed_devices().end(); ++it, ++n) {
10727 items[static_cast<ptrdiff_t>(n)] = SortItem(&*it);
10728 }
10729 ::std::sort(&items[0], &items[static_cast<ptrdiff_t>(n)], Less());
10730 for (size_type i = 0; i < n; i++) {
10731 target = CallableOptions_FeedDevicesEntry_DoNotUse::Funcs::SerializeToArray(6, items[static_cast<ptrdiff_t>(i)]->first, items[static_cast<ptrdiff_t>(i)]->second, target);
10732 Utf8Check::Check(&(*items[static_cast<ptrdiff_t>(i)]));
10733 }
10734 } else {
10735 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10736 it = this->feed_devices().begin();
10737 it != this->feed_devices().end(); ++it) {
10738 target = CallableOptions_FeedDevicesEntry_DoNotUse::Funcs::SerializeToArray(6, it->first, it->second, target);
10739 Utf8Check::Check(&(*it));
10740 }
10741 }
10742 }
10743
10744 // map<string, string> fetch_devices = 7;
10745 if (!this->fetch_devices().empty()) {
10746 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_pointer
10747 ConstPtr;
10748 typedef ConstPtr SortItem;
10749 typedef ::PROTOBUF_NAMESPACE_ID::internal::CompareByDerefFirst<SortItem> Less;
10750 struct Utf8Check {
10751 static void Check(ConstPtr p) {
10752 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10753 p->first.data(), static_cast<int>(p->first.length()),
10754 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10755 "tensorflow.CallableOptions.FetchDevicesEntry.key");
10756 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
10757 p->second.data(), static_cast<int>(p->second.length()),
10758 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
10759 "tensorflow.CallableOptions.FetchDevicesEntry.value");
10760 }
10761 };
10762
10763 if (false &&
10764 this->fetch_devices().size() > 1) {
10765 ::std::unique_ptr<SortItem[]> items(
10766 new SortItem[this->fetch_devices().size()]);
10767 typedef ::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::size_type size_type;
10768 size_type n = 0;
10769 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10770 it = this->fetch_devices().begin();
10771 it != this->fetch_devices().end(); ++it, ++n) {
10772 items[static_cast<ptrdiff_t>(n)] = SortItem(&*it);
10773 }
10774 ::std::sort(&items[0], &items[static_cast<ptrdiff_t>(n)], Less());
10775 for (size_type i = 0; i < n; i++) {
10776 target = CallableOptions_FetchDevicesEntry_DoNotUse::Funcs::SerializeToArray(7, items[static_cast<ptrdiff_t>(i)]->first, items[static_cast<ptrdiff_t>(i)]->second, target);
10777 Utf8Check::Check(&(*items[static_cast<ptrdiff_t>(i)]));
10778 }
10779 } else {
10780 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10781 it = this->fetch_devices().begin();
10782 it != this->fetch_devices().end(); ++it) {
10783 target = CallableOptions_FetchDevicesEntry_DoNotUse::Funcs::SerializeToArray(7, it->first, it->second, target);
10784 Utf8Check::Check(&(*it));
10785 }
10786 }
10787 }
10788
10789 // bool fetch_skip_sync = 8;
10790 if (this->fetch_skip_sync() != 0) {
10791 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(8, this->fetch_skip_sync(), target);
10792 }
10793
10794 if (_internal_metadata_.have_unknown_fields()) {
10795 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
10796 _internal_metadata_.unknown_fields(), target);
10797 }
10798 // @@protoc_insertion_point(serialize_to_array_end:tensorflow.CallableOptions)
10799 return target;
10800}
10801
10802size_t CallableOptions::ByteSizeLong() const {
10803// @@protoc_insertion_point(message_byte_size_start:tensorflow.CallableOptions)
10804 size_t total_size = 0;
10805
10806 if (_internal_metadata_.have_unknown_fields()) {
10807 total_size +=
10808 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
10809 _internal_metadata_.unknown_fields());
10810 }
10811 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
10812 // Prevent compiler warnings about cached_has_bits being unused
10813 (void) cached_has_bits;
10814
10815 // repeated string feed = 1;
10816 total_size += 1 *
10817 ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->feed_size());
10818 for (int i = 0, n = this->feed_size(); i < n; i++) {
10819 total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
10820 this->feed(i));
10821 }
10822
10823 // repeated string fetch = 2;
10824 total_size += 1 *
10825 ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->fetch_size());
10826 for (int i = 0, n = this->fetch_size(); i < n; i++) {
10827 total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
10828 this->fetch(i));
10829 }
10830
10831 // repeated string target = 3;
10832 total_size += 1 *
10833 ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->target_size());
10834 for (int i = 0, n = this->target_size(); i < n; i++) {
10835 total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
10836 this->target(i));
10837 }
10838
10839 // repeated .tensorflow.TensorConnection tensor_connection = 5;
10840 {
10841 unsigned int count = static_cast<unsigned int>(this->tensor_connection_size());
10842 total_size += 1UL * count;
10843 for (unsigned int i = 0; i < count; i++) {
10844 total_size +=
10845 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
10846 this->tensor_connection(static_cast<int>(i)));
10847 }
10848 }
10849
10850 // map<string, string> feed_devices = 6;
10851 total_size += 1 *
10852 ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->feed_devices_size());
10853 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10854 it = this->feed_devices().begin();
10855 it != this->feed_devices().end(); ++it) {
10856 total_size += CallableOptions_FeedDevicesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
10857 }
10858
10859 // map<string, string> fetch_devices = 7;
10860 total_size += 1 *
10861 ::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->fetch_devices_size());
10862 for (::PROTOBUF_NAMESPACE_ID::Map< std::string, std::string >::const_iterator
10863 it = this->fetch_devices().begin();
10864 it != this->fetch_devices().end(); ++it) {
10865 total_size += CallableOptions_FetchDevicesEntry_DoNotUse::Funcs::ByteSizeLong(it->first, it->second);
10866 }
10867
10868 // .tensorflow.RunOptions run_options = 4;
10869 if (this->has_run_options()) {
10870 total_size += 1 +
10871 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
10872 *run_options_);
10873 }
10874
10875 // bool fetch_skip_sync = 8;
10876 if (this->fetch_skip_sync() != 0) {
10877 total_size += 1 + 1;
10878 }
10879
10880 int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
10881 SetCachedSize(cached_size);
10882 return total_size;
10883}
10884
10885void CallableOptions::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
10886// @@protoc_insertion_point(generalized_merge_from_start:tensorflow.CallableOptions)
10887 GOOGLE_DCHECK_NE(&from, this);
10888 const CallableOptions* source =
10889 ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<CallableOptions>(
10890 &from);
10891 if (source == nullptr) {
10892 // @@protoc_insertion_point(generalized_merge_from_cast_fail:tensorflow.CallableOptions)
10893 ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
10894 } else {
10895 // @@protoc_insertion_point(generalized_merge_from_cast_success:tensorflow.CallableOptions)
10896 MergeFrom(*source);
10897 }
10898}
10899
10900void CallableOptions::MergeFrom(const CallableOptions& from) {
10901// @@protoc_insertion_point(class_specific_merge_from_start:tensorflow.CallableOptions)
10902 GOOGLE_DCHECK_NE(&from, this);
10903 _internal_metadata_.MergeFrom(from._internal_metadata_);
10904 ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
10905 (void) cached_has_bits;
10906
10907 feed_.MergeFrom(from.feed_);
10908 fetch_.MergeFrom(from.fetch_);
10909 target_.MergeFrom(from.target_);
10910 tensor_connection_.MergeFrom(from.tensor_connection_);
10911 feed_devices_.MergeFrom(from.feed_devices_);
10912 fetch_devices_.MergeFrom(from.fetch_devices_);
10913 if (from.has_run_options()) {
10914 mutable_run_options()->::tensorflow::RunOptions::MergeFrom(from.run_options());
10915 }
10916 if (from.fetch_skip_sync() != 0) {
10917 set_fetch_skip_sync(from.fetch_skip_sync());
10918 }
10919}
10920
10921void CallableOptions::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
10922// @@protoc_insertion_point(generalized_copy_from_start:tensorflow.CallableOptions)
10923 if (&from == this) return;
10924 Clear();
10925 MergeFrom(from);
10926}
10927
10928void CallableOptions::CopyFrom(const CallableOptions& from) {
10929// @@protoc_insertion_point(class_specific_copy_from_start:tensorflow.CallableOptions)
10930 if (&from == this) return;
10931 Clear();
10932 MergeFrom(from);
10933}
10934
10935bool CallableOptions::IsInitialized() const {
10936 return true;
10937}
10938
10939void CallableOptions::InternalSwap(CallableOptions* other) {
10940 using std::swap;
10941 _internal_metadata_.Swap(&other->_internal_metadata_);
10942 feed_.InternalSwap(CastToBase(&other->feed_));
10943 fetch_.InternalSwap(CastToBase(&other->fetch_));
10944 target_.InternalSwap(CastToBase(&other->target_));
10945 CastToBase(&tensor_connection_)->InternalSwap(CastToBase(&other->tensor_connection_));
10946 feed_devices_.Swap(&other->feed_devices_);
10947 fetch_devices_.Swap(&other->fetch_devices_);
10948 swap(run_options_, other->run_options_);
10949 swap(fetch_skip_sync_, other->fetch_skip_sync_);
10950}
10951
10952::PROTOBUF_NAMESPACE_ID::Metadata CallableOptions::GetMetadata() const {
10953 return GetMetadataStatic();
10954}
10955
10956
10957// @@protoc_insertion_point(namespace_scope)
10958} // namespace tensorflow
10959PROTOBUF_NAMESPACE_OPEN
10960template<> PROTOBUF_NOINLINE ::tensorflow::GPUOptions_Experimental_VirtualDevices* Arena::CreateMaybeMessage< ::tensorflow::GPUOptions_Experimental_VirtualDevices >(Arena* arena) {
10961 return Arena::CreateMessageInternal< ::tensorflow::GPUOptions_Experimental_VirtualDevices >(arena);
10962}
10963template<> PROTOBUF_NOINLINE ::tensorflow::GPUOptions_Experimental* Arena::CreateMaybeMessage< ::tensorflow::GPUOptions_Experimental >(Arena* arena) {
10964 return Arena::CreateMessageInternal< ::tensorflow::GPUOptions_Experimental >(arena);
10965}
10966template<> PROTOBUF_NOINLINE ::tensorflow::GPUOptions* Arena::CreateMaybeMessage< ::tensorflow::GPUOptions >(Arena* arena) {
10967 return Arena::CreateMessageInternal< ::tensorflow::GPUOptions >(arena);
10968}
10969template<> PROTOBUF_NOINLINE ::tensorflow::OptimizerOptions* Arena::CreateMaybeMessage< ::tensorflow::OptimizerOptions >(Arena* arena) {
10970 return Arena::CreateMessageInternal< ::tensorflow::OptimizerOptions >(arena);
10971}
10972template<> PROTOBUF_NOINLINE ::tensorflow::GraphOptions* Arena::CreateMaybeMessage< ::tensorflow::GraphOptions >(Arena* arena) {
10973 return Arena::CreateMessageInternal< ::tensorflow::GraphOptions >(arena);
10974}
10975template<> PROTOBUF_NOINLINE ::tensorflow::ThreadPoolOptionProto* Arena::CreateMaybeMessage< ::tensorflow::ThreadPoolOptionProto >(Arena* arena) {
10976 return Arena::CreateMessageInternal< ::tensorflow::ThreadPoolOptionProto >(arena);
10977}
10978template<> PROTOBUF_NOINLINE ::tensorflow::RPCOptions* Arena::CreateMaybeMessage< ::tensorflow::RPCOptions >(Arena* arena) {
10979 return Arena::CreateMessageInternal< ::tensorflow::RPCOptions >(arena);
10980}
10981template<> PROTOBUF_NOINLINE ::tensorflow::SessionMetadata* Arena::CreateMaybeMessage< ::tensorflow::SessionMetadata >(Arena* arena) {
10982 return Arena::CreateMessageInternal< ::tensorflow::SessionMetadata >(arena);
10983}
10984template<> PROTOBUF_NOINLINE ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse* Arena::CreateMaybeMessage< ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse >(Arena* arena) {
10985 return Arena::CreateMessageInternal< ::tensorflow::ConfigProto_DeviceCountEntry_DoNotUse >(arena);
10986}
10987template<> PROTOBUF_NOINLINE ::tensorflow::ConfigProto_Experimental* Arena::CreateMaybeMessage< ::tensorflow::ConfigProto_Experimental >(Arena* arena) {
10988 return Arena::CreateMessageInternal< ::tensorflow::ConfigProto_Experimental >(arena);
10989}
10990template<> PROTOBUF_NOINLINE ::tensorflow::ConfigProto* Arena::CreateMaybeMessage< ::tensorflow::ConfigProto >(Arena* arena) {
10991 return Arena::CreateMessageInternal< ::tensorflow::ConfigProto >(arena);
10992}
10993template<> PROTOBUF_NOINLINE ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions* Arena::CreateMaybeMessage< ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions >(Arena* arena) {
10994 return Arena::CreateMessageInternal< ::tensorflow::RunOptions_Experimental_RunHandlerPoolOptions >(arena);
10995}
10996template<> PROTOBUF_NOINLINE ::tensorflow::RunOptions_Experimental* Arena::CreateMaybeMessage< ::tensorflow::RunOptions_Experimental >(Arena* arena) {
10997 return Arena::CreateMessageInternal< ::tensorflow::RunOptions_Experimental >(arena);
10998}
10999template<> PROTOBUF_NOINLINE ::tensorflow::RunOptions* Arena::CreateMaybeMessage< ::tensorflow::RunOptions >(Arena* arena) {
11000 return Arena::CreateMessageInternal< ::tensorflow::RunOptions >(arena);
11001}
11002template<> PROTOBUF_NOINLINE ::tensorflow::RunMetadata_FunctionGraphs* Arena::CreateMaybeMessage< ::tensorflow::RunMetadata_FunctionGraphs >(Arena* arena) {
11003 return Arena::CreateMessageInternal< ::tensorflow::RunMetadata_FunctionGraphs >(arena);
11004}
11005template<> PROTOBUF_NOINLINE ::tensorflow::RunMetadata* Arena::CreateMaybeMessage< ::tensorflow::RunMetadata >(Arena* arena) {
11006 return Arena::CreateMessageInternal< ::tensorflow::RunMetadata >(arena);
11007}
11008template<> PROTOBUF_NOINLINE ::tensorflow::TensorConnection* Arena::CreateMaybeMessage< ::tensorflow::TensorConnection >(Arena* arena) {
11009 return Arena::CreateMessageInternal< ::tensorflow::TensorConnection >(arena);
11010}
11011template<> PROTOBUF_NOINLINE ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse* Arena::CreateMaybeMessage< ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse >(Arena* arena) {
11012 return Arena::CreateMessageInternal< ::tensorflow::CallableOptions_FeedDevicesEntry_DoNotUse >(arena);
11013}
11014template<> PROTOBUF_NOINLINE ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse* Arena::CreateMaybeMessage< ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse >(Arena* arena) {
11015 return Arena::CreateMessageInternal< ::tensorflow::CallableOptions_FetchDevicesEntry_DoNotUse >(arena);
11016}
11017template<> PROTOBUF_NOINLINE ::tensorflow::CallableOptions* Arena::CreateMaybeMessage< ::tensorflow::CallableOptions >(Arena* arena) {
11018 return Arena::CreateMessageInternal< ::tensorflow::CallableOptions >(arena);
11019}
11020PROTOBUF_NAMESPACE_CLOSE
11021
11022// @@protoc_insertion_point(global_scope)
11023#include <google/protobuf/port_undef.inc>
11024