1 | // Generated by the gRPC C++ plugin. |
// If you make any local changes, they will be lost.
3 | // source: tensorflow/core/profiler/profiler_service.proto |
4 | #ifndef GRPC_tensorflow_2fcore_2fprofiler_2fprofiler_5fservice_2eproto__INCLUDED |
5 | #define GRPC_tensorflow_2fcore_2fprofiler_2fprofiler_5fservice_2eproto__INCLUDED |
6 | |
7 | #include "tensorflow/core/profiler/profiler_service.pb.h" |
8 | |
9 | #include <functional> |
10 | #include <grpc/impl/codegen/port_platform.h> |
11 | #include <grpcpp/impl/codegen/async_generic_service.h> |
12 | #include <grpcpp/impl/codegen/async_stream.h> |
13 | #include <grpcpp/impl/codegen/async_unary_call.h> |
14 | #include <grpcpp/impl/codegen/client_callback.h> |
15 | #include <grpcpp/impl/codegen/client_context.h> |
16 | #include <grpcpp/impl/codegen/completion_queue.h> |
17 | #include <grpcpp/impl/codegen/message_allocator.h> |
18 | #include <grpcpp/impl/codegen/method_handler.h> |
19 | #include <grpcpp/impl/codegen/proto_utils.h> |
20 | #include <grpcpp/impl/codegen/rpc_method.h> |
21 | #include <grpcpp/impl/codegen/server_callback.h> |
22 | #include <grpcpp/impl/codegen/server_callback_handlers.h> |
23 | #include <grpcpp/impl/codegen/server_context.h> |
24 | #include <grpcpp/impl/codegen/service_type.h> |
25 | #include <grpcpp/impl/codegen/status.h> |
26 | #include <grpcpp/impl/codegen/stub_options.h> |
27 | #include <grpcpp/impl/codegen/sync_stream.h> |
28 | |
29 | namespace tensorflow { |
30 | |
31 | |
32 | namespace grpc { |
33 | |
34 | // The ProfilerService service retrieves performance information about |
35 | // the programs running on connected devices over a period of time. |
36 | class ProfilerService final { |
37 | public: |
38 | static constexpr char const* service_full_name() { |
39 | return "tensorflow.ProfilerService" ; |
40 | } |
  // Abstract client-side interface for the ProfilerService RPCs. The concrete
  // Stub (below) implements these calls over a gRPC channel; client code can
  // depend on this interface so it can be mocked in tests.
  class StubInterface {
   public:
    virtual ~StubInterface() {}
    // Starts a profiling session, blocks until it completes, and returns data.
    virtual ::grpc::Status Profile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::tensorflow::ProfileResponse* response) = 0;
    // Async Profile: completion of the returned reader is reported on `cq`.
    std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ProfileResponse>> AsyncProfile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ProfileResponse>>(AsyncProfileRaw(context, request, cq));
    }
    // Prepared async Profile: per standard gRPC Prepare* semantics the RPC is
    // not started until StartCall() is invoked on the returned reader.
    std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ProfileResponse>> PrepareAsyncProfile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ProfileResponse>>(PrepareAsyncProfileRaw(context, request, cq));
    }
    // Signal to terminate the Profile rpc for a on-going profiling session,
    // The Profile rpc will return successfully and prematurely without timeout.
    // This is used by programmatic mode to end the session in workers.
    virtual ::grpc::Status Terminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::tensorflow::TerminateResponse* response) = 0;
    // Async / prepared-async Terminate (same pattern as Profile above).
    std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TerminateResponse>> AsyncTerminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TerminateResponse>>(AsyncTerminateRaw(context, request, cq));
    }
    std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TerminateResponse>> PrepareAsyncTerminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TerminateResponse>>(PrepareAsyncTerminateRaw(context, request, cq));
    }
    // Collects profiling data and returns user-friendly metrics.
    virtual ::grpc::Status Monitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::tensorflow::MonitorResponse* response) = 0;
    // Async / prepared-async Monitor (same pattern as Profile above).
    std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::MonitorResponse>> AsyncMonitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::MonitorResponse>>(AsyncMonitorRaw(context, request, cq));
    }
    std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::MonitorResponse>> PrepareAsyncMonitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::MonitorResponse>>(PrepareAsyncMonitorRaw(context, request, cq));
    }
    // Callback (reactor) client API. Each RPC has four pure-virtual overloads:
    // {typed message, raw ::grpc::ByteBuffer request} x {std::function
    // completion callback, ClientUnaryReactor}. The #ifdef pairs select the
    // stabilized vs. experimental reactor spelling depending on the linked
    // gRPC version.
    class experimental_async_interface {
     public:
      virtual ~experimental_async_interface() {}
      // Starts a profiling session, blocks until it completes, and returns data.
      virtual void Profile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest* request, ::tensorflow::ProfileResponse* response, std::function<void(::grpc::Status)>) = 0;
      virtual void Profile(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ProfileResponse* response, std::function<void(::grpc::Status)>) = 0;
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      virtual void Profile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest* request, ::tensorflow::ProfileResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
      #else
      virtual void Profile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest* request, ::tensorflow::ProfileResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
      #endif
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      virtual void Profile(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ProfileResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
      #else
      virtual void Profile(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ProfileResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
      #endif
      // Signal to terminate the Profile rpc for a on-going profiling session,
      // The Profile rpc will return successfully and prematurely without timeout.
      // This is used by programmatic mode to end the session in workers.
      virtual void Terminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest* request, ::tensorflow::TerminateResponse* response, std::function<void(::grpc::Status)>) = 0;
      virtual void Terminate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TerminateResponse* response, std::function<void(::grpc::Status)>) = 0;
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      virtual void Terminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest* request, ::tensorflow::TerminateResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
      #else
      virtual void Terminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest* request, ::tensorflow::TerminateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
      #endif
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      virtual void Terminate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TerminateResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
      #else
      virtual void Terminate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TerminateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
      #endif
      // Collects profiling data and returns user-friendly metrics.
      virtual void Monitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest* request, ::tensorflow::MonitorResponse* response, std::function<void(::grpc::Status)>) = 0;
      virtual void Monitor(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::MonitorResponse* response, std::function<void(::grpc::Status)>) = 0;
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      virtual void Monitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest* request, ::tensorflow::MonitorResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
      #else
      virtual void Monitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest* request, ::tensorflow::MonitorResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
      #endif
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      virtual void Monitor(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::MonitorResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
      #else
      virtual void Monitor(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::MonitorResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
      #endif
    };
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
    // Stable (non-"experimental") aliases for the callback API.
    typedef class experimental_async_interface async_interface;
    #endif
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
    async_interface* async() { return experimental_async(); }
    #endif
    // Returns the callback-API stub; this base returns nullptr (no support).
    virtual class experimental_async_interface* experimental_async() { return nullptr; }
   private:
    // Raw factories implemented by the concrete stub. The public Async*/
    // PrepareAsync* wrappers above wrap the returned raw pointers in
    // std::unique_ptr, taking ownership.
    virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ProfileResponse>* AsyncProfileRaw(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::grpc::CompletionQueue* cq) = 0;
    virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::ProfileResponse>* PrepareAsyncProfileRaw(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::grpc::CompletionQueue* cq) = 0;
    virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TerminateResponse>* AsyncTerminateRaw(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::grpc::CompletionQueue* cq) = 0;
    virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::TerminateResponse>* PrepareAsyncTerminateRaw(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::grpc::CompletionQueue* cq) = 0;
    virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::MonitorResponse>* AsyncMonitorRaw(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::grpc::CompletionQueue* cq) = 0;
    virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::MonitorResponse>* PrepareAsyncMonitorRaw(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::grpc::CompletionQueue* cq) = 0;
  };
  // Concrete client stub that issues the RPCs over the channel passed to the
  // constructor. Create one via ProfilerService::NewStub(); the method bodies
  // live in the generated .cc file.
  class Stub final : public StubInterface {
   public:
    Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);
    // Starts a profiling session, blocks until it completes, and returns data.
    ::grpc::Status Profile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::tensorflow::ProfileResponse* response) override;
    // Async / prepared-async variants; these hide the StubInterface overloads
    // so callers of the concrete Stub get the concrete reader type.
    std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ProfileResponse>> AsyncProfile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ProfileResponse>>(AsyncProfileRaw(context, request, cq));
    }
    std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ProfileResponse>> PrepareAsyncProfile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::ProfileResponse>>(PrepareAsyncProfileRaw(context, request, cq));
    }
    // Ends an on-going profiling session (see StubInterface::Terminate).
    ::grpc::Status Terminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::tensorflow::TerminateResponse* response) override;
    std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::TerminateResponse>> AsyncTerminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::TerminateResponse>>(AsyncTerminateRaw(context, request, cq));
    }
    std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::TerminateResponse>> PrepareAsyncTerminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::TerminateResponse>>(PrepareAsyncTerminateRaw(context, request, cq));
    }
    // Collects profiling data and returns user-friendly metrics.
    ::grpc::Status Monitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::tensorflow::MonitorResponse* response) override;
    std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::MonitorResponse>> AsyncMonitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::MonitorResponse>>(AsyncMonitorRaw(context, request, cq));
    }
    std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::MonitorResponse>> PrepareAsyncMonitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::grpc::CompletionQueue* cq) {
      return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::MonitorResponse>>(PrepareAsyncMonitorRaw(context, request, cq));
    }
    // Concrete callback (reactor) API stub; obtain it via experimental_async().
    class experimental_async final :
      public StubInterface::experimental_async_interface {
     public:
      void Profile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest* request, ::tensorflow::ProfileResponse* response, std::function<void(::grpc::Status)>) override;
      void Profile(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ProfileResponse* response, std::function<void(::grpc::Status)>) override;
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      void Profile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest* request, ::tensorflow::ProfileResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
      #else
      void Profile(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest* request, ::tensorflow::ProfileResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
      #endif
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      void Profile(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ProfileResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
      #else
      void Profile(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::ProfileResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
      #endif
      void Terminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest* request, ::tensorflow::TerminateResponse* response, std::function<void(::grpc::Status)>) override;
      void Terminate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TerminateResponse* response, std::function<void(::grpc::Status)>) override;
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      void Terminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest* request, ::tensorflow::TerminateResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
      #else
      void Terminate(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest* request, ::tensorflow::TerminateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
      #endif
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      void Terminate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TerminateResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
      #else
      void Terminate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::TerminateResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
      #endif
      void Monitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest* request, ::tensorflow::MonitorResponse* response, std::function<void(::grpc::Status)>) override;
      void Monitor(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::MonitorResponse* response, std::function<void(::grpc::Status)>) override;
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      void Monitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest* request, ::tensorflow::MonitorResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
      #else
      void Monitor(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest* request, ::tensorflow::MonitorResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
      #endif
      #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      void Monitor(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::MonitorResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
      #else
      void Monitor(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::MonitorResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
      #endif
     private:
      friend class Stub;
      // Only the enclosing Stub constructs this; keeps a non-owning
      // back-pointer used to issue the calls.
      explicit experimental_async(Stub* stub): stub_(stub) { }
      Stub* stub() { return stub_; }
      Stub* stub_;
    };
    // Returns the embedded callback-API stub (always non-null on Stub).
    class experimental_async_interface* experimental_async() override { return &async_stub_; }

   private:
    std::shared_ptr< ::grpc::ChannelInterface> channel_;
    // Embedded callback stub; shares this Stub's lifetime.
    class experimental_async async_stub_{this};
    ::grpc::ClientAsyncResponseReader< ::tensorflow::ProfileResponse>* AsyncProfileRaw(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::grpc::CompletionQueue* cq) override;
    ::grpc::ClientAsyncResponseReader< ::tensorflow::ProfileResponse>* PrepareAsyncProfileRaw(::grpc::ClientContext* context, const ::tensorflow::ProfileRequest& request, ::grpc::CompletionQueue* cq) override;
    ::grpc::ClientAsyncResponseReader< ::tensorflow::TerminateResponse>* AsyncTerminateRaw(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::grpc::CompletionQueue* cq) override;
    ::grpc::ClientAsyncResponseReader< ::tensorflow::TerminateResponse>* PrepareAsyncTerminateRaw(::grpc::ClientContext* context, const ::tensorflow::TerminateRequest& request, ::grpc::CompletionQueue* cq) override;
    ::grpc::ClientAsyncResponseReader< ::tensorflow::MonitorResponse>* AsyncMonitorRaw(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::grpc::CompletionQueue* cq) override;
    ::grpc::ClientAsyncResponseReader< ::tensorflow::MonitorResponse>* PrepareAsyncMonitorRaw(::grpc::ClientContext* context, const ::tensorflow::MonitorRequest& request, ::grpc::CompletionQueue* cq) override;
    // Per-method RPC descriptors; const, so they are initialized by the
    // constructor (defined in the generated .cc).
    const ::grpc::internal::RpcMethod rpcmethod_Profile_;
    const ::grpc::internal::RpcMethod rpcmethod_Terminate_;
    const ::grpc::internal::RpcMethod rpcmethod_Monitor_;
  };
  // Factory for a concrete Stub that issues calls over `channel`.
  static std::unique_ptr<Stub> NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions());
215 | |
  // Synchronous server API: subclass and override the handlers below. The
  // base-class handler bodies are defined in the generated .cc file.
  class Service : public ::grpc::Service {
   public:
    Service();
    virtual ~Service();
    // Starts a profiling session, blocks until it completes, and returns data.
    virtual ::grpc::Status Profile(::grpc::ServerContext* context, const ::tensorflow::ProfileRequest* request, ::tensorflow::ProfileResponse* response);
    // Signal to terminate the Profile rpc for a on-going profiling session,
    // The Profile rpc will return successfully and prematurely without timeout.
    // This is used by programmatic mode to end the session in workers.
    virtual ::grpc::Status Terminate(::grpc::ServerContext* context, const ::tensorflow::TerminateRequest* request, ::tensorflow::TerminateResponse* response);
    // Collects profiling data and returns user-friendly metrics.
    virtual ::grpc::Status Monitor(::grpc::ServerContext* context, const ::tensorflow::MonitorRequest* request, ::tensorflow::MonitorResponse* response);
  };
  // Mixin that switches Profile (method index 0) to the completion-queue
  // based async API. Stack the With* templates over Service, as done by the
  // AsyncService typedef below.
  template <class BaseClass>
  class WithAsyncMethod_Profile : public BaseClass {
   private:
    // Compile-time check that BaseClass is (derived from) Service.
    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
   public:
    WithAsyncMethod_Profile() {
      ::grpc::Service::MarkMethodAsync(0);
    }
    ~WithAsyncMethod_Profile() override {
      BaseClassMustBeDerivedFromService(this);
    }
    // disable synchronous version of this method
    ::grpc::Status Profile(::grpc::ServerContext* /*context*/, const ::tensorflow::ProfileRequest* /*request*/, ::tensorflow::ProfileResponse* /*response*/) override {
      abort();  // must never be reached once the method is marked async
      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
    }
    // Requests the next incoming Profile call; `tag` is delivered on
    // `notification_cq` when a call arrives.
    void RequestProfile(::grpc::ServerContext* context, ::tensorflow::ProfileRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::ProfileResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
      ::grpc::Service::RequestAsyncUnary(0, context, request, response, new_call_cq, notification_cq, tag);
    }
  };
  // Async mixin for Terminate; identical in structure to
  // WithAsyncMethod_Profile but operating on method index 1.
  template <class BaseClass>
  class WithAsyncMethod_Terminate : public BaseClass {
   private:
    // Compile-time check that BaseClass is (derived from) Service.
    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
   public:
    WithAsyncMethod_Terminate() {
      ::grpc::Service::MarkMethodAsync(1);
    }
    ~WithAsyncMethod_Terminate() override {
      BaseClassMustBeDerivedFromService(this);
    }
    // disable synchronous version of this method
    ::grpc::Status Terminate(::grpc::ServerContext* /*context*/, const ::tensorflow::TerminateRequest* /*request*/, ::tensorflow::TerminateResponse* /*response*/) override {
      abort();  // must never be reached once the method is marked async
      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
    }
    // Requests the next incoming Terminate call on the given queues.
    void RequestTerminate(::grpc::ServerContext* context, ::tensorflow::TerminateRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::TerminateResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
      ::grpc::Service::RequestAsyncUnary(1, context, request, response, new_call_cq, notification_cq, tag);
    }
  };
  // Async mixin for Monitor; identical in structure to
  // WithAsyncMethod_Profile but operating on method index 2.
  template <class BaseClass>
  class WithAsyncMethod_Monitor : public BaseClass {
   private:
    // Compile-time check that BaseClass is (derived from) Service.
    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
   public:
    WithAsyncMethod_Monitor() {
      ::grpc::Service::MarkMethodAsync(2);
    }
    ~WithAsyncMethod_Monitor() override {
      BaseClassMustBeDerivedFromService(this);
    }
    // disable synchronous version of this method
    ::grpc::Status Monitor(::grpc::ServerContext* /*context*/, const ::tensorflow::MonitorRequest* /*request*/, ::tensorflow::MonitorResponse* /*response*/) override {
      abort();  // must never be reached once the method is marked async
      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
    }
    // Requests the next incoming Monitor call on the given queues.
    void RequestMonitor(::grpc::ServerContext* context, ::tensorflow::MonitorRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::MonitorResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
      ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag);
    }
  };
  // Server type with all three methods exposed through the CQ-based async API.
  typedef WithAsyncMethod_Profile<WithAsyncMethod_Terminate<WithAsyncMethod_Monitor<Service > > > AsyncService;
  // Mixin exposing Profile (method index 0) through the callback (reactor)
  // server API. The constructor registers a unary callback handler that
  // forwards to the virtual reactor-returning Profile() declared at the
  // bottom of the class. The #ifdef pairs select the stabilized vs.
  // experimental API spelling depending on the linked gRPC version.
  template <class BaseClass>
  class ExperimentalWithCallbackMethod_Profile : public BaseClass {
   private:
    // Compile-time check that BaseClass is (derived from) Service.
    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
   public:
    ExperimentalWithCallbackMethod_Profile() {
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      ::grpc::Service::
    #else
      ::grpc::Service::experimental().
    #endif
        MarkMethodCallback(0,
          new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ProfileRequest, ::tensorflow::ProfileResponse>(
            [this](
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
                   ::grpc::CallbackServerContext*
    #else
                   ::grpc::experimental::CallbackServerContext*
    #endif
                     context, const ::tensorflow::ProfileRequest* request, ::tensorflow::ProfileResponse* response) { return this->Profile(context, request, response); }));}
    // Installs a custom request/response allocator for Profile. The
    // static_cast is safe because the constructor above registered a
    // CallbackUnaryHandler at method index 0.
    void SetMessageAllocatorFor_Profile(
        ::grpc::experimental::MessageAllocator< ::tensorflow::ProfileRequest, ::tensorflow::ProfileResponse>* allocator) {
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(0);
    #else
      ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(0);
    #endif
      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::ProfileRequest, ::tensorflow::ProfileResponse>*>(handler)
              ->SetMessageAllocator(allocator);
    }
    ~ExperimentalWithCallbackMethod_Profile() override {
      BaseClassMustBeDerivedFromService(this);
    }
    // disable synchronous version of this method
    ::grpc::Status Profile(::grpc::ServerContext* /*context*/, const ::tensorflow::ProfileRequest* /*request*/, ::tensorflow::ProfileResponse* /*response*/) override {
      abort();  // must never be reached once the method is marked callback
      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
    }
    // Override to implement Profile with a reactor; this base returns nullptr.
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
    virtual ::grpc::ServerUnaryReactor* Profile(
      ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::ProfileRequest* /*request*/, ::tensorflow::ProfileResponse* /*response*/)
    #else
    virtual ::grpc::experimental::ServerUnaryReactor* Profile(
      ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::ProfileRequest* /*request*/, ::tensorflow::ProfileResponse* /*response*/)
    #endif
      { return nullptr; }
  };
  // Callback-API mixin for Terminate; identical in structure to
  // ExperimentalWithCallbackMethod_Profile but operating on method index 1.
  template <class BaseClass>
  class ExperimentalWithCallbackMethod_Terminate : public BaseClass {
   private:
    // Compile-time check that BaseClass is (derived from) Service.
    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
   public:
    ExperimentalWithCallbackMethod_Terminate() {
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      ::grpc::Service::
    #else
      ::grpc::Service::experimental().
    #endif
        MarkMethodCallback(1,
          new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::TerminateRequest, ::tensorflow::TerminateResponse>(
            [this](
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
                   ::grpc::CallbackServerContext*
    #else
                   ::grpc::experimental::CallbackServerContext*
    #endif
                     context, const ::tensorflow::TerminateRequest* request, ::tensorflow::TerminateResponse* response) { return this->Terminate(context, request, response); }));}
    // Installs a custom request/response allocator for Terminate; the cast is
    // safe because the constructor registered a CallbackUnaryHandler at index 1.
    void SetMessageAllocatorFor_Terminate(
        ::grpc::experimental::MessageAllocator< ::tensorflow::TerminateRequest, ::tensorflow::TerminateResponse>* allocator) {
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(1);
    #else
      ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(1);
    #endif
      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::TerminateRequest, ::tensorflow::TerminateResponse>*>(handler)
              ->SetMessageAllocator(allocator);
    }
    ~ExperimentalWithCallbackMethod_Terminate() override {
      BaseClassMustBeDerivedFromService(this);
    }
    // disable synchronous version of this method
    ::grpc::Status Terminate(::grpc::ServerContext* /*context*/, const ::tensorflow::TerminateRequest* /*request*/, ::tensorflow::TerminateResponse* /*response*/) override {
      abort();  // must never be reached once the method is marked callback
      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
    }
    // Override to implement Terminate with a reactor; this base returns nullptr.
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
    virtual ::grpc::ServerUnaryReactor* Terminate(
      ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::TerminateRequest* /*request*/, ::tensorflow::TerminateResponse* /*response*/)
    #else
    virtual ::grpc::experimental::ServerUnaryReactor* Terminate(
      ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::TerminateRequest* /*request*/, ::tensorflow::TerminateResponse* /*response*/)
    #endif
      { return nullptr; }
  };
  // Callback-API mixin for Monitor; identical in structure to
  // ExperimentalWithCallbackMethod_Profile but operating on method index 2.
  template <class BaseClass>
  class ExperimentalWithCallbackMethod_Monitor : public BaseClass {
   private:
    // Compile-time check that BaseClass is (derived from) Service.
    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
   public:
    ExperimentalWithCallbackMethod_Monitor() {
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      ::grpc::Service::
    #else
      ::grpc::Service::experimental().
    #endif
        MarkMethodCallback(2,
          new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::MonitorRequest, ::tensorflow::MonitorResponse>(
            [this](
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
                   ::grpc::CallbackServerContext*
    #else
                   ::grpc::experimental::CallbackServerContext*
    #endif
                     context, const ::tensorflow::MonitorRequest* request, ::tensorflow::MonitorResponse* response) { return this->Monitor(context, request, response); }));}
    // Installs a custom request/response allocator for Monitor; the cast is
    // safe because the constructor registered a CallbackUnaryHandler at index 2.
    void SetMessageAllocatorFor_Monitor(
        ::grpc::experimental::MessageAllocator< ::tensorflow::MonitorRequest, ::tensorflow::MonitorResponse>* allocator) {
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(2);
    #else
      ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(2);
    #endif
      static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::MonitorRequest, ::tensorflow::MonitorResponse>*>(handler)
              ->SetMessageAllocator(allocator);
    }
    ~ExperimentalWithCallbackMethod_Monitor() override {
      BaseClassMustBeDerivedFromService(this);
    }
    // disable synchronous version of this method
    ::grpc::Status Monitor(::grpc::ServerContext* /*context*/, const ::tensorflow::MonitorRequest* /*request*/, ::tensorflow::MonitorResponse* /*response*/) override {
      abort();  // must never be reached once the method is marked callback
      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
    }
    // Override to implement Monitor with a reactor; this base returns nullptr.
    #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
    virtual ::grpc::ServerUnaryReactor* Monitor(
      ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::MonitorRequest* /*request*/, ::tensorflow::MonitorResponse* /*response*/)
    #else
    virtual ::grpc::experimental::ServerUnaryReactor* Monitor(
      ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::MonitorRequest* /*request*/, ::tensorflow::MonitorResponse* /*response*/)
    #endif
      { return nullptr; }
  };
#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
  // Stable alias for the fully callback-enabled service (Profile, Terminate
  // and Monitor all wrapped) when the non-experimental callback API exists.
  typedef ExperimentalWithCallbackMethod_Profile<ExperimentalWithCallbackMethod_Terminate<ExperimentalWithCallbackMethod_Monitor<Service > > > CallbackService;
#endif

  // Always-available alias under the experimental naming scheme; same mixin
  // composition as the CallbackService alias guarded above.
  typedef ExperimentalWithCallbackMethod_Profile<ExperimentalWithCallbackMethod_Terminate<ExperimentalWithCallbackMethod_Monitor<Service > > > ExperimentalCallbackService;
436 | template <class BaseClass> |
437 | class WithGenericMethod_Profile : public BaseClass { |
438 | private: |
439 | void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} |
440 | public: |
441 | WithGenericMethod_Profile() { |
442 | ::grpc::Service::MarkMethodGeneric(0); |
443 | } |
444 | ~WithGenericMethod_Profile() override { |
445 | BaseClassMustBeDerivedFromService(this); |
446 | } |
447 | // disable synchronous version of this method |
448 | ::grpc::Status Profile(::grpc::ServerContext* /*context*/, const ::tensorflow::ProfileRequest* /*request*/, ::tensorflow::ProfileResponse* /*response*/) override { |
449 | abort(); |
450 | return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" ); |
451 | } |
452 | }; |
453 | template <class BaseClass> |
454 | class WithGenericMethod_Terminate : public BaseClass { |
455 | private: |
456 | void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} |
457 | public: |
458 | WithGenericMethod_Terminate() { |
459 | ::grpc::Service::MarkMethodGeneric(1); |
460 | } |
461 | ~WithGenericMethod_Terminate() override { |
462 | BaseClassMustBeDerivedFromService(this); |
463 | } |
464 | // disable synchronous version of this method |
465 | ::grpc::Status Terminate(::grpc::ServerContext* /*context*/, const ::tensorflow::TerminateRequest* /*request*/, ::tensorflow::TerminateResponse* /*response*/) override { |
466 | abort(); |
467 | return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" ); |
468 | } |
469 | }; |
470 | template <class BaseClass> |
471 | class WithGenericMethod_Monitor : public BaseClass { |
472 | private: |
473 | void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} |
474 | public: |
475 | WithGenericMethod_Monitor() { |
476 | ::grpc::Service::MarkMethodGeneric(2); |
477 | } |
478 | ~WithGenericMethod_Monitor() override { |
479 | BaseClassMustBeDerivedFromService(this); |
480 | } |
481 | // disable synchronous version of this method |
482 | ::grpc::Status Monitor(::grpc::ServerContext* /*context*/, const ::tensorflow::MonitorRequest* /*request*/, ::tensorflow::MonitorResponse* /*response*/) override { |
483 | abort(); |
484 | return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" ); |
485 | } |
486 | }; |
487 | template <class BaseClass> |
488 | class WithRawMethod_Profile : public BaseClass { |
489 | private: |
490 | void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} |
491 | public: |
492 | WithRawMethod_Profile() { |
493 | ::grpc::Service::MarkMethodRaw(0); |
494 | } |
495 | ~WithRawMethod_Profile() override { |
496 | BaseClassMustBeDerivedFromService(this); |
497 | } |
498 | // disable synchronous version of this method |
499 | ::grpc::Status Profile(::grpc::ServerContext* /*context*/, const ::tensorflow::ProfileRequest* /*request*/, ::tensorflow::ProfileResponse* /*response*/) override { |
500 | abort(); |
501 | return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" ); |
502 | } |
503 | void RequestProfile(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { |
504 | ::grpc::Service::RequestAsyncUnary(0, context, request, response, new_call_cq, notification_cq, tag); |
505 | } |
506 | }; |
507 | template <class BaseClass> |
508 | class WithRawMethod_Terminate : public BaseClass { |
509 | private: |
510 | void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} |
511 | public: |
512 | WithRawMethod_Terminate() { |
513 | ::grpc::Service::MarkMethodRaw(1); |
514 | } |
515 | ~WithRawMethod_Terminate() override { |
516 | BaseClassMustBeDerivedFromService(this); |
517 | } |
518 | // disable synchronous version of this method |
519 | ::grpc::Status Terminate(::grpc::ServerContext* /*context*/, const ::tensorflow::TerminateRequest* /*request*/, ::tensorflow::TerminateResponse* /*response*/) override { |
520 | abort(); |
521 | return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" ); |
522 | } |
523 | void RequestTerminate(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { |
524 | ::grpc::Service::RequestAsyncUnary(1, context, request, response, new_call_cq, notification_cq, tag); |
525 | } |
526 | }; |
527 | template <class BaseClass> |
528 | class WithRawMethod_Monitor : public BaseClass { |
529 | private: |
530 | void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} |
531 | public: |
532 | WithRawMethod_Monitor() { |
533 | ::grpc::Service::MarkMethodRaw(2); |
534 | } |
535 | ~WithRawMethod_Monitor() override { |
536 | BaseClassMustBeDerivedFromService(this); |
537 | } |
538 | // disable synchronous version of this method |
539 | ::grpc::Status Monitor(::grpc::ServerContext* /*context*/, const ::tensorflow::MonitorRequest* /*request*/, ::tensorflow::MonitorResponse* /*response*/) override { |
540 | abort(); |
541 | return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" ); |
542 | } |
543 | void RequestMonitor(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) { |
544 | ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag); |
545 | } |
546 | }; |
  // Mixin that registers Profile (method index 0) with the *raw* callback
  // API: the handler exchanges serialized ::grpc::ByteBuffer payloads
  // rather than deserialized protobuf messages. The #ifdef pairs pick the
  // pre- vs post-GRPC_CALLBACK_API_NONEXPERIMENTAL spellings of the API.
  template <class BaseClass>
  class ExperimentalWithRawCallbackMethod_Profile : public BaseClass {
   private:
    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
   public:
    // Installs a raw callback handler that forwards every call to the
    // virtual reactor-returning Profile overload declared below.
    ExperimentalWithRawCallbackMethod_Profile() {
  #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      ::grpc::Service::
  #else
      ::grpc::Service::experimental().
  #endif
        MarkMethodRawCallback(0,
          new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
            [this](
  #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
                   ::grpc::CallbackServerContext*
  #else
                   ::grpc::experimental::CallbackServerContext*
  #endif
                     context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Profile(context, request, response); }));
    }
    ~ExperimentalWithRawCallbackMethod_Profile() override {
      BaseClassMustBeDerivedFromService(this);
    }
    // disable synchronous version of this method
    ::grpc::Status Profile(::grpc::ServerContext* /*context*/, const ::tensorflow::ProfileRequest* /*request*/, ::tensorflow::ProfileResponse* /*response*/) override {
      abort();
      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" );
    }
    // Override to supply the raw callback implementation; the default body
    // returns nullptr (no reactor provided).
  #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
    virtual ::grpc::ServerUnaryReactor* Profile(
      ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
  #else
    virtual ::grpc::experimental::ServerUnaryReactor* Profile(
      ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
  #endif
      { return nullptr; }
  };
  // Mixin that registers Terminate (method index 1) with the *raw* callback
  // API: the handler exchanges serialized ::grpc::ByteBuffer payloads
  // rather than deserialized protobuf messages. The #ifdef pairs pick the
  // pre- vs post-GRPC_CALLBACK_API_NONEXPERIMENTAL spellings of the API.
  template <class BaseClass>
  class ExperimentalWithRawCallbackMethod_Terminate : public BaseClass {
   private:
    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
   public:
    // Installs a raw callback handler that forwards every call to the
    // virtual reactor-returning Terminate overload declared below.
    ExperimentalWithRawCallbackMethod_Terminate() {
  #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      ::grpc::Service::
  #else
      ::grpc::Service::experimental().
  #endif
        MarkMethodRawCallback(1,
          new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
            [this](
  #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
                   ::grpc::CallbackServerContext*
  #else
                   ::grpc::experimental::CallbackServerContext*
  #endif
                     context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Terminate(context, request, response); }));
    }
    ~ExperimentalWithRawCallbackMethod_Terminate() override {
      BaseClassMustBeDerivedFromService(this);
    }
    // disable synchronous version of this method
    ::grpc::Status Terminate(::grpc::ServerContext* /*context*/, const ::tensorflow::TerminateRequest* /*request*/, ::tensorflow::TerminateResponse* /*response*/) override {
      abort();
      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" );
    }
    // Override to supply the raw callback implementation; the default body
    // returns nullptr (no reactor provided).
  #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
    virtual ::grpc::ServerUnaryReactor* Terminate(
      ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
  #else
    virtual ::grpc::experimental::ServerUnaryReactor* Terminate(
      ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
  #endif
      { return nullptr; }
  };
  // Mixin that registers Monitor (method index 2) with the *raw* callback
  // API: the handler exchanges serialized ::grpc::ByteBuffer payloads
  // rather than deserialized protobuf messages. The #ifdef pairs pick the
  // pre- vs post-GRPC_CALLBACK_API_NONEXPERIMENTAL spellings of the API.
  template <class BaseClass>
  class ExperimentalWithRawCallbackMethod_Monitor : public BaseClass {
   private:
    void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
   public:
    // Installs a raw callback handler that forwards every call to the
    // virtual reactor-returning Monitor overload declared below.
    ExperimentalWithRawCallbackMethod_Monitor() {
  #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
      ::grpc::Service::
  #else
      ::grpc::Service::experimental().
  #endif
        MarkMethodRawCallback(2,
          new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
            [this](
  #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
                   ::grpc::CallbackServerContext*
  #else
                   ::grpc::experimental::CallbackServerContext*
  #endif
                     context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Monitor(context, request, response); }));
    }
    ~ExperimentalWithRawCallbackMethod_Monitor() override {
      BaseClassMustBeDerivedFromService(this);
    }
    // disable synchronous version of this method
    ::grpc::Status Monitor(::grpc::ServerContext* /*context*/, const ::tensorflow::MonitorRequest* /*request*/, ::tensorflow::MonitorResponse* /*response*/) override {
      abort();
      return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" );
    }
    // Override to supply the raw callback implementation; the default body
    // returns nullptr (no reactor provided).
  #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
    virtual ::grpc::ServerUnaryReactor* Monitor(
      ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
  #else
    virtual ::grpc::experimental::ServerUnaryReactor* Monitor(
      ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
  #endif
      { return nullptr; }
  };
661 | template <class BaseClass> |
662 | class WithStreamedUnaryMethod_Profile : public BaseClass { |
663 | private: |
664 | void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} |
665 | public: |
666 | WithStreamedUnaryMethod_Profile() { |
667 | ::grpc::Service::MarkMethodStreamed(0, |
668 | new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::ProfileRequest, ::tensorflow::ProfileResponse>(std::bind(&WithStreamedUnaryMethod_Profile<BaseClass>::StreamedProfile, this, std::placeholders::_1, std::placeholders::_2))); |
669 | } |
670 | ~WithStreamedUnaryMethod_Profile() override { |
671 | BaseClassMustBeDerivedFromService(this); |
672 | } |
673 | // disable regular version of this method |
674 | ::grpc::Status Profile(::grpc::ServerContext* /*context*/, const ::tensorflow::ProfileRequest* /*request*/, ::tensorflow::ProfileResponse* /*response*/) override { |
675 | abort(); |
676 | return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" ); |
677 | } |
678 | // replace default version of method with streamed unary |
679 | virtual ::grpc::Status StreamedProfile(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::ProfileRequest,::tensorflow::ProfileResponse>* server_unary_streamer) = 0; |
680 | }; |
681 | template <class BaseClass> |
682 | class WithStreamedUnaryMethod_Terminate : public BaseClass { |
683 | private: |
684 | void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} |
685 | public: |
686 | WithStreamedUnaryMethod_Terminate() { |
687 | ::grpc::Service::MarkMethodStreamed(1, |
688 | new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::TerminateRequest, ::tensorflow::TerminateResponse>(std::bind(&WithStreamedUnaryMethod_Terminate<BaseClass>::StreamedTerminate, this, std::placeholders::_1, std::placeholders::_2))); |
689 | } |
690 | ~WithStreamedUnaryMethod_Terminate() override { |
691 | BaseClassMustBeDerivedFromService(this); |
692 | } |
693 | // disable regular version of this method |
694 | ::grpc::Status Terminate(::grpc::ServerContext* /*context*/, const ::tensorflow::TerminateRequest* /*request*/, ::tensorflow::TerminateResponse* /*response*/) override { |
695 | abort(); |
696 | return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" ); |
697 | } |
698 | // replace default version of method with streamed unary |
699 | virtual ::grpc::Status StreamedTerminate(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::TerminateRequest,::tensorflow::TerminateResponse>* server_unary_streamer) = 0; |
700 | }; |
701 | template <class BaseClass> |
702 | class WithStreamedUnaryMethod_Monitor : public BaseClass { |
703 | private: |
704 | void BaseClassMustBeDerivedFromService(const Service* /*service*/) {} |
705 | public: |
706 | WithStreamedUnaryMethod_Monitor() { |
707 | ::grpc::Service::MarkMethodStreamed(2, |
708 | new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::MonitorRequest, ::tensorflow::MonitorResponse>(std::bind(&WithStreamedUnaryMethod_Monitor<BaseClass>::StreamedMonitor, this, std::placeholders::_1, std::placeholders::_2))); |
709 | } |
710 | ~WithStreamedUnaryMethod_Monitor() override { |
711 | BaseClassMustBeDerivedFromService(this); |
712 | } |
713 | // disable regular version of this method |
714 | ::grpc::Status Monitor(::grpc::ServerContext* /*context*/, const ::tensorflow::MonitorRequest* /*request*/, ::tensorflow::MonitorResponse* /*response*/) override { |
715 | abort(); |
716 | return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "" ); |
717 | } |
718 | // replace default version of method with streamed unary |
719 | virtual ::grpc::Status StreamedMonitor(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::MonitorRequest,::tensorflow::MonitorResponse>* server_unary_streamer) = 0; |
720 | }; |
  // Service variant in which every unary method is implemented through a
  // ServerUnaryStreamer (StreamedProfile / StreamedTerminate / StreamedMonitor).
  typedef WithStreamedUnaryMethod_Profile<WithStreamedUnaryMethod_Terminate<WithStreamedUnaryMethod_Monitor<Service > > > StreamedUnaryService;
  // This service declares no server-streaming methods, so the split-streamed
  // variant is just the plain Service.
  typedef Service SplitStreamedService;
  // Combined streamed variant; with only unary methods present it has the
  // same composition as StreamedUnaryService.
  typedef WithStreamedUnaryMethod_Profile<WithStreamedUnaryMethod_Terminate<WithStreamedUnaryMethod_Monitor<Service > > > StreamedService;
724 | }; |
725 | |
726 | } // namespace grpc |
727 | |
728 | } // namespace tensorflow |
729 | |
730 | |
731 | #endif // GRPC_tensorflow_2fcore_2fprofiler_2fprofiler_5fservice_2eproto__INCLUDED |
732 | |