1// Generated by the gRPC C++ plugin.
2// If you make any local change, they will be lost.
3// source: tensorflow/core/protobuf/eager_service.proto
4#ifndef GRPC_tensorflow_2fcore_2fprotobuf_2feager_5fservice_2eproto__INCLUDED
5#define GRPC_tensorflow_2fcore_2fprotobuf_2feager_5fservice_2eproto__INCLUDED
6
7#include "tensorflow/core/protobuf/eager_service.pb.h"
8
9#include <functional>
10#include <grpc/impl/codegen/port_platform.h>
11#include <grpcpp/impl/codegen/async_generic_service.h>
12#include <grpcpp/impl/codegen/async_stream.h>
13#include <grpcpp/impl/codegen/async_unary_call.h>
14#include <grpcpp/impl/codegen/client_callback.h>
15#include <grpcpp/impl/codegen/client_context.h>
16#include <grpcpp/impl/codegen/completion_queue.h>
17#include <grpcpp/impl/codegen/message_allocator.h>
18#include <grpcpp/impl/codegen/method_handler.h>
19#include <grpcpp/impl/codegen/proto_utils.h>
20#include <grpcpp/impl/codegen/rpc_method.h>
21#include <grpcpp/impl/codegen/server_callback.h>
22#include <grpcpp/impl/codegen/server_callback_handlers.h>
23#include <grpcpp/impl/codegen/server_context.h>
24#include <grpcpp/impl/codegen/service_type.h>
25#include <grpcpp/impl/codegen/status.h>
26#include <grpcpp/impl/codegen/stub_options.h>
27#include <grpcpp/impl/codegen/sync_stream.h>
28
29namespace tensorflow {
30namespace eager {
31
32
33namespace grpc {
34
35// //////////////////////////////////////////////////////////////////////////////
36//
37// Eager Service defines a TensorFlow service that executes operations eagerly
38// on a set of local devices, on behalf of a remote Eager executor.
39//
40// The service impl will keep track of the various clients and devices it has
41// access to and allows the client to enqueue ops on any devices that it is able
42// to access and schedule data transfers from/to any of the peers.
43//
44// A client can generate multiple contexts to be able to independently execute
45// operations, but cannot share data between the two contexts.
46//
47// NOTE: Even though contexts generated by clients should be independent, the
48// lower level tensorflow execution engine is not, so they might share some data
49// (e.g. a Device's ResourceMgr).
50//
51// //////////////////////////////////////////////////////////////////////////////
52class EagerService final {
53 public:
54 static constexpr char const* service_full_name() {
55 return "tensorflow.eager.EagerService";
56 }
57 class StubInterface {
58 public:
59 virtual ~StubInterface() {}
60 // This initializes the worker, informing it about the other workers in the
61 // cluster and exchanging authentication tokens which will be used in all
62 // other RPCs to detect whether the worker has restarted.
63 virtual ::grpc::Status CreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::tensorflow::eager::CreateContextResponse* response) = 0;
64 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CreateContextResponse>> AsyncCreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::grpc::CompletionQueue* cq) {
65 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CreateContextResponse>>(AsyncCreateContextRaw(context, request, cq));
66 }
67 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CreateContextResponse>> PrepareAsyncCreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::grpc::CompletionQueue* cq) {
68 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CreateContextResponse>>(PrepareAsyncCreateContextRaw(context, request, cq));
69 }
70 // This updates the eager context on an existing worker when updating the set
71 // of servers in a distributed eager cluster.
72 virtual ::grpc::Status UpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::tensorflow::eager::UpdateContextResponse* response) = 0;
73 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::UpdateContextResponse>> AsyncUpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::grpc::CompletionQueue* cq) {
74 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::UpdateContextResponse>>(AsyncUpdateContextRaw(context, request, cq));
75 }
76 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::UpdateContextResponse>> PrepareAsyncUpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::grpc::CompletionQueue* cq) {
77 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::UpdateContextResponse>>(PrepareAsyncUpdateContextRaw(context, request, cq));
78 }
79 // This takes a list of Execute and DeleteTensorHandle operations and enqueues
80 // (in async mode) or executes (in sync mode) them on the remote server.
81 // All outputs of ops which were not explicitly deleted with
82 // DeleteTensorHandle entries will be assumed to be alive and are usable by
83 // future calls to Enqueue.
84 virtual ::grpc::Status Enqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::tensorflow::eager::EnqueueResponse* response) = 0;
85 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::EnqueueResponse>> AsyncEnqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::grpc::CompletionQueue* cq) {
86 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::EnqueueResponse>>(AsyncEnqueueRaw(context, request, cq));
87 }
88 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::EnqueueResponse>> PrepareAsyncEnqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::grpc::CompletionQueue* cq) {
89 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::EnqueueResponse>>(PrepareAsyncEnqueueRaw(context, request, cq));
90 }
91 // A streaming version of Enqueue.
92 // Current server implementation sends one response per received request.
93 // The benefit for using a streaming version is that subsequent requests
94 // can be sent without waiting for a response to the previous request. This
95 // synchronization is required in the regular Enqueue call because gRPC does
96 // not guarantee to preserve request order.
97 std::unique_ptr< ::grpc::ClientReaderWriterInterface< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>> StreamingEnqueue(::grpc::ClientContext* context) {
98 return std::unique_ptr< ::grpc::ClientReaderWriterInterface< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>>(StreamingEnqueueRaw(context));
99 }
100 std::unique_ptr< ::grpc::ClientAsyncReaderWriterInterface< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>> AsyncStreamingEnqueue(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, void* tag) {
101 return std::unique_ptr< ::grpc::ClientAsyncReaderWriterInterface< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>>(AsyncStreamingEnqueueRaw(context, cq, tag));
102 }
103 std::unique_ptr< ::grpc::ClientAsyncReaderWriterInterface< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>> PrepareAsyncStreamingEnqueue(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq) {
104 return std::unique_ptr< ::grpc::ClientAsyncReaderWriterInterface< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>>(PrepareAsyncStreamingEnqueueRaw(context, cq));
105 }
106 // Takes a set of op IDs and waits until those ops are done. Returns any error
107 // in the stream so far.
108 virtual ::grpc::Status WaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::tensorflow::eager::WaitQueueDoneResponse* response) = 0;
109 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::WaitQueueDoneResponse>> AsyncWaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::grpc::CompletionQueue* cq) {
110 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::WaitQueueDoneResponse>>(AsyncWaitQueueDoneRaw(context, request, cq));
111 }
112 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::WaitQueueDoneResponse>> PrepareAsyncWaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::grpc::CompletionQueue* cq) {
113 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::WaitQueueDoneResponse>>(PrepareAsyncWaitQueueDoneRaw(context, request, cq));
114 }
115 // This takes an Eager operation and executes it in async mode on the remote
116 // server. Different from EnqueueRequest, ops/functions sent through this
117 // type of requests are allowed to execute in parallel and no ordering is
118 // preserved by RPC stream or executor.
119 // This request type should only be used for executing component functions.
120 // Ordering of component functions should be enforced by their corresponding
121 // main functions. The runtime ensures the following invarients for component
122 // functions (CFs) and their main functions (MFs):
123 // (1) MF1 -> MF2 ==> CF1 -> CF2 ("->" indicates order of execution);
124 // (2) MF1 || MF2 ==> CF1 || CF2 ("||" indicates possible parallel execution);
125 // (3) For CF1 and CF2 that come from the same MF, CF1 || CF2
126 // For executing ops/main functions, use Enqueue or StreamingEnqueue instead
127 // for correct ordering.
128 virtual ::grpc::Status RunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::tensorflow::eager::RunComponentFunctionResponse* response) = 0;
129 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::RunComponentFunctionResponse>> AsyncRunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::grpc::CompletionQueue* cq) {
130 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::RunComponentFunctionResponse>>(AsyncRunComponentFunctionRaw(context, request, cq));
131 }
132 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::RunComponentFunctionResponse>> PrepareAsyncRunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::grpc::CompletionQueue* cq) {
133 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::RunComponentFunctionResponse>>(PrepareAsyncRunComponentFunctionRaw(context, request, cq));
134 }
135 // Contexts are always created with a deadline and no RPCs within a deadline
136 // will trigger a context garbage collection. KeepAlive calls can be used to
137 // delay this. It can also be used to validate the existence of a context ID
138 // on remote eager worker. If the context is on remote worker, return the same
139 // ID and the current context view ID. This is useful for checking if the
140 // remote worker (potentially with the same task name and hostname / port) is
141 // replaced with a new process.
142 virtual ::grpc::Status KeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::tensorflow::eager::KeepAliveResponse* response) = 0;
143 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::KeepAliveResponse>> AsyncKeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::grpc::CompletionQueue* cq) {
144 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::KeepAliveResponse>>(AsyncKeepAliveRaw(context, request, cq));
145 }
146 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::KeepAliveResponse>> PrepareAsyncKeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::grpc::CompletionQueue* cq) {
147 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::KeepAliveResponse>>(PrepareAsyncKeepAliveRaw(context, request, cq));
148 }
149 // Closes the context. No calls to other methods using the existing context ID
150 // are valid after this.
151 virtual ::grpc::Status CloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::tensorflow::eager::CloseContextResponse* response) = 0;
152 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CloseContextResponse>> AsyncCloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::grpc::CompletionQueue* cq) {
153 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CloseContextResponse>>(AsyncCloseContextRaw(context, request, cq));
154 }
155 std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CloseContextResponse>> PrepareAsyncCloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::grpc::CompletionQueue* cq) {
156 return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CloseContextResponse>>(PrepareAsyncCloseContextRaw(context, request, cq));
157 }
158 class experimental_async_interface {
159 public:
160 virtual ~experimental_async_interface() {}
161 // This initializes the worker, informing it about the other workers in the
162 // cluster and exchanging authentication tokens which will be used in all
163 // other RPCs to detect whether the worker has restarted.
164 virtual void CreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest* request, ::tensorflow::eager::CreateContextResponse* response, std::function<void(::grpc::Status)>) = 0;
165 virtual void CreateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CreateContextResponse* response, std::function<void(::grpc::Status)>) = 0;
166 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
167 virtual void CreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest* request, ::tensorflow::eager::CreateContextResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
168 #else
169 virtual void CreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest* request, ::tensorflow::eager::CreateContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
170 #endif
171 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
172 virtual void CreateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CreateContextResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
173 #else
174 virtual void CreateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CreateContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
175 #endif
176 // This updates the eager context on an existing worker when updating the set
177 // of servers in a distributed eager cluster.
178 virtual void UpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest* request, ::tensorflow::eager::UpdateContextResponse* response, std::function<void(::grpc::Status)>) = 0;
179 virtual void UpdateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::UpdateContextResponse* response, std::function<void(::grpc::Status)>) = 0;
180 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
181 virtual void UpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest* request, ::tensorflow::eager::UpdateContextResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
182 #else
183 virtual void UpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest* request, ::tensorflow::eager::UpdateContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
184 #endif
185 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
186 virtual void UpdateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::UpdateContextResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
187 #else
188 virtual void UpdateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::UpdateContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
189 #endif
190 // This takes a list of Execute and DeleteTensorHandle operations and enqueues
191 // (in async mode) or executes (in sync mode) them on the remote server.
192 // All outputs of ops which were not explicitly deleted with
193 // DeleteTensorHandle entries will be assumed to be alive and are usable by
194 // future calls to Enqueue.
195 virtual void Enqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest* request, ::tensorflow::eager::EnqueueResponse* response, std::function<void(::grpc::Status)>) = 0;
196 virtual void Enqueue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::EnqueueResponse* response, std::function<void(::grpc::Status)>) = 0;
197 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
198 virtual void Enqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest* request, ::tensorflow::eager::EnqueueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
199 #else
200 virtual void Enqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest* request, ::tensorflow::eager::EnqueueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
201 #endif
202 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
203 virtual void Enqueue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::EnqueueResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
204 #else
205 virtual void Enqueue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::EnqueueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
206 #endif
207 // A streaming version of Enqueue.
208 // Current server implementation sends one response per received request.
209 // The benefit for using a streaming version is that subsequent requests
210 // can be sent without waiting for a response to the previous request. This
211 // synchronization is required in the regular Enqueue call because gRPC does
212 // not guarantee to preserve request order.
213 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
214 virtual void StreamingEnqueue(::grpc::ClientContext* context, ::grpc::ClientBidiReactor< ::tensorflow::eager::EnqueueRequest,::tensorflow::eager::EnqueueResponse>* reactor) = 0;
215 #else
216 virtual void StreamingEnqueue(::grpc::ClientContext* context, ::grpc::experimental::ClientBidiReactor< ::tensorflow::eager::EnqueueRequest,::tensorflow::eager::EnqueueResponse>* reactor) = 0;
217 #endif
218 // Takes a set of op IDs and waits until those ops are done. Returns any error
219 // in the stream so far.
220 virtual void WaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest* request, ::tensorflow::eager::WaitQueueDoneResponse* response, std::function<void(::grpc::Status)>) = 0;
221 virtual void WaitQueueDone(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::WaitQueueDoneResponse* response, std::function<void(::grpc::Status)>) = 0;
222 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
223 virtual void WaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest* request, ::tensorflow::eager::WaitQueueDoneResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
224 #else
225 virtual void WaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest* request, ::tensorflow::eager::WaitQueueDoneResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
226 #endif
227 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
228 virtual void WaitQueueDone(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::WaitQueueDoneResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
229 #else
230 virtual void WaitQueueDone(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::WaitQueueDoneResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
231 #endif
232 // This takes an Eager operation and executes it in async mode on the remote
233 // server. Different from EnqueueRequest, ops/functions sent through this
234 // type of requests are allowed to execute in parallel and no ordering is
235 // preserved by RPC stream or executor.
236 // This request type should only be used for executing component functions.
237 // Ordering of component functions should be enforced by their corresponding
238 // main functions. The runtime ensures the following invarients for component
239 // functions (CFs) and their main functions (MFs):
240 // (1) MF1 -> MF2 ==> CF1 -> CF2 ("->" indicates order of execution);
241 // (2) MF1 || MF2 ==> CF1 || CF2 ("||" indicates possible parallel execution);
242 // (3) For CF1 and CF2 that come from the same MF, CF1 || CF2
243 // For executing ops/main functions, use Enqueue or StreamingEnqueue instead
244 // for correct ordering.
245 virtual void RunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest* request, ::tensorflow::eager::RunComponentFunctionResponse* response, std::function<void(::grpc::Status)>) = 0;
246 virtual void RunComponentFunction(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::RunComponentFunctionResponse* response, std::function<void(::grpc::Status)>) = 0;
247 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
248 virtual void RunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest* request, ::tensorflow::eager::RunComponentFunctionResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
249 #else
250 virtual void RunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest* request, ::tensorflow::eager::RunComponentFunctionResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
251 #endif
252 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
253 virtual void RunComponentFunction(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::RunComponentFunctionResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
254 #else
255 virtual void RunComponentFunction(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::RunComponentFunctionResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
256 #endif
257 // Contexts are always created with a deadline and no RPCs within a deadline
258 // will trigger a context garbage collection. KeepAlive calls can be used to
259 // delay this. It can also be used to validate the existence of a context ID
260 // on remote eager worker. If the context is on remote worker, return the same
261 // ID and the current context view ID. This is useful for checking if the
262 // remote worker (potentially with the same task name and hostname / port) is
263 // replaced with a new process.
264 virtual void KeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest* request, ::tensorflow::eager::KeepAliveResponse* response, std::function<void(::grpc::Status)>) = 0;
265 virtual void KeepAlive(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::KeepAliveResponse* response, std::function<void(::grpc::Status)>) = 0;
266 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
267 virtual void KeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest* request, ::tensorflow::eager::KeepAliveResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
268 #else
269 virtual void KeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest* request, ::tensorflow::eager::KeepAliveResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
270 #endif
271 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
272 virtual void KeepAlive(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::KeepAliveResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
273 #else
274 virtual void KeepAlive(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::KeepAliveResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
275 #endif
276 // Closes the context. No calls to other methods using the existing context ID
277 // are valid after this.
278 virtual void CloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest* request, ::tensorflow::eager::CloseContextResponse* response, std::function<void(::grpc::Status)>) = 0;
279 virtual void CloseContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CloseContextResponse* response, std::function<void(::grpc::Status)>) = 0;
280 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
281 virtual void CloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest* request, ::tensorflow::eager::CloseContextResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
282 #else
283 virtual void CloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest* request, ::tensorflow::eager::CloseContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
284 #endif
285 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
286 virtual void CloseContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CloseContextResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
287 #else
288 virtual void CloseContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CloseContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) = 0;
289 #endif
290 };
291 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
292 typedef class experimental_async_interface async_interface;
293 #endif
294 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
295 async_interface* async() { return experimental_async(); }
296 #endif
297 virtual class experimental_async_interface* experimental_async() { return nullptr; }
298 private:
299 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CreateContextResponse>* AsyncCreateContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::grpc::CompletionQueue* cq) = 0;
300 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CreateContextResponse>* PrepareAsyncCreateContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::grpc::CompletionQueue* cq) = 0;
301 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::UpdateContextResponse>* AsyncUpdateContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::grpc::CompletionQueue* cq) = 0;
302 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::UpdateContextResponse>* PrepareAsyncUpdateContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::grpc::CompletionQueue* cq) = 0;
303 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::EnqueueResponse>* AsyncEnqueueRaw(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::grpc::CompletionQueue* cq) = 0;
304 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::EnqueueResponse>* PrepareAsyncEnqueueRaw(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::grpc::CompletionQueue* cq) = 0;
305 virtual ::grpc::ClientReaderWriterInterface< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>* StreamingEnqueueRaw(::grpc::ClientContext* context) = 0;
306 virtual ::grpc::ClientAsyncReaderWriterInterface< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>* AsyncStreamingEnqueueRaw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, void* tag) = 0;
307 virtual ::grpc::ClientAsyncReaderWriterInterface< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>* PrepareAsyncStreamingEnqueueRaw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq) = 0;
308 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::WaitQueueDoneResponse>* AsyncWaitQueueDoneRaw(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::grpc::CompletionQueue* cq) = 0;
309 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::WaitQueueDoneResponse>* PrepareAsyncWaitQueueDoneRaw(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::grpc::CompletionQueue* cq) = 0;
310 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::RunComponentFunctionResponse>* AsyncRunComponentFunctionRaw(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::grpc::CompletionQueue* cq) = 0;
311 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::RunComponentFunctionResponse>* PrepareAsyncRunComponentFunctionRaw(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::grpc::CompletionQueue* cq) = 0;
312 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::KeepAliveResponse>* AsyncKeepAliveRaw(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::grpc::CompletionQueue* cq) = 0;
313 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::KeepAliveResponse>* PrepareAsyncKeepAliveRaw(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::grpc::CompletionQueue* cq) = 0;
314 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CloseContextResponse>* AsyncCloseContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::grpc::CompletionQueue* cq) = 0;
315 virtual ::grpc::ClientAsyncResponseReaderInterface< ::tensorflow::eager::CloseContextResponse>* PrepareAsyncCloseContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::grpc::CompletionQueue* cq) = 0;
316 };
317 class Stub final : public StubInterface {
318 public:
319 Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);
320 ::grpc::Status CreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::tensorflow::eager::CreateContextResponse* response) override;
321 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CreateContextResponse>> AsyncCreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::grpc::CompletionQueue* cq) {
322 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CreateContextResponse>>(AsyncCreateContextRaw(context, request, cq));
323 }
324 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CreateContextResponse>> PrepareAsyncCreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::grpc::CompletionQueue* cq) {
325 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CreateContextResponse>>(PrepareAsyncCreateContextRaw(context, request, cq));
326 }
327 ::grpc::Status UpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::tensorflow::eager::UpdateContextResponse* response) override;
328 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::UpdateContextResponse>> AsyncUpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::grpc::CompletionQueue* cq) {
329 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::UpdateContextResponse>>(AsyncUpdateContextRaw(context, request, cq));
330 }
331 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::UpdateContextResponse>> PrepareAsyncUpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::grpc::CompletionQueue* cq) {
332 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::UpdateContextResponse>>(PrepareAsyncUpdateContextRaw(context, request, cq));
333 }
334 ::grpc::Status Enqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::tensorflow::eager::EnqueueResponse* response) override;
335 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::EnqueueResponse>> AsyncEnqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::grpc::CompletionQueue* cq) {
336 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::EnqueueResponse>>(AsyncEnqueueRaw(context, request, cq));
337 }
338 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::EnqueueResponse>> PrepareAsyncEnqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::grpc::CompletionQueue* cq) {
339 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::EnqueueResponse>>(PrepareAsyncEnqueueRaw(context, request, cq));
340 }
341 std::unique_ptr< ::grpc::ClientReaderWriter< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>> StreamingEnqueue(::grpc::ClientContext* context) {
342 return std::unique_ptr< ::grpc::ClientReaderWriter< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>>(StreamingEnqueueRaw(context));
343 }
344 std::unique_ptr< ::grpc::ClientAsyncReaderWriter< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>> AsyncStreamingEnqueue(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, void* tag) {
345 return std::unique_ptr< ::grpc::ClientAsyncReaderWriter< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>>(AsyncStreamingEnqueueRaw(context, cq, tag));
346 }
347 std::unique_ptr< ::grpc::ClientAsyncReaderWriter< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>> PrepareAsyncStreamingEnqueue(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq) {
348 return std::unique_ptr< ::grpc::ClientAsyncReaderWriter< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>>(PrepareAsyncStreamingEnqueueRaw(context, cq));
349 }
350 ::grpc::Status WaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::tensorflow::eager::WaitQueueDoneResponse* response) override;
351 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::WaitQueueDoneResponse>> AsyncWaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::grpc::CompletionQueue* cq) {
352 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::WaitQueueDoneResponse>>(AsyncWaitQueueDoneRaw(context, request, cq));
353 }
354 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::WaitQueueDoneResponse>> PrepareAsyncWaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::grpc::CompletionQueue* cq) {
355 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::WaitQueueDoneResponse>>(PrepareAsyncWaitQueueDoneRaw(context, request, cq));
356 }
357 ::grpc::Status RunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::tensorflow::eager::RunComponentFunctionResponse* response) override;
358 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::RunComponentFunctionResponse>> AsyncRunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::grpc::CompletionQueue* cq) {
359 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::RunComponentFunctionResponse>>(AsyncRunComponentFunctionRaw(context, request, cq));
360 }
361 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::RunComponentFunctionResponse>> PrepareAsyncRunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::grpc::CompletionQueue* cq) {
362 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::RunComponentFunctionResponse>>(PrepareAsyncRunComponentFunctionRaw(context, request, cq));
363 }
364 ::grpc::Status KeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::tensorflow::eager::KeepAliveResponse* response) override;
365 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::KeepAliveResponse>> AsyncKeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::grpc::CompletionQueue* cq) {
366 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::KeepAliveResponse>>(AsyncKeepAliveRaw(context, request, cq));
367 }
368 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::KeepAliveResponse>> PrepareAsyncKeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::grpc::CompletionQueue* cq) {
369 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::KeepAliveResponse>>(PrepareAsyncKeepAliveRaw(context, request, cq));
370 }
371 ::grpc::Status CloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::tensorflow::eager::CloseContextResponse* response) override;
372 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CloseContextResponse>> AsyncCloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::grpc::CompletionQueue* cq) {
373 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CloseContextResponse>>(AsyncCloseContextRaw(context, request, cq));
374 }
375 std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CloseContextResponse>> PrepareAsyncCloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::grpc::CompletionQueue* cq) {
376 return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CloseContextResponse>>(PrepareAsyncCloseContextRaw(context, request, cq));
377 }
378 class experimental_async final :
379 public StubInterface::experimental_async_interface {
380 public:
381 void CreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest* request, ::tensorflow::eager::CreateContextResponse* response, std::function<void(::grpc::Status)>) override;
382 void CreateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CreateContextResponse* response, std::function<void(::grpc::Status)>) override;
383 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
384 void CreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest* request, ::tensorflow::eager::CreateContextResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
385 #else
386 void CreateContext(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest* request, ::tensorflow::eager::CreateContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
387 #endif
388 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
389 void CreateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CreateContextResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
390 #else
391 void CreateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CreateContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
392 #endif
393 void UpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest* request, ::tensorflow::eager::UpdateContextResponse* response, std::function<void(::grpc::Status)>) override;
394 void UpdateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::UpdateContextResponse* response, std::function<void(::grpc::Status)>) override;
395 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
396 void UpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest* request, ::tensorflow::eager::UpdateContextResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
397 #else
398 void UpdateContext(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest* request, ::tensorflow::eager::UpdateContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
399 #endif
400 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
401 void UpdateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::UpdateContextResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
402 #else
403 void UpdateContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::UpdateContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
404 #endif
405 void Enqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest* request, ::tensorflow::eager::EnqueueResponse* response, std::function<void(::grpc::Status)>) override;
406 void Enqueue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::EnqueueResponse* response, std::function<void(::grpc::Status)>) override;
407 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
408 void Enqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest* request, ::tensorflow::eager::EnqueueResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
409 #else
410 void Enqueue(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest* request, ::tensorflow::eager::EnqueueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
411 #endif
412 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
413 void Enqueue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::EnqueueResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
414 #else
415 void Enqueue(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::EnqueueResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
416 #endif
417 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
418 void StreamingEnqueue(::grpc::ClientContext* context, ::grpc::ClientBidiReactor< ::tensorflow::eager::EnqueueRequest,::tensorflow::eager::EnqueueResponse>* reactor) override;
419 #else
420 void StreamingEnqueue(::grpc::ClientContext* context, ::grpc::experimental::ClientBidiReactor< ::tensorflow::eager::EnqueueRequest,::tensorflow::eager::EnqueueResponse>* reactor) override;
421 #endif
422 void WaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest* request, ::tensorflow::eager::WaitQueueDoneResponse* response, std::function<void(::grpc::Status)>) override;
423 void WaitQueueDone(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::WaitQueueDoneResponse* response, std::function<void(::grpc::Status)>) override;
424 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
425 void WaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest* request, ::tensorflow::eager::WaitQueueDoneResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
426 #else
427 void WaitQueueDone(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest* request, ::tensorflow::eager::WaitQueueDoneResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
428 #endif
429 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
430 void WaitQueueDone(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::WaitQueueDoneResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
431 #else
432 void WaitQueueDone(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::WaitQueueDoneResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
433 #endif
434 void RunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest* request, ::tensorflow::eager::RunComponentFunctionResponse* response, std::function<void(::grpc::Status)>) override;
435 void RunComponentFunction(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::RunComponentFunctionResponse* response, std::function<void(::grpc::Status)>) override;
436 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
437 void RunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest* request, ::tensorflow::eager::RunComponentFunctionResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
438 #else
439 void RunComponentFunction(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest* request, ::tensorflow::eager::RunComponentFunctionResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
440 #endif
441 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
442 void RunComponentFunction(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::RunComponentFunctionResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
443 #else
444 void RunComponentFunction(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::RunComponentFunctionResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
445 #endif
446 void KeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest* request, ::tensorflow::eager::KeepAliveResponse* response, std::function<void(::grpc::Status)>) override;
447 void KeepAlive(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::KeepAliveResponse* response, std::function<void(::grpc::Status)>) override;
448 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
449 void KeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest* request, ::tensorflow::eager::KeepAliveResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
450 #else
451 void KeepAlive(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest* request, ::tensorflow::eager::KeepAliveResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
452 #endif
453 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
454 void KeepAlive(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::KeepAliveResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
455 #else
456 void KeepAlive(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::KeepAliveResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
457 #endif
458 void CloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest* request, ::tensorflow::eager::CloseContextResponse* response, std::function<void(::grpc::Status)>) override;
459 void CloseContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CloseContextResponse* response, std::function<void(::grpc::Status)>) override;
460 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
461 void CloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest* request, ::tensorflow::eager::CloseContextResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
462 #else
463 void CloseContext(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest* request, ::tensorflow::eager::CloseContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
464 #endif
465 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
466 void CloseContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CloseContextResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
467 #else
468 void CloseContext(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::tensorflow::eager::CloseContextResponse* response, ::grpc::experimental::ClientUnaryReactor* reactor) override;
469 #endif
470 private:
471 friend class Stub;
472 explicit experimental_async(Stub* stub): stub_(stub) { }
473 Stub* stub() { return stub_; }
474 Stub* stub_;
475 };
476 class experimental_async_interface* experimental_async() override { return &async_stub_; }
477
478 private:
479 std::shared_ptr< ::grpc::ChannelInterface> channel_;
480 class experimental_async async_stub_{this};
481 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CreateContextResponse>* AsyncCreateContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::grpc::CompletionQueue* cq) override;
482 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CreateContextResponse>* PrepareAsyncCreateContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::CreateContextRequest& request, ::grpc::CompletionQueue* cq) override;
483 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::UpdateContextResponse>* AsyncUpdateContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::grpc::CompletionQueue* cq) override;
484 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::UpdateContextResponse>* PrepareAsyncUpdateContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::UpdateContextRequest& request, ::grpc::CompletionQueue* cq) override;
485 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::EnqueueResponse>* AsyncEnqueueRaw(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::grpc::CompletionQueue* cq) override;
486 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::EnqueueResponse>* PrepareAsyncEnqueueRaw(::grpc::ClientContext* context, const ::tensorflow::eager::EnqueueRequest& request, ::grpc::CompletionQueue* cq) override;
487 ::grpc::ClientReaderWriter< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>* StreamingEnqueueRaw(::grpc::ClientContext* context) override;
488 ::grpc::ClientAsyncReaderWriter< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>* AsyncStreamingEnqueueRaw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq, void* tag) override;
489 ::grpc::ClientAsyncReaderWriter< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>* PrepareAsyncStreamingEnqueueRaw(::grpc::ClientContext* context, ::grpc::CompletionQueue* cq) override;
490 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::WaitQueueDoneResponse>* AsyncWaitQueueDoneRaw(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::grpc::CompletionQueue* cq) override;
491 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::WaitQueueDoneResponse>* PrepareAsyncWaitQueueDoneRaw(::grpc::ClientContext* context, const ::tensorflow::eager::WaitQueueDoneRequest& request, ::grpc::CompletionQueue* cq) override;
492 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::RunComponentFunctionResponse>* AsyncRunComponentFunctionRaw(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::grpc::CompletionQueue* cq) override;
493 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::RunComponentFunctionResponse>* PrepareAsyncRunComponentFunctionRaw(::grpc::ClientContext* context, const ::tensorflow::eager::RunComponentFunctionRequest& request, ::grpc::CompletionQueue* cq) override;
494 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::KeepAliveResponse>* AsyncKeepAliveRaw(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::grpc::CompletionQueue* cq) override;
495 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::KeepAliveResponse>* PrepareAsyncKeepAliveRaw(::grpc::ClientContext* context, const ::tensorflow::eager::KeepAliveRequest& request, ::grpc::CompletionQueue* cq) override;
496 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CloseContextResponse>* AsyncCloseContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::grpc::CompletionQueue* cq) override;
497 ::grpc::ClientAsyncResponseReader< ::tensorflow::eager::CloseContextResponse>* PrepareAsyncCloseContextRaw(::grpc::ClientContext* context, const ::tensorflow::eager::CloseContextRequest& request, ::grpc::CompletionQueue* cq) override;
498 const ::grpc::internal::RpcMethod rpcmethod_CreateContext_;
499 const ::grpc::internal::RpcMethod rpcmethod_UpdateContext_;
500 const ::grpc::internal::RpcMethod rpcmethod_Enqueue_;
501 const ::grpc::internal::RpcMethod rpcmethod_StreamingEnqueue_;
502 const ::grpc::internal::RpcMethod rpcmethod_WaitQueueDone_;
503 const ::grpc::internal::RpcMethod rpcmethod_RunComponentFunction_;
504 const ::grpc::internal::RpcMethod rpcmethod_KeepAlive_;
505 const ::grpc::internal::RpcMethod rpcmethod_CloseContext_;
506 };
507 static std::unique_ptr<Stub> NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions());
508
509 class Service : public ::grpc::Service {
510 public:
511 Service();
512 virtual ~Service();
513 // This initializes the worker, informing it about the other workers in the
514 // cluster and exchanging authentication tokens which will be used in all
515 // other RPCs to detect whether the worker has restarted.
516 virtual ::grpc::Status CreateContext(::grpc::ServerContext* context, const ::tensorflow::eager::CreateContextRequest* request, ::tensorflow::eager::CreateContextResponse* response);
517 // This updates the eager context on an existing worker when updating the set
518 // of servers in a distributed eager cluster.
519 virtual ::grpc::Status UpdateContext(::grpc::ServerContext* context, const ::tensorflow::eager::UpdateContextRequest* request, ::tensorflow::eager::UpdateContextResponse* response);
520 // This takes a list of Execute and DeleteTensorHandle operations and enqueues
521 // (in async mode) or executes (in sync mode) them on the remote server.
522 // All outputs of ops which were not explicitly deleted with
523 // DeleteTensorHandle entries will be assumed to be alive and are usable by
524 // future calls to Enqueue.
525 virtual ::grpc::Status Enqueue(::grpc::ServerContext* context, const ::tensorflow::eager::EnqueueRequest* request, ::tensorflow::eager::EnqueueResponse* response);
526 // A streaming version of Enqueue.
527 // Current server implementation sends one response per received request.
528 // The benefit for using a streaming version is that subsequent requests
529 // can be sent without waiting for a response to the previous request. This
530 // synchronization is required in the regular Enqueue call because gRPC does
531 // not guarantee to preserve request order.
532 virtual ::grpc::Status StreamingEnqueue(::grpc::ServerContext* context, ::grpc::ServerReaderWriter< ::tensorflow::eager::EnqueueResponse, ::tensorflow::eager::EnqueueRequest>* stream);
533 // Takes a set of op IDs and waits until those ops are done. Returns any error
534 // in the stream so far.
535 virtual ::grpc::Status WaitQueueDone(::grpc::ServerContext* context, const ::tensorflow::eager::WaitQueueDoneRequest* request, ::tensorflow::eager::WaitQueueDoneResponse* response);
536 // This takes an Eager operation and executes it in async mode on the remote
537 // server. Different from EnqueueRequest, ops/functions sent through this
538 // type of requests are allowed to execute in parallel and no ordering is
539 // preserved by RPC stream or executor.
540 // This request type should only be used for executing component functions.
541 // Ordering of component functions should be enforced by their corresponding
542 // main functions. The runtime ensures the following invarients for component
543 // functions (CFs) and their main functions (MFs):
544 // (1) MF1 -> MF2 ==> CF1 -> CF2 ("->" indicates order of execution);
545 // (2) MF1 || MF2 ==> CF1 || CF2 ("||" indicates possible parallel execution);
546 // (3) For CF1 and CF2 that come from the same MF, CF1 || CF2
547 // For executing ops/main functions, use Enqueue or StreamingEnqueue instead
548 // for correct ordering.
549 virtual ::grpc::Status RunComponentFunction(::grpc::ServerContext* context, const ::tensorflow::eager::RunComponentFunctionRequest* request, ::tensorflow::eager::RunComponentFunctionResponse* response);
550 // Contexts are always created with a deadline and no RPCs within a deadline
551 // will trigger a context garbage collection. KeepAlive calls can be used to
552 // delay this. It can also be used to validate the existence of a context ID
553 // on remote eager worker. If the context is on remote worker, return the same
554 // ID and the current context view ID. This is useful for checking if the
555 // remote worker (potentially with the same task name and hostname / port) is
556 // replaced with a new process.
557 virtual ::grpc::Status KeepAlive(::grpc::ServerContext* context, const ::tensorflow::eager::KeepAliveRequest* request, ::tensorflow::eager::KeepAliveResponse* response);
558 // Closes the context. No calls to other methods using the existing context ID
559 // are valid after this.
560 virtual ::grpc::Status CloseContext(::grpc::ServerContext* context, const ::tensorflow::eager::CloseContextRequest* request, ::tensorflow::eager::CloseContextResponse* response);
561 };
562 template <class BaseClass>
563 class WithAsyncMethod_CreateContext : public BaseClass {
564 private:
565 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
566 public:
567 WithAsyncMethod_CreateContext() {
568 ::grpc::Service::MarkMethodAsync(0);
569 }
570 ~WithAsyncMethod_CreateContext() override {
571 BaseClassMustBeDerivedFromService(this);
572 }
573 // disable synchronous version of this method
574 ::grpc::Status CreateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CreateContextRequest* /*request*/, ::tensorflow::eager::CreateContextResponse* /*response*/) override {
575 abort();
576 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
577 }
578 void RequestCreateContext(::grpc::ServerContext* context, ::tensorflow::eager::CreateContextRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::eager::CreateContextResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
579 ::grpc::Service::RequestAsyncUnary(0, context, request, response, new_call_cq, notification_cq, tag);
580 }
581 };
582 template <class BaseClass>
583 class WithAsyncMethod_UpdateContext : public BaseClass {
584 private:
585 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
586 public:
587 WithAsyncMethod_UpdateContext() {
588 ::grpc::Service::MarkMethodAsync(1);
589 }
590 ~WithAsyncMethod_UpdateContext() override {
591 BaseClassMustBeDerivedFromService(this);
592 }
593 // disable synchronous version of this method
594 ::grpc::Status UpdateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::UpdateContextRequest* /*request*/, ::tensorflow::eager::UpdateContextResponse* /*response*/) override {
595 abort();
596 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
597 }
598 void RequestUpdateContext(::grpc::ServerContext* context, ::tensorflow::eager::UpdateContextRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::eager::UpdateContextResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
599 ::grpc::Service::RequestAsyncUnary(1, context, request, response, new_call_cq, notification_cq, tag);
600 }
601 };
602 template <class BaseClass>
603 class WithAsyncMethod_Enqueue : public BaseClass {
604 private:
605 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
606 public:
607 WithAsyncMethod_Enqueue() {
608 ::grpc::Service::MarkMethodAsync(2);
609 }
610 ~WithAsyncMethod_Enqueue() override {
611 BaseClassMustBeDerivedFromService(this);
612 }
613 // disable synchronous version of this method
614 ::grpc::Status Enqueue(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::EnqueueRequest* /*request*/, ::tensorflow::eager::EnqueueResponse* /*response*/) override {
615 abort();
616 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
617 }
618 void RequestEnqueue(::grpc::ServerContext* context, ::tensorflow::eager::EnqueueRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::eager::EnqueueResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
619 ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag);
620 }
621 };
622 template <class BaseClass>
623 class WithAsyncMethod_StreamingEnqueue : public BaseClass {
624 private:
625 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
626 public:
627 WithAsyncMethod_StreamingEnqueue() {
628 ::grpc::Service::MarkMethodAsync(3);
629 }
630 ~WithAsyncMethod_StreamingEnqueue() override {
631 BaseClassMustBeDerivedFromService(this);
632 }
633 // disable synchronous version of this method
634 ::grpc::Status StreamingEnqueue(::grpc::ServerContext* /*context*/, ::grpc::ServerReaderWriter< ::tensorflow::eager::EnqueueResponse, ::tensorflow::eager::EnqueueRequest>* /*stream*/) override {
635 abort();
636 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
637 }
638 void RequestStreamingEnqueue(::grpc::ServerContext* context, ::grpc::ServerAsyncReaderWriter< ::tensorflow::eager::EnqueueResponse, ::tensorflow::eager::EnqueueRequest>* stream, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
639 ::grpc::Service::RequestAsyncBidiStreaming(3, context, stream, new_call_cq, notification_cq, tag);
640 }
641 };
642 template <class BaseClass>
643 class WithAsyncMethod_WaitQueueDone : public BaseClass {
644 private:
645 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
646 public:
647 WithAsyncMethod_WaitQueueDone() {
648 ::grpc::Service::MarkMethodAsync(4);
649 }
650 ~WithAsyncMethod_WaitQueueDone() override {
651 BaseClassMustBeDerivedFromService(this);
652 }
653 // disable synchronous version of this method
654 ::grpc::Status WaitQueueDone(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::WaitQueueDoneRequest* /*request*/, ::tensorflow::eager::WaitQueueDoneResponse* /*response*/) override {
655 abort();
656 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
657 }
658 void RequestWaitQueueDone(::grpc::ServerContext* context, ::tensorflow::eager::WaitQueueDoneRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::eager::WaitQueueDoneResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
659 ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag);
660 }
661 };
662 template <class BaseClass>
663 class WithAsyncMethod_RunComponentFunction : public BaseClass {
664 private:
665 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
666 public:
667 WithAsyncMethod_RunComponentFunction() {
668 ::grpc::Service::MarkMethodAsync(5);
669 }
670 ~WithAsyncMethod_RunComponentFunction() override {
671 BaseClassMustBeDerivedFromService(this);
672 }
673 // disable synchronous version of this method
674 ::grpc::Status RunComponentFunction(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::RunComponentFunctionRequest* /*request*/, ::tensorflow::eager::RunComponentFunctionResponse* /*response*/) override {
675 abort();
676 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
677 }
678 void RequestRunComponentFunction(::grpc::ServerContext* context, ::tensorflow::eager::RunComponentFunctionRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::eager::RunComponentFunctionResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
679 ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag);
680 }
681 };
682 template <class BaseClass>
683 class WithAsyncMethod_KeepAlive : public BaseClass {
684 private:
685 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
686 public:
687 WithAsyncMethod_KeepAlive() {
688 ::grpc::Service::MarkMethodAsync(6);
689 }
690 ~WithAsyncMethod_KeepAlive() override {
691 BaseClassMustBeDerivedFromService(this);
692 }
693 // disable synchronous version of this method
694 ::grpc::Status KeepAlive(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::KeepAliveRequest* /*request*/, ::tensorflow::eager::KeepAliveResponse* /*response*/) override {
695 abort();
696 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
697 }
698 void RequestKeepAlive(::grpc::ServerContext* context, ::tensorflow::eager::KeepAliveRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::eager::KeepAliveResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
699 ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag);
700 }
701 };
702 template <class BaseClass>
703 class WithAsyncMethod_CloseContext : public BaseClass {
704 private:
705 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
706 public:
707 WithAsyncMethod_CloseContext() {
708 ::grpc::Service::MarkMethodAsync(7);
709 }
710 ~WithAsyncMethod_CloseContext() override {
711 BaseClassMustBeDerivedFromService(this);
712 }
713 // disable synchronous version of this method
714 ::grpc::Status CloseContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CloseContextRequest* /*request*/, ::tensorflow::eager::CloseContextResponse* /*response*/) override {
715 abort();
716 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
717 }
718 void RequestCloseContext(::grpc::ServerContext* context, ::tensorflow::eager::CloseContextRequest* request, ::grpc::ServerAsyncResponseWriter< ::tensorflow::eager::CloseContextResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
719 ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag);
720 }
721 };
722 typedef WithAsyncMethod_CreateContext<WithAsyncMethod_UpdateContext<WithAsyncMethod_Enqueue<WithAsyncMethod_StreamingEnqueue<WithAsyncMethod_WaitQueueDone<WithAsyncMethod_RunComponentFunction<WithAsyncMethod_KeepAlive<WithAsyncMethod_CloseContext<Service > > > > > > > > AsyncService;
723 template <class BaseClass>
724 class ExperimentalWithCallbackMethod_CreateContext : public BaseClass {
725 private:
726 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
727 public:
728 ExperimentalWithCallbackMethod_CreateContext() {
729 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
730 ::grpc::Service::
731 #else
732 ::grpc::Service::experimental().
733 #endif
734 MarkMethodCallback(0,
735 new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::CreateContextRequest, ::tensorflow::eager::CreateContextResponse>(
736 [this](
737 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
738 ::grpc::CallbackServerContext*
739 #else
740 ::grpc::experimental::CallbackServerContext*
741 #endif
742 context, const ::tensorflow::eager::CreateContextRequest* request, ::tensorflow::eager::CreateContextResponse* response) { return this->CreateContext(context, request, response); }));}
743 void SetMessageAllocatorFor_CreateContext(
744 ::grpc::experimental::MessageAllocator< ::tensorflow::eager::CreateContextRequest, ::tensorflow::eager::CreateContextResponse>* allocator) {
745 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
746 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(0);
747 #else
748 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(0);
749 #endif
750 static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::CreateContextRequest, ::tensorflow::eager::CreateContextResponse>*>(handler)
751 ->SetMessageAllocator(allocator);
752 }
753 ~ExperimentalWithCallbackMethod_CreateContext() override {
754 BaseClassMustBeDerivedFromService(this);
755 }
756 // disable synchronous version of this method
757 ::grpc::Status CreateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CreateContextRequest* /*request*/, ::tensorflow::eager::CreateContextResponse* /*response*/) override {
758 abort();
759 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
760 }
761 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
762 virtual ::grpc::ServerUnaryReactor* CreateContext(
763 ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::eager::CreateContextRequest* /*request*/, ::tensorflow::eager::CreateContextResponse* /*response*/)
764 #else
765 virtual ::grpc::experimental::ServerUnaryReactor* CreateContext(
766 ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::eager::CreateContextRequest* /*request*/, ::tensorflow::eager::CreateContextResponse* /*response*/)
767 #endif
768 { return nullptr; }
769 };
770 template <class BaseClass>
771 class ExperimentalWithCallbackMethod_UpdateContext : public BaseClass {
772 private:
773 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
774 public:
775 ExperimentalWithCallbackMethod_UpdateContext() {
776 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
777 ::grpc::Service::
778 #else
779 ::grpc::Service::experimental().
780 #endif
781 MarkMethodCallback(1,
782 new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::UpdateContextRequest, ::tensorflow::eager::UpdateContextResponse>(
783 [this](
784 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
785 ::grpc::CallbackServerContext*
786 #else
787 ::grpc::experimental::CallbackServerContext*
788 #endif
789 context, const ::tensorflow::eager::UpdateContextRequest* request, ::tensorflow::eager::UpdateContextResponse* response) { return this->UpdateContext(context, request, response); }));}
790 void SetMessageAllocatorFor_UpdateContext(
791 ::grpc::experimental::MessageAllocator< ::tensorflow::eager::UpdateContextRequest, ::tensorflow::eager::UpdateContextResponse>* allocator) {
792 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
793 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(1);
794 #else
795 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(1);
796 #endif
797 static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::UpdateContextRequest, ::tensorflow::eager::UpdateContextResponse>*>(handler)
798 ->SetMessageAllocator(allocator);
799 }
800 ~ExperimentalWithCallbackMethod_UpdateContext() override {
801 BaseClassMustBeDerivedFromService(this);
802 }
803 // disable synchronous version of this method
804 ::grpc::Status UpdateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::UpdateContextRequest* /*request*/, ::tensorflow::eager::UpdateContextResponse* /*response*/) override {
805 abort();
806 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
807 }
808 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
809 virtual ::grpc::ServerUnaryReactor* UpdateContext(
810 ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::eager::UpdateContextRequest* /*request*/, ::tensorflow::eager::UpdateContextResponse* /*response*/)
811 #else
812 virtual ::grpc::experimental::ServerUnaryReactor* UpdateContext(
813 ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::eager::UpdateContextRequest* /*request*/, ::tensorflow::eager::UpdateContextResponse* /*response*/)
814 #endif
815 { return nullptr; }
816 };
817 template <class BaseClass>
818 class ExperimentalWithCallbackMethod_Enqueue : public BaseClass {
819 private:
820 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
821 public:
822 ExperimentalWithCallbackMethod_Enqueue() {
823 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
824 ::grpc::Service::
825 #else
826 ::grpc::Service::experimental().
827 #endif
828 MarkMethodCallback(2,
829 new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>(
830 [this](
831 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
832 ::grpc::CallbackServerContext*
833 #else
834 ::grpc::experimental::CallbackServerContext*
835 #endif
836 context, const ::tensorflow::eager::EnqueueRequest* request, ::tensorflow::eager::EnqueueResponse* response) { return this->Enqueue(context, request, response); }));}
837 void SetMessageAllocatorFor_Enqueue(
838 ::grpc::experimental::MessageAllocator< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>* allocator) {
839 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
840 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(2);
841 #else
842 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(2);
843 #endif
844 static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>*>(handler)
845 ->SetMessageAllocator(allocator);
846 }
847 ~ExperimentalWithCallbackMethod_Enqueue() override {
848 BaseClassMustBeDerivedFromService(this);
849 }
850 // disable synchronous version of this method
851 ::grpc::Status Enqueue(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::EnqueueRequest* /*request*/, ::tensorflow::eager::EnqueueResponse* /*response*/) override {
852 abort();
853 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
854 }
855 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
856 virtual ::grpc::ServerUnaryReactor* Enqueue(
857 ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::eager::EnqueueRequest* /*request*/, ::tensorflow::eager::EnqueueResponse* /*response*/)
858 #else
859 virtual ::grpc::experimental::ServerUnaryReactor* Enqueue(
860 ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::eager::EnqueueRequest* /*request*/, ::tensorflow::eager::EnqueueResponse* /*response*/)
861 #endif
862 { return nullptr; }
863 };
864 template <class BaseClass>
865 class ExperimentalWithCallbackMethod_StreamingEnqueue : public BaseClass {
866 private:
867 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
868 public:
869 ExperimentalWithCallbackMethod_StreamingEnqueue() {
870 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
871 ::grpc::Service::
872 #else
873 ::grpc::Service::experimental().
874 #endif
875 MarkMethodCallback(3,
876 new ::grpc_impl::internal::CallbackBidiHandler< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>(
877 [this](
878 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
879 ::grpc::CallbackServerContext*
880 #else
881 ::grpc::experimental::CallbackServerContext*
882 #endif
883 context) { return this->StreamingEnqueue(context); }));
884 }
885 ~ExperimentalWithCallbackMethod_StreamingEnqueue() override {
886 BaseClassMustBeDerivedFromService(this);
887 }
888 // disable synchronous version of this method
889 ::grpc::Status StreamingEnqueue(::grpc::ServerContext* /*context*/, ::grpc::ServerReaderWriter< ::tensorflow::eager::EnqueueResponse, ::tensorflow::eager::EnqueueRequest>* /*stream*/) override {
890 abort();
891 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
892 }
893 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
894 virtual ::grpc::ServerBidiReactor< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>* StreamingEnqueue(
895 ::grpc::CallbackServerContext* /*context*/)
896 #else
897 virtual ::grpc::experimental::ServerBidiReactor< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>* StreamingEnqueue(
898 ::grpc::experimental::CallbackServerContext* /*context*/)
899 #endif
900 { return nullptr; }
901 };
902 template <class BaseClass>
903 class ExperimentalWithCallbackMethod_WaitQueueDone : public BaseClass {
904 private:
905 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
906 public:
907 ExperimentalWithCallbackMethod_WaitQueueDone() {
908 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
909 ::grpc::Service::
910 #else
911 ::grpc::Service::experimental().
912 #endif
913 MarkMethodCallback(4,
914 new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::WaitQueueDoneRequest, ::tensorflow::eager::WaitQueueDoneResponse>(
915 [this](
916 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
917 ::grpc::CallbackServerContext*
918 #else
919 ::grpc::experimental::CallbackServerContext*
920 #endif
921 context, const ::tensorflow::eager::WaitQueueDoneRequest* request, ::tensorflow::eager::WaitQueueDoneResponse* response) { return this->WaitQueueDone(context, request, response); }));}
922 void SetMessageAllocatorFor_WaitQueueDone(
923 ::grpc::experimental::MessageAllocator< ::tensorflow::eager::WaitQueueDoneRequest, ::tensorflow::eager::WaitQueueDoneResponse>* allocator) {
924 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
925 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(4);
926 #else
927 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(4);
928 #endif
929 static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::WaitQueueDoneRequest, ::tensorflow::eager::WaitQueueDoneResponse>*>(handler)
930 ->SetMessageAllocator(allocator);
931 }
932 ~ExperimentalWithCallbackMethod_WaitQueueDone() override {
933 BaseClassMustBeDerivedFromService(this);
934 }
935 // disable synchronous version of this method
936 ::grpc::Status WaitQueueDone(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::WaitQueueDoneRequest* /*request*/, ::tensorflow::eager::WaitQueueDoneResponse* /*response*/) override {
937 abort();
938 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
939 }
940 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
941 virtual ::grpc::ServerUnaryReactor* WaitQueueDone(
942 ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::eager::WaitQueueDoneRequest* /*request*/, ::tensorflow::eager::WaitQueueDoneResponse* /*response*/)
943 #else
944 virtual ::grpc::experimental::ServerUnaryReactor* WaitQueueDone(
945 ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::eager::WaitQueueDoneRequest* /*request*/, ::tensorflow::eager::WaitQueueDoneResponse* /*response*/)
946 #endif
947 { return nullptr; }
948 };
949 template <class BaseClass>
950 class ExperimentalWithCallbackMethod_RunComponentFunction : public BaseClass {
951 private:
952 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
953 public:
954 ExperimentalWithCallbackMethod_RunComponentFunction() {
955 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
956 ::grpc::Service::
957 #else
958 ::grpc::Service::experimental().
959 #endif
960 MarkMethodCallback(5,
961 new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::RunComponentFunctionRequest, ::tensorflow::eager::RunComponentFunctionResponse>(
962 [this](
963 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
964 ::grpc::CallbackServerContext*
965 #else
966 ::grpc::experimental::CallbackServerContext*
967 #endif
968 context, const ::tensorflow::eager::RunComponentFunctionRequest* request, ::tensorflow::eager::RunComponentFunctionResponse* response) { return this->RunComponentFunction(context, request, response); }));}
969 void SetMessageAllocatorFor_RunComponentFunction(
970 ::grpc::experimental::MessageAllocator< ::tensorflow::eager::RunComponentFunctionRequest, ::tensorflow::eager::RunComponentFunctionResponse>* allocator) {
971 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
972 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(5);
973 #else
974 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(5);
975 #endif
976 static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::RunComponentFunctionRequest, ::tensorflow::eager::RunComponentFunctionResponse>*>(handler)
977 ->SetMessageAllocator(allocator);
978 }
979 ~ExperimentalWithCallbackMethod_RunComponentFunction() override {
980 BaseClassMustBeDerivedFromService(this);
981 }
982 // disable synchronous version of this method
983 ::grpc::Status RunComponentFunction(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::RunComponentFunctionRequest* /*request*/, ::tensorflow::eager::RunComponentFunctionResponse* /*response*/) override {
984 abort();
985 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
986 }
987 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
988 virtual ::grpc::ServerUnaryReactor* RunComponentFunction(
989 ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::eager::RunComponentFunctionRequest* /*request*/, ::tensorflow::eager::RunComponentFunctionResponse* /*response*/)
990 #else
991 virtual ::grpc::experimental::ServerUnaryReactor* RunComponentFunction(
992 ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::eager::RunComponentFunctionRequest* /*request*/, ::tensorflow::eager::RunComponentFunctionResponse* /*response*/)
993 #endif
994 { return nullptr; }
995 };
996 template <class BaseClass>
997 class ExperimentalWithCallbackMethod_KeepAlive : public BaseClass {
998 private:
999 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1000 public:
1001 ExperimentalWithCallbackMethod_KeepAlive() {
1002 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1003 ::grpc::Service::
1004 #else
1005 ::grpc::Service::experimental().
1006 #endif
1007 MarkMethodCallback(6,
1008 new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::KeepAliveRequest, ::tensorflow::eager::KeepAliveResponse>(
1009 [this](
1010 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1011 ::grpc::CallbackServerContext*
1012 #else
1013 ::grpc::experimental::CallbackServerContext*
1014 #endif
1015 context, const ::tensorflow::eager::KeepAliveRequest* request, ::tensorflow::eager::KeepAliveResponse* response) { return this->KeepAlive(context, request, response); }));}
1016 void SetMessageAllocatorFor_KeepAlive(
1017 ::grpc::experimental::MessageAllocator< ::tensorflow::eager::KeepAliveRequest, ::tensorflow::eager::KeepAliveResponse>* allocator) {
1018 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1019 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(6);
1020 #else
1021 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(6);
1022 #endif
1023 static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::KeepAliveRequest, ::tensorflow::eager::KeepAliveResponse>*>(handler)
1024 ->SetMessageAllocator(allocator);
1025 }
1026 ~ExperimentalWithCallbackMethod_KeepAlive() override {
1027 BaseClassMustBeDerivedFromService(this);
1028 }
1029 // disable synchronous version of this method
1030 ::grpc::Status KeepAlive(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::KeepAliveRequest* /*request*/, ::tensorflow::eager::KeepAliveResponse* /*response*/) override {
1031 abort();
1032 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1033 }
1034 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1035 virtual ::grpc::ServerUnaryReactor* KeepAlive(
1036 ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::eager::KeepAliveRequest* /*request*/, ::tensorflow::eager::KeepAliveResponse* /*response*/)
1037 #else
1038 virtual ::grpc::experimental::ServerUnaryReactor* KeepAlive(
1039 ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::eager::KeepAliveRequest* /*request*/, ::tensorflow::eager::KeepAliveResponse* /*response*/)
1040 #endif
1041 { return nullptr; }
1042 };
1043 template <class BaseClass>
1044 class ExperimentalWithCallbackMethod_CloseContext : public BaseClass {
1045 private:
1046 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1047 public:
1048 ExperimentalWithCallbackMethod_CloseContext() {
1049 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1050 ::grpc::Service::
1051 #else
1052 ::grpc::Service::experimental().
1053 #endif
1054 MarkMethodCallback(7,
1055 new ::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::CloseContextRequest, ::tensorflow::eager::CloseContextResponse>(
1056 [this](
1057 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1058 ::grpc::CallbackServerContext*
1059 #else
1060 ::grpc::experimental::CallbackServerContext*
1061 #endif
1062 context, const ::tensorflow::eager::CloseContextRequest* request, ::tensorflow::eager::CloseContextResponse* response) { return this->CloseContext(context, request, response); }));}
1063 void SetMessageAllocatorFor_CloseContext(
1064 ::grpc::experimental::MessageAllocator< ::tensorflow::eager::CloseContextRequest, ::tensorflow::eager::CloseContextResponse>* allocator) {
1065 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1066 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(7);
1067 #else
1068 ::grpc::internal::MethodHandler* const handler = ::grpc::Service::experimental().GetHandler(7);
1069 #endif
1070 static_cast<::grpc_impl::internal::CallbackUnaryHandler< ::tensorflow::eager::CloseContextRequest, ::tensorflow::eager::CloseContextResponse>*>(handler)
1071 ->SetMessageAllocator(allocator);
1072 }
1073 ~ExperimentalWithCallbackMethod_CloseContext() override {
1074 BaseClassMustBeDerivedFromService(this);
1075 }
1076 // disable synchronous version of this method
1077 ::grpc::Status CloseContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CloseContextRequest* /*request*/, ::tensorflow::eager::CloseContextResponse* /*response*/) override {
1078 abort();
1079 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1080 }
1081 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1082 virtual ::grpc::ServerUnaryReactor* CloseContext(
1083 ::grpc::CallbackServerContext* /*context*/, const ::tensorflow::eager::CloseContextRequest* /*request*/, ::tensorflow::eager::CloseContextResponse* /*response*/)
1084 #else
1085 virtual ::grpc::experimental::ServerUnaryReactor* CloseContext(
1086 ::grpc::experimental::CallbackServerContext* /*context*/, const ::tensorflow::eager::CloseContextRequest* /*request*/, ::tensorflow::eager::CloseContextResponse* /*response*/)
1087 #endif
1088 { return nullptr; }
1089 };
1090 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1091 typedef ExperimentalWithCallbackMethod_CreateContext<ExperimentalWithCallbackMethod_UpdateContext<ExperimentalWithCallbackMethod_Enqueue<ExperimentalWithCallbackMethod_StreamingEnqueue<ExperimentalWithCallbackMethod_WaitQueueDone<ExperimentalWithCallbackMethod_RunComponentFunction<ExperimentalWithCallbackMethod_KeepAlive<ExperimentalWithCallbackMethod_CloseContext<Service > > > > > > > > CallbackService;
1092 #endif
1093
1094 typedef ExperimentalWithCallbackMethod_CreateContext<ExperimentalWithCallbackMethod_UpdateContext<ExperimentalWithCallbackMethod_Enqueue<ExperimentalWithCallbackMethod_StreamingEnqueue<ExperimentalWithCallbackMethod_WaitQueueDone<ExperimentalWithCallbackMethod_RunComponentFunction<ExperimentalWithCallbackMethod_KeepAlive<ExperimentalWithCallbackMethod_CloseContext<Service > > > > > > > > ExperimentalCallbackService;
1095 template <class BaseClass>
1096 class WithGenericMethod_CreateContext : public BaseClass {
1097 private:
1098 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1099 public:
1100 WithGenericMethod_CreateContext() {
1101 ::grpc::Service::MarkMethodGeneric(0);
1102 }
1103 ~WithGenericMethod_CreateContext() override {
1104 BaseClassMustBeDerivedFromService(this);
1105 }
1106 // disable synchronous version of this method
1107 ::grpc::Status CreateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CreateContextRequest* /*request*/, ::tensorflow::eager::CreateContextResponse* /*response*/) override {
1108 abort();
1109 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1110 }
1111 };
1112 template <class BaseClass>
1113 class WithGenericMethod_UpdateContext : public BaseClass {
1114 private:
1115 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1116 public:
1117 WithGenericMethod_UpdateContext() {
1118 ::grpc::Service::MarkMethodGeneric(1);
1119 }
1120 ~WithGenericMethod_UpdateContext() override {
1121 BaseClassMustBeDerivedFromService(this);
1122 }
1123 // disable synchronous version of this method
1124 ::grpc::Status UpdateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::UpdateContextRequest* /*request*/, ::tensorflow::eager::UpdateContextResponse* /*response*/) override {
1125 abort();
1126 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1127 }
1128 };
1129 template <class BaseClass>
1130 class WithGenericMethod_Enqueue : public BaseClass {
1131 private:
1132 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1133 public:
1134 WithGenericMethod_Enqueue() {
1135 ::grpc::Service::MarkMethodGeneric(2);
1136 }
1137 ~WithGenericMethod_Enqueue() override {
1138 BaseClassMustBeDerivedFromService(this);
1139 }
1140 // disable synchronous version of this method
1141 ::grpc::Status Enqueue(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::EnqueueRequest* /*request*/, ::tensorflow::eager::EnqueueResponse* /*response*/) override {
1142 abort();
1143 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1144 }
1145 };
1146 template <class BaseClass>
1147 class WithGenericMethod_StreamingEnqueue : public BaseClass {
1148 private:
1149 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1150 public:
1151 WithGenericMethod_StreamingEnqueue() {
1152 ::grpc::Service::MarkMethodGeneric(3);
1153 }
1154 ~WithGenericMethod_StreamingEnqueue() override {
1155 BaseClassMustBeDerivedFromService(this);
1156 }
1157 // disable synchronous version of this method
1158 ::grpc::Status StreamingEnqueue(::grpc::ServerContext* /*context*/, ::grpc::ServerReaderWriter< ::tensorflow::eager::EnqueueResponse, ::tensorflow::eager::EnqueueRequest>* /*stream*/) override {
1159 abort();
1160 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1161 }
1162 };
1163 template <class BaseClass>
1164 class WithGenericMethod_WaitQueueDone : public BaseClass {
1165 private:
1166 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1167 public:
1168 WithGenericMethod_WaitQueueDone() {
1169 ::grpc::Service::MarkMethodGeneric(4);
1170 }
1171 ~WithGenericMethod_WaitQueueDone() override {
1172 BaseClassMustBeDerivedFromService(this);
1173 }
1174 // disable synchronous version of this method
1175 ::grpc::Status WaitQueueDone(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::WaitQueueDoneRequest* /*request*/, ::tensorflow::eager::WaitQueueDoneResponse* /*response*/) override {
1176 abort();
1177 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1178 }
1179 };
1180 template <class BaseClass>
1181 class WithGenericMethod_RunComponentFunction : public BaseClass {
1182 private:
1183 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1184 public:
1185 WithGenericMethod_RunComponentFunction() {
1186 ::grpc::Service::MarkMethodGeneric(5);
1187 }
1188 ~WithGenericMethod_RunComponentFunction() override {
1189 BaseClassMustBeDerivedFromService(this);
1190 }
1191 // disable synchronous version of this method
1192 ::grpc::Status RunComponentFunction(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::RunComponentFunctionRequest* /*request*/, ::tensorflow::eager::RunComponentFunctionResponse* /*response*/) override {
1193 abort();
1194 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1195 }
1196 };
1197 template <class BaseClass>
1198 class WithGenericMethod_KeepAlive : public BaseClass {
1199 private:
1200 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1201 public:
1202 WithGenericMethod_KeepAlive() {
1203 ::grpc::Service::MarkMethodGeneric(6);
1204 }
1205 ~WithGenericMethod_KeepAlive() override {
1206 BaseClassMustBeDerivedFromService(this);
1207 }
1208 // disable synchronous version of this method
1209 ::grpc::Status KeepAlive(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::KeepAliveRequest* /*request*/, ::tensorflow::eager::KeepAliveResponse* /*response*/) override {
1210 abort();
1211 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1212 }
1213 };
1214 template <class BaseClass>
1215 class WithGenericMethod_CloseContext : public BaseClass {
1216 private:
1217 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1218 public:
1219 WithGenericMethod_CloseContext() {
1220 ::grpc::Service::MarkMethodGeneric(7);
1221 }
1222 ~WithGenericMethod_CloseContext() override {
1223 BaseClassMustBeDerivedFromService(this);
1224 }
1225 // disable synchronous version of this method
1226 ::grpc::Status CloseContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CloseContextRequest* /*request*/, ::tensorflow::eager::CloseContextResponse* /*response*/) override {
1227 abort();
1228 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1229 }
1230 };
1231 template <class BaseClass>
1232 class WithRawMethod_CreateContext : public BaseClass {
1233 private:
1234 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1235 public:
1236 WithRawMethod_CreateContext() {
1237 ::grpc::Service::MarkMethodRaw(0);
1238 }
1239 ~WithRawMethod_CreateContext() override {
1240 BaseClassMustBeDerivedFromService(this);
1241 }
1242 // disable synchronous version of this method
1243 ::grpc::Status CreateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CreateContextRequest* /*request*/, ::tensorflow::eager::CreateContextResponse* /*response*/) override {
1244 abort();
1245 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1246 }
1247 void RequestCreateContext(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
1248 ::grpc::Service::RequestAsyncUnary(0, context, request, response, new_call_cq, notification_cq, tag);
1249 }
1250 };
1251 template <class BaseClass>
1252 class WithRawMethod_UpdateContext : public BaseClass {
1253 private:
1254 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1255 public:
1256 WithRawMethod_UpdateContext() {
1257 ::grpc::Service::MarkMethodRaw(1);
1258 }
1259 ~WithRawMethod_UpdateContext() override {
1260 BaseClassMustBeDerivedFromService(this);
1261 }
1262 // disable synchronous version of this method
1263 ::grpc::Status UpdateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::UpdateContextRequest* /*request*/, ::tensorflow::eager::UpdateContextResponse* /*response*/) override {
1264 abort();
1265 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1266 }
1267 void RequestUpdateContext(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
1268 ::grpc::Service::RequestAsyncUnary(1, context, request, response, new_call_cq, notification_cq, tag);
1269 }
1270 };
1271 template <class BaseClass>
1272 class WithRawMethod_Enqueue : public BaseClass {
1273 private:
1274 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1275 public:
1276 WithRawMethod_Enqueue() {
1277 ::grpc::Service::MarkMethodRaw(2);
1278 }
1279 ~WithRawMethod_Enqueue() override {
1280 BaseClassMustBeDerivedFromService(this);
1281 }
1282 // disable synchronous version of this method
1283 ::grpc::Status Enqueue(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::EnqueueRequest* /*request*/, ::tensorflow::eager::EnqueueResponse* /*response*/) override {
1284 abort();
1285 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1286 }
1287 void RequestEnqueue(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
1288 ::grpc::Service::RequestAsyncUnary(2, context, request, response, new_call_cq, notification_cq, tag);
1289 }
1290 };
1291 template <class BaseClass>
1292 class WithRawMethod_StreamingEnqueue : public BaseClass {
1293 private:
1294 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1295 public:
1296 WithRawMethod_StreamingEnqueue() {
1297 ::grpc::Service::MarkMethodRaw(3);
1298 }
1299 ~WithRawMethod_StreamingEnqueue() override {
1300 BaseClassMustBeDerivedFromService(this);
1301 }
1302 // disable synchronous version of this method
1303 ::grpc::Status StreamingEnqueue(::grpc::ServerContext* /*context*/, ::grpc::ServerReaderWriter< ::tensorflow::eager::EnqueueResponse, ::tensorflow::eager::EnqueueRequest>* /*stream*/) override {
1304 abort();
1305 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1306 }
1307 void RequestStreamingEnqueue(::grpc::ServerContext* context, ::grpc::ServerAsyncReaderWriter< ::grpc::ByteBuffer, ::grpc::ByteBuffer>* stream, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
1308 ::grpc::Service::RequestAsyncBidiStreaming(3, context, stream, new_call_cq, notification_cq, tag);
1309 }
1310 };
1311 template <class BaseClass>
1312 class WithRawMethod_WaitQueueDone : public BaseClass {
1313 private:
1314 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1315 public:
1316 WithRawMethod_WaitQueueDone() {
1317 ::grpc::Service::MarkMethodRaw(4);
1318 }
1319 ~WithRawMethod_WaitQueueDone() override {
1320 BaseClassMustBeDerivedFromService(this);
1321 }
1322 // disable synchronous version of this method
1323 ::grpc::Status WaitQueueDone(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::WaitQueueDoneRequest* /*request*/, ::tensorflow::eager::WaitQueueDoneResponse* /*response*/) override {
1324 abort();
1325 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1326 }
1327 void RequestWaitQueueDone(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
1328 ::grpc::Service::RequestAsyncUnary(4, context, request, response, new_call_cq, notification_cq, tag);
1329 }
1330 };
1331 template <class BaseClass>
1332 class WithRawMethod_RunComponentFunction : public BaseClass {
1333 private:
1334 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1335 public:
1336 WithRawMethod_RunComponentFunction() {
1337 ::grpc::Service::MarkMethodRaw(5);
1338 }
1339 ~WithRawMethod_RunComponentFunction() override {
1340 BaseClassMustBeDerivedFromService(this);
1341 }
1342 // disable synchronous version of this method
1343 ::grpc::Status RunComponentFunction(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::RunComponentFunctionRequest* /*request*/, ::tensorflow::eager::RunComponentFunctionResponse* /*response*/) override {
1344 abort();
1345 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1346 }
1347 void RequestRunComponentFunction(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
1348 ::grpc::Service::RequestAsyncUnary(5, context, request, response, new_call_cq, notification_cq, tag);
1349 }
1350 };
1351 template <class BaseClass>
1352 class WithRawMethod_KeepAlive : public BaseClass {
1353 private:
1354 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1355 public:
1356 WithRawMethod_KeepAlive() {
1357 ::grpc::Service::MarkMethodRaw(6);
1358 }
1359 ~WithRawMethod_KeepAlive() override {
1360 BaseClassMustBeDerivedFromService(this);
1361 }
1362 // disable synchronous version of this method
1363 ::grpc::Status KeepAlive(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::KeepAliveRequest* /*request*/, ::tensorflow::eager::KeepAliveResponse* /*response*/) override {
1364 abort();
1365 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1366 }
1367 void RequestKeepAlive(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
1368 ::grpc::Service::RequestAsyncUnary(6, context, request, response, new_call_cq, notification_cq, tag);
1369 }
1370 };
1371 template <class BaseClass>
1372 class WithRawMethod_CloseContext : public BaseClass {
1373 private:
1374 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1375 public:
1376 WithRawMethod_CloseContext() {
1377 ::grpc::Service::MarkMethodRaw(7);
1378 }
1379 ~WithRawMethod_CloseContext() override {
1380 BaseClassMustBeDerivedFromService(this);
1381 }
1382 // disable synchronous version of this method
1383 ::grpc::Status CloseContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CloseContextRequest* /*request*/, ::tensorflow::eager::CloseContextResponse* /*response*/) override {
1384 abort();
1385 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1386 }
1387 void RequestCloseContext(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
1388 ::grpc::Service::RequestAsyncUnary(7, context, request, response, new_call_cq, notification_cq, tag);
1389 }
1390 };
1391 template <class BaseClass>
1392 class ExperimentalWithRawCallbackMethod_CreateContext : public BaseClass {
1393 private:
1394 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1395 public:
1396 ExperimentalWithRawCallbackMethod_CreateContext() {
1397 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1398 ::grpc::Service::
1399 #else
1400 ::grpc::Service::experimental().
1401 #endif
1402 MarkMethodRawCallback(0,
1403 new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
1404 [this](
1405 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1406 ::grpc::CallbackServerContext*
1407 #else
1408 ::grpc::experimental::CallbackServerContext*
1409 #endif
1410 context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->CreateContext(context, request, response); }));
1411 }
1412 ~ExperimentalWithRawCallbackMethod_CreateContext() override {
1413 BaseClassMustBeDerivedFromService(this);
1414 }
1415 // disable synchronous version of this method
1416 ::grpc::Status CreateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CreateContextRequest* /*request*/, ::tensorflow::eager::CreateContextResponse* /*response*/) override {
1417 abort();
1418 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1419 }
1420 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1421 virtual ::grpc::ServerUnaryReactor* CreateContext(
1422 ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1423 #else
1424 virtual ::grpc::experimental::ServerUnaryReactor* CreateContext(
1425 ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1426 #endif
1427 { return nullptr; }
1428 };
1429 template <class BaseClass>
1430 class ExperimentalWithRawCallbackMethod_UpdateContext : public BaseClass {
1431 private:
1432 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1433 public:
1434 ExperimentalWithRawCallbackMethod_UpdateContext() {
1435 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1436 ::grpc::Service::
1437 #else
1438 ::grpc::Service::experimental().
1439 #endif
1440 MarkMethodRawCallback(1,
1441 new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
1442 [this](
1443 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1444 ::grpc::CallbackServerContext*
1445 #else
1446 ::grpc::experimental::CallbackServerContext*
1447 #endif
1448 context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->UpdateContext(context, request, response); }));
1449 }
1450 ~ExperimentalWithRawCallbackMethod_UpdateContext() override {
1451 BaseClassMustBeDerivedFromService(this);
1452 }
1453 // disable synchronous version of this method
1454 ::grpc::Status UpdateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::UpdateContextRequest* /*request*/, ::tensorflow::eager::UpdateContextResponse* /*response*/) override {
1455 abort();
1456 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1457 }
1458 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1459 virtual ::grpc::ServerUnaryReactor* UpdateContext(
1460 ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1461 #else
1462 virtual ::grpc::experimental::ServerUnaryReactor* UpdateContext(
1463 ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1464 #endif
1465 { return nullptr; }
1466 };
1467 template <class BaseClass>
1468 class ExperimentalWithRawCallbackMethod_Enqueue : public BaseClass {
1469 private:
1470 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1471 public:
1472 ExperimentalWithRawCallbackMethod_Enqueue() {
1473 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1474 ::grpc::Service::
1475 #else
1476 ::grpc::Service::experimental().
1477 #endif
1478 MarkMethodRawCallback(2,
1479 new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
1480 [this](
1481 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1482 ::grpc::CallbackServerContext*
1483 #else
1484 ::grpc::experimental::CallbackServerContext*
1485 #endif
1486 context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Enqueue(context, request, response); }));
1487 }
1488 ~ExperimentalWithRawCallbackMethod_Enqueue() override {
1489 BaseClassMustBeDerivedFromService(this);
1490 }
1491 // disable synchronous version of this method
1492 ::grpc::Status Enqueue(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::EnqueueRequest* /*request*/, ::tensorflow::eager::EnqueueResponse* /*response*/) override {
1493 abort();
1494 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1495 }
1496 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1497 virtual ::grpc::ServerUnaryReactor* Enqueue(
1498 ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1499 #else
1500 virtual ::grpc::experimental::ServerUnaryReactor* Enqueue(
1501 ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1502 #endif
1503 { return nullptr; }
1504 };
1505 template <class BaseClass>
1506 class ExperimentalWithRawCallbackMethod_StreamingEnqueue : public BaseClass {
1507 private:
1508 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1509 public:
1510 ExperimentalWithRawCallbackMethod_StreamingEnqueue() {
1511 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1512 ::grpc::Service::
1513 #else
1514 ::grpc::Service::experimental().
1515 #endif
1516 MarkMethodRawCallback(3,
1517 new ::grpc_impl::internal::CallbackBidiHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
1518 [this](
1519 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1520 ::grpc::CallbackServerContext*
1521 #else
1522 ::grpc::experimental::CallbackServerContext*
1523 #endif
1524 context) { return this->StreamingEnqueue(context); }));
1525 }
1526 ~ExperimentalWithRawCallbackMethod_StreamingEnqueue() override {
1527 BaseClassMustBeDerivedFromService(this);
1528 }
1529 // disable synchronous version of this method
1530 ::grpc::Status StreamingEnqueue(::grpc::ServerContext* /*context*/, ::grpc::ServerReaderWriter< ::tensorflow::eager::EnqueueResponse, ::tensorflow::eager::EnqueueRequest>* /*stream*/) override {
1531 abort();
1532 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1533 }
1534 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1535 virtual ::grpc::ServerBidiReactor< ::grpc::ByteBuffer, ::grpc::ByteBuffer>* StreamingEnqueue(
1536 ::grpc::CallbackServerContext* /*context*/)
1537 #else
1538 virtual ::grpc::experimental::ServerBidiReactor< ::grpc::ByteBuffer, ::grpc::ByteBuffer>* StreamingEnqueue(
1539 ::grpc::experimental::CallbackServerContext* /*context*/)
1540 #endif
1541 { return nullptr; }
1542 };
1543 template <class BaseClass>
1544 class ExperimentalWithRawCallbackMethod_WaitQueueDone : public BaseClass {
1545 private:
1546 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1547 public:
1548 ExperimentalWithRawCallbackMethod_WaitQueueDone() {
1549 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1550 ::grpc::Service::
1551 #else
1552 ::grpc::Service::experimental().
1553 #endif
1554 MarkMethodRawCallback(4,
1555 new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
1556 [this](
1557 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1558 ::grpc::CallbackServerContext*
1559 #else
1560 ::grpc::experimental::CallbackServerContext*
1561 #endif
1562 context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->WaitQueueDone(context, request, response); }));
1563 }
1564 ~ExperimentalWithRawCallbackMethod_WaitQueueDone() override {
1565 BaseClassMustBeDerivedFromService(this);
1566 }
1567 // disable synchronous version of this method
1568 ::grpc::Status WaitQueueDone(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::WaitQueueDoneRequest* /*request*/, ::tensorflow::eager::WaitQueueDoneResponse* /*response*/) override {
1569 abort();
1570 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1571 }
1572 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1573 virtual ::grpc::ServerUnaryReactor* WaitQueueDone(
1574 ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1575 #else
1576 virtual ::grpc::experimental::ServerUnaryReactor* WaitQueueDone(
1577 ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1578 #endif
1579 { return nullptr; }
1580 };
1581 template <class BaseClass>
1582 class ExperimentalWithRawCallbackMethod_RunComponentFunction : public BaseClass {
1583 private:
1584 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1585 public:
1586 ExperimentalWithRawCallbackMethod_RunComponentFunction() {
1587 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1588 ::grpc::Service::
1589 #else
1590 ::grpc::Service::experimental().
1591 #endif
1592 MarkMethodRawCallback(5,
1593 new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
1594 [this](
1595 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1596 ::grpc::CallbackServerContext*
1597 #else
1598 ::grpc::experimental::CallbackServerContext*
1599 #endif
1600 context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->RunComponentFunction(context, request, response); }));
1601 }
1602 ~ExperimentalWithRawCallbackMethod_RunComponentFunction() override {
1603 BaseClassMustBeDerivedFromService(this);
1604 }
1605 // disable synchronous version of this method
1606 ::grpc::Status RunComponentFunction(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::RunComponentFunctionRequest* /*request*/, ::tensorflow::eager::RunComponentFunctionResponse* /*response*/) override {
1607 abort();
1608 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1609 }
1610 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1611 virtual ::grpc::ServerUnaryReactor* RunComponentFunction(
1612 ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1613 #else
1614 virtual ::grpc::experimental::ServerUnaryReactor* RunComponentFunction(
1615 ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1616 #endif
1617 { return nullptr; }
1618 };
1619 template <class BaseClass>
1620 class ExperimentalWithRawCallbackMethod_KeepAlive : public BaseClass {
1621 private:
1622 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1623 public:
1624 ExperimentalWithRawCallbackMethod_KeepAlive() {
1625 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1626 ::grpc::Service::
1627 #else
1628 ::grpc::Service::experimental().
1629 #endif
1630 MarkMethodRawCallback(6,
1631 new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
1632 [this](
1633 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1634 ::grpc::CallbackServerContext*
1635 #else
1636 ::grpc::experimental::CallbackServerContext*
1637 #endif
1638 context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->KeepAlive(context, request, response); }));
1639 }
1640 ~ExperimentalWithRawCallbackMethod_KeepAlive() override {
1641 BaseClassMustBeDerivedFromService(this);
1642 }
1643 // disable synchronous version of this method
1644 ::grpc::Status KeepAlive(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::KeepAliveRequest* /*request*/, ::tensorflow::eager::KeepAliveResponse* /*response*/) override {
1645 abort();
1646 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1647 }
1648 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1649 virtual ::grpc::ServerUnaryReactor* KeepAlive(
1650 ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1651 #else
1652 virtual ::grpc::experimental::ServerUnaryReactor* KeepAlive(
1653 ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1654 #endif
1655 { return nullptr; }
1656 };
1657 template <class BaseClass>
1658 class ExperimentalWithRawCallbackMethod_CloseContext : public BaseClass {
1659 private:
1660 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1661 public:
1662 ExperimentalWithRawCallbackMethod_CloseContext() {
1663 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1664 ::grpc::Service::
1665 #else
1666 ::grpc::Service::experimental().
1667 #endif
1668 MarkMethodRawCallback(7,
1669 new ::grpc_impl::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
1670 [this](
1671 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1672 ::grpc::CallbackServerContext*
1673 #else
1674 ::grpc::experimental::CallbackServerContext*
1675 #endif
1676 context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->CloseContext(context, request, response); }));
1677 }
1678 ~ExperimentalWithRawCallbackMethod_CloseContext() override {
1679 BaseClassMustBeDerivedFromService(this);
1680 }
1681 // disable synchronous version of this method
1682 ::grpc::Status CloseContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CloseContextRequest* /*request*/, ::tensorflow::eager::CloseContextResponse* /*response*/) override {
1683 abort();
1684 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1685 }
1686 #ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
1687 virtual ::grpc::ServerUnaryReactor* CloseContext(
1688 ::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1689 #else
1690 virtual ::grpc::experimental::ServerUnaryReactor* CloseContext(
1691 ::grpc::experimental::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/)
1692 #endif
1693 { return nullptr; }
1694 };
1695 template <class BaseClass>
1696 class WithStreamedUnaryMethod_CreateContext : public BaseClass {
1697 private:
1698 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1699 public:
1700 WithStreamedUnaryMethod_CreateContext() {
1701 ::grpc::Service::MarkMethodStreamed(0,
1702 new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::eager::CreateContextRequest, ::tensorflow::eager::CreateContextResponse>(std::bind(&WithStreamedUnaryMethod_CreateContext<BaseClass>::StreamedCreateContext, this, std::placeholders::_1, std::placeholders::_2)));
1703 }
1704 ~WithStreamedUnaryMethod_CreateContext() override {
1705 BaseClassMustBeDerivedFromService(this);
1706 }
1707 // disable regular version of this method
1708 ::grpc::Status CreateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CreateContextRequest* /*request*/, ::tensorflow::eager::CreateContextResponse* /*response*/) override {
1709 abort();
1710 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1711 }
1712 // replace default version of method with streamed unary
1713 virtual ::grpc::Status StreamedCreateContext(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::eager::CreateContextRequest,::tensorflow::eager::CreateContextResponse>* server_unary_streamer) = 0;
1714 };
1715 template <class BaseClass>
1716 class WithStreamedUnaryMethod_UpdateContext : public BaseClass {
1717 private:
1718 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1719 public:
1720 WithStreamedUnaryMethod_UpdateContext() {
1721 ::grpc::Service::MarkMethodStreamed(1,
1722 new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::eager::UpdateContextRequest, ::tensorflow::eager::UpdateContextResponse>(std::bind(&WithStreamedUnaryMethod_UpdateContext<BaseClass>::StreamedUpdateContext, this, std::placeholders::_1, std::placeholders::_2)));
1723 }
1724 ~WithStreamedUnaryMethod_UpdateContext() override {
1725 BaseClassMustBeDerivedFromService(this);
1726 }
1727 // disable regular version of this method
1728 ::grpc::Status UpdateContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::UpdateContextRequest* /*request*/, ::tensorflow::eager::UpdateContextResponse* /*response*/) override {
1729 abort();
1730 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1731 }
1732 // replace default version of method with streamed unary
1733 virtual ::grpc::Status StreamedUpdateContext(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::eager::UpdateContextRequest,::tensorflow::eager::UpdateContextResponse>* server_unary_streamer) = 0;
1734 };
1735 template <class BaseClass>
1736 class WithStreamedUnaryMethod_Enqueue : public BaseClass {
1737 private:
1738 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1739 public:
1740 WithStreamedUnaryMethod_Enqueue() {
1741 ::grpc::Service::MarkMethodStreamed(2,
1742 new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::eager::EnqueueRequest, ::tensorflow::eager::EnqueueResponse>(std::bind(&WithStreamedUnaryMethod_Enqueue<BaseClass>::StreamedEnqueue, this, std::placeholders::_1, std::placeholders::_2)));
1743 }
1744 ~WithStreamedUnaryMethod_Enqueue() override {
1745 BaseClassMustBeDerivedFromService(this);
1746 }
1747 // disable regular version of this method
1748 ::grpc::Status Enqueue(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::EnqueueRequest* /*request*/, ::tensorflow::eager::EnqueueResponse* /*response*/) override {
1749 abort();
1750 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1751 }
1752 // replace default version of method with streamed unary
1753 virtual ::grpc::Status StreamedEnqueue(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::eager::EnqueueRequest,::tensorflow::eager::EnqueueResponse>* server_unary_streamer) = 0;
1754 };
1755 template <class BaseClass>
1756 class WithStreamedUnaryMethod_WaitQueueDone : public BaseClass {
1757 private:
1758 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1759 public:
1760 WithStreamedUnaryMethod_WaitQueueDone() {
1761 ::grpc::Service::MarkMethodStreamed(4,
1762 new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::eager::WaitQueueDoneRequest, ::tensorflow::eager::WaitQueueDoneResponse>(std::bind(&WithStreamedUnaryMethod_WaitQueueDone<BaseClass>::StreamedWaitQueueDone, this, std::placeholders::_1, std::placeholders::_2)));
1763 }
1764 ~WithStreamedUnaryMethod_WaitQueueDone() override {
1765 BaseClassMustBeDerivedFromService(this);
1766 }
1767 // disable regular version of this method
1768 ::grpc::Status WaitQueueDone(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::WaitQueueDoneRequest* /*request*/, ::tensorflow::eager::WaitQueueDoneResponse* /*response*/) override {
1769 abort();
1770 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1771 }
1772 // replace default version of method with streamed unary
1773 virtual ::grpc::Status StreamedWaitQueueDone(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::eager::WaitQueueDoneRequest,::tensorflow::eager::WaitQueueDoneResponse>* server_unary_streamer) = 0;
1774 };
1775 template <class BaseClass>
1776 class WithStreamedUnaryMethod_RunComponentFunction : public BaseClass {
1777 private:
1778 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1779 public:
1780 WithStreamedUnaryMethod_RunComponentFunction() {
1781 ::grpc::Service::MarkMethodStreamed(5,
1782 new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::eager::RunComponentFunctionRequest, ::tensorflow::eager::RunComponentFunctionResponse>(std::bind(&WithStreamedUnaryMethod_RunComponentFunction<BaseClass>::StreamedRunComponentFunction, this, std::placeholders::_1, std::placeholders::_2)));
1783 }
1784 ~WithStreamedUnaryMethod_RunComponentFunction() override {
1785 BaseClassMustBeDerivedFromService(this);
1786 }
1787 // disable regular version of this method
1788 ::grpc::Status RunComponentFunction(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::RunComponentFunctionRequest* /*request*/, ::tensorflow::eager::RunComponentFunctionResponse* /*response*/) override {
1789 abort();
1790 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1791 }
1792 // replace default version of method with streamed unary
1793 virtual ::grpc::Status StreamedRunComponentFunction(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::eager::RunComponentFunctionRequest,::tensorflow::eager::RunComponentFunctionResponse>* server_unary_streamer) = 0;
1794 };
1795 template <class BaseClass>
1796 class WithStreamedUnaryMethod_KeepAlive : public BaseClass {
1797 private:
1798 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1799 public:
1800 WithStreamedUnaryMethod_KeepAlive() {
1801 ::grpc::Service::MarkMethodStreamed(6,
1802 new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::eager::KeepAliveRequest, ::tensorflow::eager::KeepAliveResponse>(std::bind(&WithStreamedUnaryMethod_KeepAlive<BaseClass>::StreamedKeepAlive, this, std::placeholders::_1, std::placeholders::_2)));
1803 }
1804 ~WithStreamedUnaryMethod_KeepAlive() override {
1805 BaseClassMustBeDerivedFromService(this);
1806 }
1807 // disable regular version of this method
1808 ::grpc::Status KeepAlive(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::KeepAliveRequest* /*request*/, ::tensorflow::eager::KeepAliveResponse* /*response*/) override {
1809 abort();
1810 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1811 }
1812 // replace default version of method with streamed unary
1813 virtual ::grpc::Status StreamedKeepAlive(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::eager::KeepAliveRequest,::tensorflow::eager::KeepAliveResponse>* server_unary_streamer) = 0;
1814 };
1815 template <class BaseClass>
1816 class WithStreamedUnaryMethod_CloseContext : public BaseClass {
1817 private:
1818 void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
1819 public:
1820 WithStreamedUnaryMethod_CloseContext() {
1821 ::grpc::Service::MarkMethodStreamed(7,
1822 new ::grpc::internal::StreamedUnaryHandler< ::tensorflow::eager::CloseContextRequest, ::tensorflow::eager::CloseContextResponse>(std::bind(&WithStreamedUnaryMethod_CloseContext<BaseClass>::StreamedCloseContext, this, std::placeholders::_1, std::placeholders::_2)));
1823 }
1824 ~WithStreamedUnaryMethod_CloseContext() override {
1825 BaseClassMustBeDerivedFromService(this);
1826 }
1827 // disable regular version of this method
1828 ::grpc::Status CloseContext(::grpc::ServerContext* /*context*/, const ::tensorflow::eager::CloseContextRequest* /*request*/, ::tensorflow::eager::CloseContextResponse* /*response*/) override {
1829 abort();
1830 return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
1831 }
1832 // replace default version of method with streamed unary
1833 virtual ::grpc::Status StreamedCloseContext(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::tensorflow::eager::CloseContextRequest,::tensorflow::eager::CloseContextResponse>* server_unary_streamer) = 0;
1834 };
1835 typedef WithStreamedUnaryMethod_CreateContext<WithStreamedUnaryMethod_UpdateContext<WithStreamedUnaryMethod_Enqueue<WithStreamedUnaryMethod_WaitQueueDone<WithStreamedUnaryMethod_RunComponentFunction<WithStreamedUnaryMethod_KeepAlive<WithStreamedUnaryMethod_CloseContext<Service > > > > > > > StreamedUnaryService;
1836 typedef Service SplitStreamedService;
1837 typedef WithStreamedUnaryMethod_CreateContext<WithStreamedUnaryMethod_UpdateContext<WithStreamedUnaryMethod_Enqueue<WithStreamedUnaryMethod_WaitQueueDone<WithStreamedUnaryMethod_RunComponentFunction<WithStreamedUnaryMethod_KeepAlive<WithStreamedUnaryMethod_CloseContext<Service > > > > > > > StreamedService;
1838};
1839
1840} // namespace grpc
1841
1842} // namespace eager
1843} // namespace tensorflow
1844
1845
1846#endif // GRPC_tensorflow_2fcore_2fprotobuf_2feager_5fservice_2eproto__INCLUDED
1847