1/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
3Licensed under the Apache License, Version 2.0 (the "License");
4you may not use this file except in compliance with the License.
5You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9Unless required by applicable law or agreed to in writing, software
10distributed under the License is distributed on an "AS IS" BASIS,
11WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12See the License for the specific language governing permissions and
13limitations under the License.
14==============================================================================*/
15
16#ifndef TENSORFLOW_C_EAGER_C_API_H_
17#define TENSORFLOW_C_EAGER_C_API_H_
18
19// C API extensions to experiment with eager execution of kernels.
20// WARNING: Unlike tensorflow/c/c_api.h, the API here is not guaranteed to be
21// stable and can change without notice.
22
23#include "tensorflow/c/c_api.h"
24
25// Macro to control visibility of exported symbols in the shared library (.so,
26// .dylib, .dll).
27// This duplicates the TF_EXPORT macro definition in
28// tensorflow/core/platform/macros.h in order to keep this .h file independent
// of any other includes.
#ifdef SWIG
// SWIG-generated wrapper code does not need symbol visibility annotations.
#define TF_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TF_COMPILE_LIBRARY
// Building the TensorFlow shared library itself: export the symbols.
#define TF_CAPI_EXPORT __declspec(dllexport)
#else
// Consuming the TensorFlow DLL from client code: import the symbols.
#define TF_CAPI_EXPORT __declspec(dllimport)
#endif  // TF_COMPILE_LIBRARY
#else
// Non-Windows: make the symbols externally visible (matters when the library
// is compiled with hidden default visibility).
#define TF_CAPI_EXPORT __attribute__((visibility("default")))
#endif  // _WIN32
#endif  // SWIG
43
44#ifdef __cplusplus
45extern "C" {
46#endif
47
// Opaque object holding the options used when creating a TFE_Context
// (see TFE_NewContext below).
typedef struct TFE_ContextOptions TFE_ContextOptions;

// Return a new options object. The caller takes ownership and must release it
// with TFE_DeleteContextOptions.
TF_CAPI_EXPORT extern TFE_ContextOptions* TFE_NewContextOptions(void);

// Set the config in TF_ContextOptions.options.
// config should be a serialized tensorflow.ConfigProto proto.
// If config was not parsed successfully as a ConfigProto, record the
// error information in *status.
// `proto` points to the serialized bytes and `proto_len` gives their length.
// NOTE(review): presumably the bytes are copied during this call and the
// buffer need not outlive it — confirm against the implementation.
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetConfig(
    TFE_ContextOptions* options, const void* proto, size_t proto_len,
    TF_Status* status);
60
// Controls how to act when we try to run an operation on a given device but
// some input tensors are not on that device.
// The explicit numeric values are part of the C ABI and must stay stable.
// LINT.IfChange
// Note: Keep in sync with internal copy of enum in eager/context.h.
typedef enum TFE_ContextDevicePlacementPolicy {
  // Running operations with input tensors on the wrong device will fail.
  TFE_DEVICE_PLACEMENT_EXPLICIT = 0,
  // Copy the tensor to the right device but log a warning.
  TFE_DEVICE_PLACEMENT_WARN = 1,
  // Silently copy the tensor, which has a performance cost since the operation
  // will be blocked till the copy completes. This is the default placement
  // policy.
  TFE_DEVICE_PLACEMENT_SILENT = 2,
  // Placement policy which silently copies int32 tensors but not other dtypes.
  TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32 = 3,
} TFE_ContextDevicePlacementPolicy;
// LINT.ThenChange(//tensorflow/c/eager/immediate_execution_context.h)
78
// Sets the default execution mode (sync/async). Note that this can be
// overridden per thread using TFE_ContextSetExecutorForThread.
// `enable` is treated as a boolean: non-zero enables async execution.
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetAsync(TFE_ContextOptions*,
                                                      unsigned char enable);

// Sets the default device placement policy for contexts created from these
// options (see TFE_ContextDevicePlacementPolicy above).
TF_CAPI_EXPORT extern void TFE_ContextOptionsSetDevicePlacementPolicy(
    TFE_ContextOptions*, TFE_ContextDevicePlacementPolicy);

// Destroy an options object.
TF_CAPI_EXPORT extern void TFE_DeleteContextOptions(TFE_ContextOptions*);
89
// "Context" under which operations/functions are executed. It encapsulates
// things like the available devices, resource manager etc.
// TFE_Context must outlive all tensor handles created using it. In other
// words, TFE_DeleteContext() must be called after all tensor handles have
// been deleted (with TFE_DeleteTensorHandle).
//
// TODO(ashankar): Merge with TF_Session?
typedef struct TFE_Context TFE_Context;

// Creates a new context configured by `opts`. On success the caller owns the
// returned context and must release it with TFE_DeleteContext; on failure the
// error is recorded in *status.
TF_CAPI_EXPORT extern TFE_Context* TFE_NewContext(
    const TFE_ContextOptions* opts, TF_Status* status);
// Destroys `ctx`. See the lifetime requirement on TFE_Context above.
TF_CAPI_EXPORT extern void TFE_DeleteContext(TFE_Context* ctx);
// Lists the devices available to `ctx`. The caller owns the returned list
// (see TF_DeleteDeviceList in c_api.h).
TF_CAPI_EXPORT extern TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx,
                                                            TF_Status* status);

// Clears the internal caches in the TFE context. Useful when reseeding random
// ops.
TF_CAPI_EXPORT extern void TFE_ContextClearCaches(TFE_Context* ctx);

// Sets a thread-local device placement policy. After this call, other calls to
// TFE_Execute in the same thread will use the device policy specified here
// instead of the device policy used to construct the context. This has no
// effect on the device policy used by other program threads.
TF_CAPI_EXPORT extern void TFE_ContextSetThreadLocalDevicePlacementPolicy(
    TFE_Context* ctx, TFE_ContextDevicePlacementPolicy policy);

// Returns the device placement policy to be used by this context in the current
// thread.
TF_CAPI_EXPORT extern TFE_ContextDevicePlacementPolicy
TFE_ContextGetDevicePlacementPolicy(TFE_Context* ctx);

// A tensorflow.ServerDef specifies remote workers (in addition to the current
// workers name). Operations created in this context can then be executed on
// any of these remote workers by setting an appropriate device.
//
// If the following is set, all servers identified by the
// ServerDef must be up when the context is created.
// `proto`/`proto_len` describe a serialized tensorflow.ServerDef.
// NOTE(review): keep_alive_secs presumably controls how often remote workers
// are pinged to keep the connection alive — confirm against the
// implementation.
TF_CAPI_EXPORT extern void TFE_ContextSetServerDef(TFE_Context* ctx,
                                                   int keep_alive_secs,
                                                   const void* proto,
                                                   size_t proto_len,
                                                   TF_Status* status);
132
// A handle to a tensor on a device.
//
// Like a TF_Tensor, a TFE_TensorHandle refers to a tensor with a value, shape,
// type etc. Unlike a TF_Tensor, a TFE_TensorHandle may refer to such tensors
// placed in the memory of different devices or remote address spaces.
typedef struct TFE_TensorHandle TFE_TensorHandle;

// Creates a handle referring to the value of `t`. The caller owns the result
// and must release it with TFE_DeleteTensorHandle.
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandle(const TF_Tensor* t,
                                                            TF_Status* status);
// Indicates that the caller will not be using `h` any more.
TF_CAPI_EXPORT extern void TFE_DeleteTensorHandle(TFE_TensorHandle* h);
// Returns the data type of the tensor referred to by `h`.
TF_CAPI_EXPORT extern TF_DataType TFE_TensorHandleDataType(TFE_TensorHandle* h);
// Returns the number of dimensions of the tensor referred to by `h`.
// This function will block till the operation that produces `h` has completed.
TF_CAPI_EXPORT extern int TFE_TensorHandleNumDims(TFE_TensorHandle* h,
                                                  TF_Status* status);
// Returns the total number of elements of the tensor referred to by `h`.
TF_CAPI_EXPORT extern int64_t TFE_TensorHandleNumElements(TFE_TensorHandle* h,
                                                          TF_Status* status);
// Returns the size of dimension `dim_index` of the tensor referred to by `h`.
// This function will block till the operation that produces `h` has completed.
TF_CAPI_EXPORT extern int64_t TFE_TensorHandleDim(TFE_TensorHandle* h,
                                                  int dim_index,
                                                  TF_Status* status);

// Returns the device of the operation that produced `h`. If `h` was produced by
// a copy, returns the destination device of the copy. Note that the returned
// device name is not always the device holding the tensor handle's memory. If
// you want the latter, use TFE_TensorHandleBackingDeviceName. This function
// will block till the operation that produces `h` has completed.
TF_CAPI_EXPORT extern const char* TFE_TensorHandleDeviceName(
    TFE_TensorHandle* h, TF_Status* status);

// Returns the name of the device in whose memory `h` resides.
//
// This function will block till the operation that produces `h` has completed.
TF_CAPI_EXPORT extern const char* TFE_TensorHandleBackingDeviceName(
    TFE_TensorHandle* h, TF_Status* status);

// Return a pointer to a new TFE_TensorHandle that shares the underlying tensor
// with `h`. On success, `status` is set to OK. On failure, `status` reflects
// the error and a nullptr is returned.
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopySharingTensor(
    TFE_TensorHandle* h, TF_Status* status);

// Resolves `h` to a TF_Tensor containing its value.
// This function will block till the operation that produces `h` has
// completed. The memory returned might alias the internal memory used by
// TensorFlow. Hence, callers should not mutate this memory (for example by
// modifying the memory region pointed to by TF_TensorData() on the returned
// TF_Tensor).
// NOTE(review): the returned TF_Tensor is presumably owned by the caller and
// released with TF_DeleteTensor — confirm.
TF_CAPI_EXPORT extern TF_Tensor* TFE_TensorHandleResolve(TFE_TensorHandle* h,
                                                         TF_Status* status);

// Create a new TFE_TensorHandle with the same contents as 'h' but placed
// in the memory of the device name 'device_name'.
// If source and destination are the same device, then this creates a new handle
// that shares the underlying buffer. Otherwise, it currently requires at least
// one of the source or destination devices to be CPU (i.e., for the source or
// destination tensor to be placed in host memory).
// If async execution is enabled, the copy may be enqueued and the call will
// return "non-ready" handle. Else, this function returns after the copy has
// been done.
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_TensorHandleCopyToDevice(
    TFE_TensorHandle* h, TFE_Context* ctx, const char* device_name,
    TF_Status* status);
195
// Debugging/Profiling information for TFE_TensorHandle
//
// TFE_TensorDebugInfo contains information useful for debugging and
// profiling tensors.
typedef struct TFE_TensorDebugInfo TFE_TensorDebugInfo;

// Retrieves TFE_TensorDebugInfo for `handle`.
// If TFE_TensorHandleTensorDebugInfo succeeds, `status` is set to OK and caller
// is responsible for deleting returned TFE_TensorDebugInfo (with
// TFE_DeleteTensorDebugInfo below).
// If TFE_TensorHandleTensorDebugInfo fails, `status` is set to appropriate
// error and nullptr is returned. This function can block till the operation
// that produces `handle` has completed.
TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
    TFE_TensorHandle* h, TF_Status* status);

// Deletes `debug_info`.
TF_CAPI_EXPORT extern void TFE_DeleteTensorDebugInfo(
    TFE_TensorDebugInfo* debug_info);

// Returns the number of dimensions used to represent the tensor on its device.
// The number of dimensions used to represent the tensor on device can be
// different from the number returned by TFE_TensorHandleNumDims.
// The return value was current at the time of TFE_TensorDebugInfo creation.
TF_CAPI_EXPORT extern int TFE_TensorDebugInfoOnDeviceNumDims(
    TFE_TensorDebugInfo* debug_info);

// Returns the number of elements in dimension `dim_index`.
// Tensor representation on device can be transposed from its representation
// on host. The data contained in dimension `dim_index` on device
// can correspond to the data contained in another dimension in on-host
// representation. The dimensions are indexed using the standard TensorFlow
// major-to-minor order (slowest varying dimension first),
// not the XLA's minor-to-major order.
// On-device dimensions can be padded. TFE_TensorDebugInfoOnDeviceDim returns
// the number of elements in a dimension after padding.
// The return value was current at the time of TFE_TensorDebugInfo creation.
TF_CAPI_EXPORT extern int64_t TFE_TensorDebugInfoOnDeviceDim(
    TFE_TensorDebugInfo* debug_info, int dim_index);
234
// Description of the TensorFlow op to execute.
//
// Assumes that the provided 'ctx' outlives the returned TFE_Op, i.e.,
// TFE_DeleteOp() is called before TFE_DeleteContext().
//
// Very similar to TF_OperationDescription with some differences:
// (1) TF_Output or TFE_TensorHandle* as arguments to TF_AddInput,
//     TF_AddInputList
// (2) TF_ColocateWith, TF_AddControlInput etc. do not make sense.
// (3) Implementation detail: Avoid use of NodeBuilder/NodeDefBuilder since
//     the additional sanity checks there seem unnecessary;
typedef struct TFE_Op TFE_Op;

// Creates an op that will execute `op_or_function_name` (a primitive op name
// or the name of a function registered with the context). The caller owns the
// result and must release it with TFE_DeleteOp.
TF_CAPI_EXPORT extern TFE_Op* TFE_NewOp(TFE_Context* ctx,
                                        const char* op_or_function_name,
                                        TF_Status* status);
// Releases `op`. Must be called before the context is deleted (see above).
TF_CAPI_EXPORT extern void TFE_DeleteOp(TFE_Op* op);

// Returns the op or function name `op` will execute.
//
// The returned string remains valid throughout the lifetime of 'op'.
TF_CAPI_EXPORT extern const char* TFE_OpGetName(const TFE_Op* op,
                                               TF_Status* status);
// Returns the context `op` was created in.
TF_CAPI_EXPORT extern TFE_Context* TFE_OpGetContext(const TFE_Op* op,
                                                    TF_Status* status);

// Requests that `op` run on the device named `device_name`.
TF_CAPI_EXPORT extern void TFE_OpSetDevice(TFE_Op* op, const char* device_name,
                                           TF_Status* status);
// The returned string remains valid throughout the lifetime of 'op'.
TF_CAPI_EXPORT extern const char* TFE_OpGetDevice(const TFE_Op* op,
                                                  TF_Status* status);

// Appends `input` to the inputs of `op`.
TF_CAPI_EXPORT extern void TFE_OpAddInput(TFE_Op* op, TFE_TensorHandle* input,
                                          TF_Status* status);

// Appends `num_inputs` handles from `inputs` to `op` as a single input list.
TF_CAPI_EXPORT extern void TFE_OpAddInputList(TFE_Op* op,
                                              TFE_TensorHandle** inputs,
                                              int num_inputs,
                                              TF_Status* status);

// Fetches the current number of inputs attached to `op`.
//
// Does not use the operation's definition to determine how many inputs should
// be attached. It is intended for use with TFE_OpGetFlatInput to inspect an
// already-finalized operation.
//
// Note that TFE_OpGetFlatInputCount and TFE_OpGetFlatInput operate on a flat
// sequence of inputs, unlike TFE_OpGetInputLength (for getting the length of a
// particular named input list, which may only be part of the op's inputs).
TF_CAPI_EXPORT extern int TFE_OpGetFlatInputCount(const TFE_Op* op,
                                                  TF_Status* status);
// Returns a borrowed reference to one of `op`'s inputs. Use
// `TFE_TensorHandleCopySharingTensor` to make a new reference.
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_OpGetFlatInput(const TFE_Op* op,
                                                           int index,
                                                           TF_Status* status);
291
// Queries the type of attribute `attr_name` of `op`; also stores in
// `*is_list` whether the attribute is list-valued.
TF_CAPI_EXPORT extern TF_AttrType TFE_OpGetAttrType(TFE_Op* op,
                                                    const char* attr_name,
                                                    unsigned char* is_list,
                                                    TF_Status* status);
// Get an attribute type given an op name; a fusion of TFE_NewOp and
// TFE_OpGetAttrType for use from Python without the overhead of the individual
// calls and memory management of TFE_Op.
TF_CAPI_EXPORT extern TF_AttrType TFE_OpNameGetAttrType(
    TFE_Context* ctx, const char* op_or_function_name, const char* attr_name,
    unsigned char* is_list, TF_Status* status);

// Setters for scalar-valued attributes of `op`.
// For the string setter, `value`/`length` describe a byte buffer (not
// necessarily NUL-terminated).
TF_CAPI_EXPORT extern void TFE_OpSetAttrString(TFE_Op* op,
                                               const char* attr_name,
                                               const void* value,
                                               size_t length);
TF_CAPI_EXPORT extern void TFE_OpSetAttrInt(TFE_Op* op, const char* attr_name,
                                            int64_t value);
TF_CAPI_EXPORT extern void TFE_OpSetAttrFloat(TFE_Op* op, const char* attr_name,
                                              float value);
// `value` is treated as a boolean: non-zero means true.
TF_CAPI_EXPORT extern void TFE_OpSetAttrBool(TFE_Op* op, const char* attr_name,
                                             unsigned char value);
TF_CAPI_EXPORT extern void TFE_OpSetAttrType(TFE_Op* op, const char* attr_name,
                                             TF_DataType value);
// If the number of dimensions is unknown, `num_dims` must be set to
// -1 and `dims` can be null. If a dimension is unknown, the
// corresponding entry in the `dims` array must be -1.
TF_CAPI_EXPORT extern void TFE_OpSetAttrShape(TFE_Op* op, const char* attr_name,
                                              const int64_t* dims,
                                              const int num_dims,
                                              TF_Status* out_status);

// Sets the attribute attr_name to be a function specified by 'function'.
//
// TODO(ashankar,iga): Add this functionality to the C API for graph
// construction. Perhaps we want an AttrValueMap equivalent in the C API?
TF_CAPI_EXPORT extern void TFE_OpSetAttrFunction(TFE_Op* op,
                                                 const char* attr_name,
                                                 const TFE_Op* value);
330
331TF_CAPI_EXPORT void TFE_OpSetAttrFunctionName(TFE_Op* op, const char* attr_name,
332 const char* data, size_t length);
333
// Sets the attribute `attr_name` to the value of `tensor`.
TF_CAPI_EXPORT extern void TFE_OpSetAttrTensor(TFE_Op* op,
                                               const char* attr_name,
                                               TF_Tensor* tensor,
                                               TF_Status* status);

// List-valued counterparts of the scalar attribute setters above. Each reads
// `num_values` elements from the corresponding array argument(s).
// For the string-list setter, `values[i]`/`lengths[i]` describe the i-th byte
// buffer.
TF_CAPI_EXPORT extern void TFE_OpSetAttrStringList(TFE_Op* op,
                                                   const char* attr_name,
                                                   const void* const* values,
                                                   const size_t* lengths,
                                                   int num_values);
TF_CAPI_EXPORT extern void TFE_OpSetAttrIntList(TFE_Op* op,
                                                const char* attr_name,
                                                const int64_t* values,
                                                int num_values);
TF_CAPI_EXPORT extern void TFE_OpSetAttrFloatList(TFE_Op* op,
                                                  const char* attr_name,
                                                  const float* values,
                                                  int num_values);
TF_CAPI_EXPORT extern void TFE_OpSetAttrBoolList(TFE_Op* op,
                                                 const char* attr_name,
                                                 const unsigned char* values,
                                                 int num_values);
TF_CAPI_EXPORT extern void TFE_OpSetAttrTypeList(TFE_Op* op,
                                                 const char* attr_name,
                                                 const TF_DataType* values,
                                                 int num_values);
// `dims[i]`/`num_dims[i]` describe the i-th shape, with the same conventions
// as TFE_OpSetAttrShape above (num_dims[i] == -1 for an unknown rank).
TF_CAPI_EXPORT extern void TFE_OpSetAttrShapeList(
    TFE_Op* op, const char* attr_name, const int64_t** dims,
    const int* num_dims, int num_values, TF_Status* out_status);
TF_CAPI_EXPORT extern void TFE_OpSetAttrFunctionList(TFE_Op* op,
                                                     const char* attr_name,
                                                     const TFE_Op** value,
                                                     int num_values);
367
// Returns the length (number of tensors) of the input argument `input_name`
// found in the provided `op`.
TF_CAPI_EXPORT extern int TFE_OpGetInputLength(TFE_Op* op,
                                               const char* input_name,
                                               TF_Status* status);

// Returns the length (number of tensors) of the output argument `output_name`
// found in the provided `op`.
TF_CAPI_EXPORT extern int TFE_OpGetOutputLength(TFE_Op* op,
                                                const char* output_name,
                                                TF_Status* status);

// Execute the operation defined by 'op' and return handles to computed
// tensors in `retvals`.
//
// 'retvals' must point to a pre-allocated array of TFE_TensorHandle* and
// '*num_retvals' should be set to the size of this array. It is an error if
// the size of 'retvals' is less than the number of outputs. This call sets
// *num_retvals to the number of outputs.
//
// NOTE(review): each handle stored in `retvals` is presumably owned by the
// caller and must be released with TFE_DeleteTensorHandle — confirm.
//
// If async execution is enabled, the call may simply enqueue the execution
// and return "non-ready" handles in `retvals`. Note that any handles contained
// in 'op' should not be mutated till the kernel execution actually finishes.
//
// For sync execution, if any of the inputs to `op` are not ready, this call
// will block till they become ready and then return when the kernel execution
// is done.
// TODO(agarwal): change num_retvals to int from int*.
TF_CAPI_EXPORT extern void TFE_Execute(TFE_Op* op, TFE_TensorHandle** retvals,
                                       int* num_retvals, TF_Status* status);
398
// Add a function (serialized FunctionDef protocol buffer) to ctx so
// that it can be invoked using TFE_Execute.
// `serialized_function_def` points to `size` bytes of a serialized
// tensorflow.FunctionDef proto.
TF_CAPI_EXPORT extern void TFE_ContextAddFunctionDef(
    TFE_Context* ctx, const char* serialized_function_def, size_t size,
    TF_Status* status);

// Adds a function (created from TF_GraphToFunction or
// TF_FunctionImportFunctionDef) to the context, allowing it to be executed with
// TFE_Execute by creating an op with the same name as the function.
TF_CAPI_EXPORT extern void TFE_ContextAddFunction(TFE_Context* ctx,
                                                  TF_Function* function,
                                                  TF_Status* status);

// Removes a function from the context. Once removed, you can no longer
// TFE_Execute it or TFE_Execute any TFE_Op which has it as an attribute or any
// other function which calls it as an attribute.
TF_CAPI_EXPORT extern void TFE_ContextRemoveFunction(TFE_Context* ctx,
                                                     const char* name,
                                                     TF_Status* status);
418
419// Checks whether a function is registered under `name`.
420TF_CAPI_EXPORT unsigned char TFE_ContextHasFunction(TFE_Context* ctx,
421 const char* name);
422
// Enables tracing of RunMetadata on the ops executed from this context.
TF_CAPI_EXPORT extern void TFE_ContextEnableRunMetadata(TFE_Context* ctx);

// Disables tracing of RunMetadata on the ops executed from this context.
TF_CAPI_EXPORT extern void TFE_ContextDisableRunMetadata(TFE_Context* ctx);

// Populates the passed-in buffer with a serialized RunMetadata protocol buffer
// containing any run metadata information accumulated so far and clears this
// information.
// If async mode is enabled, this call blocks till all currently pending ops are
// done.
TF_CAPI_EXPORT extern void TFE_ContextExportRunMetadata(TFE_Context* ctx,
                                                        TF_Buffer* buf,
                                                        TF_Status* status);

// Some TF ops need a step container to be set to limit the lifetime of some
// resources (mostly TensorArray and Stack, used in while loop gradients in
// graph mode). Calling this on a context tells it to start a step.
// Each call should be paired with a later TFE_ContextEndStep.
TF_CAPI_EXPORT extern void TFE_ContextStartStep(TFE_Context* ctx);

// Ends a step. When there is no active step (that is, every started step has
// been ended) step containers will be cleared. Note: it is not safe to call
// TFE_ContextEndStep while ops that rely on the step container may be running.
TF_CAPI_EXPORT extern void TFE_ContextEndStep(TFE_Context* ctx);
447
448#ifdef __cplusplus
449} /* end extern "C" */
450#endif
451
#ifdef __cplusplus
// A workaround to ease conversion to and from numpy objects and
// TFE_TensorHandle's.
//
// TODO(ashankar): Figure out an alternative scheme that precludes the need for
// these API-boundary breaking methods.
namespace tensorflow {
class Tensor;
}  // namespace tensorflow

// C++-only overload: creates a TFE_TensorHandle from a tensorflow::Tensor.
// Note this declaration sits outside the extern "C" block above, so it has
// C++ linkage.
TFE_TensorHandle* TFE_NewTensorHandle(const tensorflow::Tensor& t,
                                      TF_Status* status);
#endif
465
466#endif // TENSORFLOW_C_EAGER_C_API_H_
467