1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. |
2 | |
3 | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | you may not use this file except in compliance with the License. |
5 | You may obtain a copy of the License at |
6 | |
7 | http://www.apache.org/licenses/LICENSE-2.0 |
8 | |
9 | Unless required by applicable law or agreed to in writing, software |
10 | distributed under the License is distributed on an "AS IS" BASIS, |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | See the License for the specific language governing permissions and |
13 | limitations under the License. |
14 | ==============================================================================*/ |
15 | |
16 | #ifndef TENSORFLOW_C_C_API_EXPERIMENTAL_H_ |
17 | #define TENSORFLOW_C_C_API_EXPERIMENTAL_H_ |
18 | |
19 | #include <stddef.h> |
20 | #include <stdint.h> |
21 | |
22 | #include "tensorflow/c/c_api.h" |
23 | #include "tensorflow/c/eager/c_api.h" |
24 | |
25 | // -------------------------------------------------------------------------- |
26 | // Experimental C API for TensorFlow. |
27 | // |
28 | // The API here is subject to changes in the future. |
29 | // -------------------------------------------------------------------------- |
30 | |
31 | // Macro to control visibility of exported symbols in the shared library (.so, |
32 | // .dylib, .dll). |
33 | // This duplicates the TF_EXPORT macro definition in |
34 | // tensorflow/core/platform/macros.h in order to keep this .h file independent |
// of any other includes.
36 | #ifdef SWIG |
37 | #define TF_CAPI_EXPORT |
38 | #else |
39 | #if defined(_WIN32) |
40 | #ifdef TF_COMPILE_LIBRARY |
41 | #define TF_CAPI_EXPORT __declspec(dllexport) |
42 | #else |
43 | #define TF_CAPI_EXPORT __declspec(dllimport) |
44 | #endif // TF_COMPILE_LIBRARY |
45 | #else |
46 | #define TF_CAPI_EXPORT __attribute__((visibility("default"))) |
47 | #endif // _WIN32 |
48 | #endif // SWIG |
49 | |
50 | #ifdef __cplusplus |
51 | extern "C" { |
52 | #endif |
53 | |
// When `enable` is true, set
// tensorflow.ConfigProto.OptimizerOptions.global_jit_level to ON_1, and also
// set XLA flag values to prepare for XLA compilation. Otherwise set
// global_jit_level to OFF.
//
// This and the next API are syntax sugar over TF_SetConfig(), and are used by
// clients that cannot read/write the tensorflow.ConfigProto proto.
// TODO: Migrate to TF_CreateConfig() below.
TF_CAPI_EXPORT extern void TF_EnableXLACompilation(TF_SessionOptions* options,
                                                   unsigned char enable);

// Set XLA's internal BuildXlaOpsPassFlags.tf_xla_enable_lazy_compilation to the
// value of `enable`. Also returns the original value of that flag.
//
// Use in tests to allow XLA to fallback to TF classic. This has global effect.
TF_CAPI_EXPORT unsigned char TF_SetXlaEnableLazyCompilation(
    unsigned char enable);

// Sets the tf_xla_cpu_global_jit flag to `enable`. By analogy with the setter
// above, the return value is presumably the flag's previous value --
// NOTE(review): confirm against the implementation.
TF_CAPI_EXPORT unsigned char TF_SetTfXlaCpuGlobalJit(unsigned char enable);
72 | |
73 | // Sets XLA's auto jit mode according to the specified string, which is parsed |
74 | // as if passed in XLA_FLAGS. This has global effect. |
75 | TF_CAPI_EXPORT void TF_SetXlaAutoJitMode(const char* mode); |
76 | |
77 | // Returns whether the single GPU or general XLA auto jit optimizations are |
78 | // enabled through MarkForCompilationPassFlags. |
79 | TF_CAPI_EXPORT unsigned char TF_GetXlaAutoJitEnabled(); |
80 | |
81 | // Sets XLA's minimum cluster size. This has global effect. |
82 | TF_CAPI_EXPORT void TF_SetXlaMinClusterSize(int size); |
83 | |
84 | // Gets/Sets TF/XLA flag for whether(true) or not(false) to disable constant |
85 | // folding. This is for testing to ensure that XLA is being tested rather than |
86 | // Tensorflow's CPU implementation through constant folding. |
87 | TF_CAPI_EXPORT unsigned char TF_GetXlaConstantFoldingDisabled(); |
88 | TF_CAPI_EXPORT void TF_SetXlaConstantFoldingDisabled( |
89 | unsigned char should_enable); |
90 | |
// Create a serialized tensorflow.ConfigProto proto, where:
//
// a) ConfigProto.optimizer_options.global_jit_level is set to ON_1 if
// `enable_xla_compilation` is non-zero, and OFF otherwise.
// b) ConfigProto.gpu_options.allow_growth is set to `gpu_memory_allow_growth`.
// c) ConfigProto.device_count is set to `num_cpu_devices`.
//
// NOTE(review): ownership of the returned buffer is not documented here;
// presumably the caller must release it with TF_DeleteBuffer() -- confirm.
TF_CAPI_EXPORT extern TF_Buffer* TF_CreateConfig(
    unsigned char enable_xla_compilation, unsigned char gpu_memory_allow_growth,
    unsigned int num_cpu_devices);

// Create a serialized tensorflow.RunOptions proto, where RunOptions.trace_level
// is set to FULL_TRACE if `enable_full_trace` is non-zero, and NO_TRACE
// otherwise. Same ownership caveat as TF_CreateConfig() above.
TF_CAPI_EXPORT extern TF_Buffer* TF_CreateRunOptions(
    unsigned char enable_full_trace);
106 | |
// Returns the graph content in a human-readable format, with length set in
// `len`. The format is subject to change in the future.
// The returned string is heap-allocated, and caller should call free() on it.
// (Note: unlike TF_FunctionDebugString below, this returns `const char*`, so
// callers must cast away const before calling free().)
TF_CAPI_EXPORT extern const char* TF_GraphDebugString(TF_Graph* graph,
                                                      size_t* len);

// Returns the function content in a human-readable format, with length set in
// `len`. The format is subject to change in the future.
// The returned string is heap-allocated, and caller should call free() on it.
//
// Do not return const char*, because some foreign language binding
// (e.g. swift) cannot then call free() on the returned pointer.
TF_CAPI_EXPORT extern char* TF_FunctionDebugString(TF_Function* func,
                                                   size_t* len);
121 | |
// On success, dequeues a tensor from a TF-managed FifoQueue given by
// `tensor_id`, associated with `session`. There must be a graph node named
// "fifo_queue_dequeue_<tensor_id>", to be executed by this API call.
//
// Caller must call TF_DeleteTensor() over the returned tensor. If the queue is
// empty, this call is blocked.
//
// Tensors are enqueued via the corresponding TF enqueue op.
// TODO(hongm): Add support for `timeout_ms`.
TF_CAPI_EXPORT extern TF_Tensor* TF_DequeueNamedTensor(TF_Session* session,
                                                       int tensor_id,
                                                       TF_Status* status);

// On success, enqueues `tensor` into a TF-managed FifoQueue given by
// `tensor_id`, associated with `session`. There must be a graph node named
// "fifo_queue_enqueue_<tensor_id>", to be executed by this API call. It reads
// from a placeholder node "arg_tensor_enqueue_<tensor_id>".
//
// `tensor` is still owned by the caller. This call will be blocked if the queue
// has reached its capacity, and will be unblocked when the queued tensors again
// drop below the capacity due to dequeuing.
//
// Tensors are dequeued via the corresponding TF dequeue op.
// TODO(hongm): Add support for `timeout_ms`.
TF_CAPI_EXPORT extern void TF_EnqueueNamedTensor(TF_Session* session,
                                                 int tensor_id,
                                                 TF_Tensor* tensor,
                                                 TF_Status* status);
150 | // Create a serialized tensorflow.ServerDef proto. |
151 | TF_Buffer* TFE_GetServerDef(const char* text_proto, TF_Status* status); |
152 | |
153 | TF_CAPI_EXPORT extern void TF_MakeInternalErrorStatus(TF_Status* status, |
154 | const char* errMsg); |
155 | |
// TF_NewCheckpointReader() returns a CheckpointReader that can be used to
// investigate or load the variables from a checkpoint file. On failure,
// `status` is set.
typedef struct TF_CheckpointReader TF_CheckpointReader;
TF_CAPI_EXPORT extern TF_CheckpointReader* TF_NewCheckpointReader(
    const char* filename, TF_Status* status);
// Deletes the reader.
TF_CAPI_EXPORT extern void TF_DeleteCheckpointReader(
    TF_CheckpointReader* reader);
// Checks whether the checkpoint contains a tensor named `name` (int used as
// a boolean).
TF_CAPI_EXPORT extern int TF_CheckpointReaderHasTensor(
    TF_CheckpointReader* reader, const char* name);
// Get the variable name at the given index.
TF_CAPI_EXPORT extern const char* TF_CheckpointReaderGetVariable(
    TF_CheckpointReader* reader, int index);
// Get the number of variables in the checkpoint.
TF_CAPI_EXPORT extern int TF_CheckpointReaderSize(TF_CheckpointReader* reader);
// Get the DataType of a variable.
TF_CAPI_EXPORT extern TF_DataType TF_CheckpointReaderGetVariableDataType(
    TF_CheckpointReader* reader, const char* name);
// Read the shape of a variable and write it to `dims`. `dims` must have room
// for `num_dims` entries; use TF_CheckpointReaderGetVariableNumDims() below
// to size it.
TF_CAPI_EXPORT extern void TF_CheckpointReaderGetVariableShape(
    TF_CheckpointReader* reader, const char* name, int64_t* dims, int num_dims,
    TF_Status* status);
// Get the number of dimensions of a variable.
TF_CAPI_EXPORT extern int TF_CheckpointReaderGetVariableNumDims(
    TF_CheckpointReader* reader, const char* name);
// Load the weight of a variable. NOTE(review): the caller presumably owns the
// returned tensor and must call TF_DeleteTensor() on it -- confirm.
TF_CAPI_EXPORT extern TF_Tensor* TF_CheckpointReaderGetTensor(
    TF_CheckpointReader* reader, const char* name, TF_Status* status);
183 | |
// TF_NewAttrBuilder() returns an object that you can set attributes on as
// though it were an op. This allows querying properties of that op for
// type-checking purposes like if the op will run on a particular device type.
typedef struct TF_AttrBuilder TF_AttrBuilder;
TF_CAPI_EXPORT extern TF_AttrBuilder* TF_NewAttrBuilder(const char* op_name);
// Deletes the builder.
TF_CAPI_EXPORT extern void TF_DeleteAttrBuilder(TF_AttrBuilder* builder);
// Sets the attribute `attr_name` to the single type `value`.
TF_CAPI_EXPORT extern void TF_AttrBuilderSetType(TF_AttrBuilder* builder,
                                                 const char* attr_name,
                                                 TF_DataType value);
// Sets the attribute `attr_name` to the list of `num_values` types in
// `values`.
TF_CAPI_EXPORT extern void TF_AttrBuilderSetTypeList(TF_AttrBuilder* builder,
                                                     const char* attr_name,
                                                     const TF_DataType* values,
                                                     int num_values);
197 | |
// Checks the tensorflow::NodeDef built via the methods above to see if it can
// run on device_type. The result is reported through `status`.
TF_CAPI_EXPORT extern void TF_AttrBuilderCheckCanRunOnDevice(
    TF_AttrBuilder* builder, const char* device_type, TF_Status* status);

// For argument number input_index, fetch the corresponding number_attr that
// needs to be updated with the argument length of the input list.
// Returns nullptr if there is any problem like op_name is not found, or the
// argument does not support this attribute type.
TF_CAPI_EXPORT extern const char* TF_GetNumberAttrForOpListInput(
    const char* op_name, int input_index, TF_Status* status);

// Returns 1 if the op is stateful, 0 otherwise. The return value is undefined
// if the status is not ok.
TF_CAPI_EXPORT extern int TF_OpIsStateful(const char* op_type,
                                          TF_Status* status);
214 | |
// Platform specific initialization routine. Very few platforms actually require
// this to be called. `usage`/`argc`/`argv` mirror the arguments of a C main().
TF_CAPI_EXPORT void TF_InitMain(const char* usage, int* argc, char*** argv);

// Platform-specific implementation to return an unused port. (This should be
// used in tests only.)
TF_CAPI_EXPORT int TF_PickUnusedPortOrDie(void);

// Fast path method that makes constructing a single scalar tensor require less
// overhead and copies. `data`/`len` give the scalar value of type `data_type`;
// errors are reported through `status`.
TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_NewTensorHandleFromScalar(
    TF_DataType data_type, void* data, size_t len, TF_Status* status);
227 | |
// Specify the server_def that enables collective ops.
// This is different to the above function in that it doesn't create remote
// contexts, and remotely executing ops is not possible. It just enables
// communication for collective ops.
// (NOTE(review): "the above function" appears to refer to an API that is not
// visible in this header section -- likely TFE_ContextSetServerDef; confirm.)
// `proto`/`proto_len` hold a serialized tensorflow.ServerDef.
TF_CAPI_EXPORT extern void TFE_EnableCollectiveOps(TFE_Context* ctx,
                                                   const void* proto,
                                                   size_t proto_len,
                                                   TF_Status* status);

// Aborts all ongoing collectives with the specified status. After abortion,
// subsequent collectives will error with this status immediately. To reset the
// collectives, create a new EagerContext.
//
// This is intended to be used when a peer failure is detected.
TF_CAPI_EXPORT extern void TFE_AbortCollectiveOps(TFE_Context* ctx,
                                                  TF_Status* status);

// Checks the health of collective ops peers. Explicit health check is needed in
// multi worker collective ops to detect failures in the cluster. If a peer is
// down, collective ops may hang. `task` names the peer to check; the check
// fails via `status` after `timeout_in_ms`.
TF_CAPI_EXPORT extern void TFE_CollectiveOpsCheckPeerHealth(
    TFE_Context* ctx, const char* task, int64_t timeout_in_ms,
    TF_Status* status);
251 | |
// Information about the shape of a Tensor and its type.
struct TF_ShapeAndType {
  // Number of dimensions. -1 indicates unknown rank.
  int num_dims;
  // Array of dimensions. -1 indicates unknown dim.
  int64_t* dims;
  // The data type. May be 0 to denote unknown type.
  TF_DataType dtype;
};

typedef struct TF_ShapeAndType TF_ShapeAndType;

// A list of TF_ShapeAndType elements.
struct TF_ShapeAndTypeList {
  // Number of entries in `items`.
  int num_items;
  // Array of `num_items` elements.
  TF_ShapeAndType* items;
};
typedef struct TF_ShapeAndTypeList TF_ShapeAndTypeList;
270 | |
// API for manipulating TF_ShapeAndTypeList objects.
//
// Allocates a list with room for `num_shapes` entries. Release it with
// TF_DeleteShapeAndTypeList().
TF_CAPI_EXPORT extern TF_ShapeAndTypeList* TF_NewShapeAndTypeList(
    int num_shapes);
// Sets entry `index` to the shape given by `dims`/`num_dims`.
TF_CAPI_EXPORT extern void TF_ShapeAndTypeListSetShape(
    TF_ShapeAndTypeList* shape_list, int index, const int64_t* dims,
    int num_dims);
// Marks entry `index` as having unknown shape.
TF_CAPI_EXPORT extern void TF_ShapeAndTypeListSetUnknownShape(
    TF_ShapeAndTypeList* shape_list, int index);
// Sets the dtype of entry `index`.
TF_CAPI_EXPORT extern void TF_ShapeAndTypeListSetDtype(
    TF_ShapeAndTypeList* shape_list, int index, TF_DataType dtype);
// Frees a single list.
TF_CAPI_EXPORT extern void TF_DeleteShapeAndTypeList(
    TF_ShapeAndTypeList* shape_list);
// Frees an array of `num_items` lists.
TF_CAPI_EXPORT extern void TF_DeleteShapeAndTypeListArray(
    TF_ShapeAndTypeList** shape_list_array, int num_items);
286 | |
// Infer shapes for the given `op`. The arguments mimic the arguments of the
// `shape_inference::InferenceContext` constructor. Note the following:
// - The inputs of the `op` are not used for shape inference. So, it is
//   OK to not have the inputs properly set in `op`. See `input_tensors`
//   if you want shape inference to consider the input tensors of the
//   op for shape inference.
// - The types need not be set in `input_shapes` as it is not used.
// - The number of `input_tensors` should be the same as the number of items
//   in `input_shapes`.
//
// The results are returned in `output_shapes` and
// `output_resource_shapes_and_types`. The caller is responsible for freeing the
// memory in these buffers by calling `TF_DeleteShapeAndTypeList`.
TF_CAPI_EXPORT extern void TFE_InferShapes(
    TFE_Op* op, TF_ShapeAndTypeList* input_shapes, TF_Tensor** input_tensors,
    TF_ShapeAndTypeList* input_tensor_as_shapes,
    TF_ShapeAndTypeList** input_resource_shapes_and_types,
    TF_ShapeAndTypeList** output_shapes,
    TF_ShapeAndTypeList*** output_resource_shapes_and_types, TF_Status* status);

// Sets whether colocation constraints are validated during GraphDef import
// (non-zero `enable` turns validation on) -- NOTE(review): behavior inferred
// from the name; confirm against the implementation.
TF_CAPI_EXPORT extern void
TF_ImportGraphDefOptionsSetValidateColocationConstraints(
    TF_ImportGraphDefOptions* opts, unsigned char enable);
310 | |
// Load the library specified by library_filename and register the pluggable
// device and related kernels present in that library. This function is not
// supported on mobile and embedded platforms and will fail if called there.
//
// Pass "library_filename" to a platform-specific mechanism for dynamically
// loading a library. The rules for determining the exact location of the
// library are platform-specific and are not documented here.
//
// On success, returns the newly created library handle and places OK in status.
// The caller owns the library handle.
//
// On failure, returns nullptr and places an error status in status.
TF_CAPI_EXPORT extern TF_Library* TF_LoadPluggableDeviceLibrary(
    const char* library_filename, TF_Status* status);

// Frees the memory associated with the library handle.
// Does NOT unload the library.
TF_CAPI_EXPORT extern void TF_DeletePluggableDeviceLibraryHandle(
    TF_Library* lib_handle);
331 | |
332 | #ifdef __cplusplus |
333 | } /* end extern "C" */ |
334 | #endif |
335 | |
336 | #endif // TENSORFLOW_C_C_API_EXPERIMENTAL_H_ |
337 | |