/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This file defines common C types and APIs for implementing operations,
// delegates and other constructs in TensorFlow Lite. The actual operations and
// delegates can be defined using C++, but the interface between the
// interpreter and the operations is C.
//
// Summary of abstractions:
//   TF_LITE_ENSURE - self-sufficient error checking
//   TfLiteStatus - status reporting
//   TfLiteIntArray - stores tensor shapes (dims)
//   TfLiteContext - allows an op to access the tensors
//   TfLiteTensor - tensor (a multidimensional array)
//   TfLiteNode - a single node or operation
//   TfLiteRegistration - the implementation of a conceptual operation
//   TfLiteDelegate - allows delegation of nodes to alternative backends
//
// Some abstractions in this file are created and managed by the Interpreter.
//
// NOTE: The order of values in these structs is "semi-ABI stable". New values
// should be added only to the end of structs and never reordered.

#ifndef TENSORFLOW_LITE_C_COMMON_H_
#define TENSORFLOW_LITE_C_COMMON_H_

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "tensorflow/lite/c/c_api_types.h"  // IWYU pragma: export

#ifdef __cplusplus
extern "C" {
#endif  // __cplusplus

// The list of external context types known to TF Lite. This list exists solely
// to avoid conflicts and to ensure ops can share the external contexts they
// need. Access to the external contexts is controlled by one of the
// corresponding support files.
typedef enum TfLiteExternalContextType {
  kTfLiteEigenContext = 0,       // include eigen_support.h to use.
  kTfLiteGemmLowpContext = 1,    // include gemm_support.h to use.
  kTfLiteEdgeTpuContext = 2,     // Placeholder for Edge TPU support.
  kTfLiteCpuBackendContext = 3,  // include cpu_backend_context.h to use.
  kTfLiteMaxExternalContexts = 4
} TfLiteExternalContextType;

// Forward declare so dependent structs and methods can reference these types
// prior to the struct definitions.
struct TfLiteContext;
struct TfLiteDelegate;
struct TfLiteRegistration;
struct TfLiteOpaqueDelegateStruct;
struct TfLiteOpaqueDelegateBuilder;

// An external context is a collection of information unrelated to the TF Lite
// framework, but useful to a subset of the ops. TF Lite knows very little
// about the actual contexts, but it keeps a list of them, and is able to
// refresh them if configurations like the number of recommended threads
// change.
typedef struct TfLiteExternalContext {
  TfLiteExternalContextType type;
  TfLiteStatus (*Refresh)(struct TfLiteContext* context);
} TfLiteExternalContext;

#define kTfLiteOptionalTensor (-1)

// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
// indices.
typedef struct TfLiteIntArray {
  int size;

#if defined(_MSC_VER)
  // Context for why this is needed is in http://b/189926408#comment21
  int data[1];
#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
       __GNUC_MINOR__ >= 1) ||                                      \
    defined(HEXAGON) ||                                             \
    (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1)
  // gcc 6.1+ has a bug where flexible members aren't properly handled.
  // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
  int data[0];
#else
  int data[];
#endif
} TfLiteIntArray;

// Given the size (number of elements) in a TfLiteIntArray, calculate its size
// in bytes.
size_t TfLiteIntArrayGetSizeInBytes(int size);

#ifndef TF_LITE_STATIC_MEMORY
// Create an array of a given `size` (uninitialized entries).
// This returns a pointer that you must free using TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCreate(int size);
#endif

// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise.
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b);

// Check if an intarray equals an array. Returns 1 if they are equal, 0
// otherwise.
int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
                              const int b_data[]);

#ifndef TF_LITE_STATIC_MEMORY
// Create a copy of the array passed as `src`.
// You are expected to free the memory with TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src);

// Free memory of array `a`.
void TfLiteIntArrayFree(TfLiteIntArray* a);
#endif  // TF_LITE_STATIC_MEMORY
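
// For illustration only (not part of this header's API), a minimal sketch of
// how a kernel might build, compare, and release one of these arrays; the
// variable names are hypothetical:
//
//   TfLiteIntArray* dims = TfLiteIntArrayCreate(2);
//   dims->data[0] = 3;
//   dims->data[1] = 2;
//   const int expected[] = {3, 2};
//   if (TfLiteIntArrayEqualsArray(dims, 2, expected)) {
//     // Shapes match.
//   }
//   TfLiteIntArrayFree(dims);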

// Fixed size list of floats. Used for per-channel quantization.
typedef struct TfLiteFloatArray {
  int size;
#if defined(_MSC_VER)
  // Context for why this is needed is in http://b/189926408#comment21
  float data[1];
#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
       __GNUC_MINOR__ >= 1) ||                                      \
    defined(HEXAGON) ||                                             \
    (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1)
  // gcc 6.1+ has a bug where flexible members aren't properly handled.
  // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
  float data[0];
#else
  float data[];
#endif
} TfLiteFloatArray;

// Given the size (number of elements) in a TfLiteFloatArray, calculate its
// size in bytes.
int TfLiteFloatArrayGetSizeInBytes(int size);

#ifndef TF_LITE_STATIC_MEMORY
// Create an array of a given `size` (uninitialized entries).
// This returns a pointer that you must free using TfLiteFloatArrayFree().
TfLiteFloatArray* TfLiteFloatArrayCreate(int size);

// Free memory of array `a`.
void TfLiteFloatArrayFree(TfLiteFloatArray* a);
#endif  // TF_LITE_STATIC_MEMORY

// Since we must not depend on any libraries, define a minimal subset of
// error macros while avoiding names that have pre-conceived meanings like
// assert and check.

// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than
// calling the context->ReportError function directly, so that message strings
// can be stripped out if the binary size needs to be severely optimized.
#ifndef TF_LITE_STRIP_ERROR_STRINGS
#define TF_LITE_KERNEL_LOG(context, ...)            \
  do {                                              \
    (context)->ReportError((context), __VA_ARGS__); \
  } while (false)

#define TF_LITE_MAYBE_KERNEL_LOG(context, ...)        \
  do {                                                \
    if ((context) != nullptr) {                       \
      (context)->ReportError((context), __VA_ARGS__); \
    }                                                 \
  } while (false)
#else  // TF_LITE_STRIP_ERROR_STRINGS
#define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__)
#define TF_LITE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__)
#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__)
#endif  // TF_LITE_STRIP_ERROR_STRINGS

// Check whether `value` is true, and if not return kTfLiteError from
// the current function (and report the error string `msg`).
#define TF_LITE_ENSURE_MSG(context, value, msg)        \
  do {                                                 \
    if (!(value)) {                                    \
      TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \
      return kTfLiteError;                             \
    }                                                  \
  } while (0)

// Check whether the value `a` is true, and if not return kTfLiteError from
// the current function, while also reporting the location of the error.
#define TF_LITE_ENSURE(context, a)                                      \
  do {                                                                  \
    if (!(a)) {                                                         \
      TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \
                         __LINE__, #a);                                 \
      return kTfLiteError;                                              \
    }                                                                   \
  } while (0)

#define TF_LITE_ENSURE_STATUS(a) \
  do {                           \
    const TfLiteStatus s = (a);  \
    if (s != kTfLiteOk) {        \
      return s;                  \
    }                            \
  } while (0)

// Check whether the value `a == b` is true, and if not return kTfLiteError
// from the current function, while also reporting the location of the error.
// `a` and `b` may be evaluated more than once, so no side effects or
// extremely expensive computations should be done.
// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes.
#define TF_LITE_ENSURE_EQ(context, a, b)                                   \
  do {                                                                     \
    if ((a) != (b)) {                                                      \
      TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \
                         __LINE__, #a, #b, (a), (b));                      \
      return kTfLiteError;                                                 \
    }                                                                      \
  } while (0)

#define TF_LITE_ENSURE_TYPES_EQ(context, a, b)                             \
  do {                                                                     \
    if ((a) != (b)) {                                                      \
      TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \
                         __LINE__, #a, #b, TfLiteTypeGetName(a),           \
                         TfLiteTypeGetName(b));                            \
      return kTfLiteError;                                                 \
    }                                                                      \
  } while (0)

#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon)                       \
  do {                                                                    \
    auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a));                 \
    if (delta > epsilon) {                                                \
      TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)",    \
                         __FILE__, __LINE__, #a, #b,                      \
                         static_cast<double>(a), static_cast<double>(b)); \
      return kTfLiteError;                                                \
    }                                                                     \
  } while (0)

#define TF_LITE_ENSURE_OK(context, status) \
  do {                                     \
    const TfLiteStatus s = (status);       \
    if ((s) != kTfLiteOk) {                \
      return s;                            \
    }                                      \
  } while (0)

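// As an illustrative sketch of how the TF_LITE_ENSURE family is typically
// used inside a kernel's prepare function (the names `Prepare` and `GetInput`
// are hypothetical here, not defined in this header):
//
//   TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
//     TF_LITE_ENSURE_EQ(context, node->inputs->size, 2);
//     const TfLiteTensor* input = GetInput(context, node, 0);
//     TF_LITE_ENSURE(context, input != nullptr);
//     TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
//     return kTfLiteOk;
//   }
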
// Single-precision complex data type compatible with the C99 definition.
typedef struct TfLiteComplex64 {
  float re, im;  // real and imaginary parts, respectively.
} TfLiteComplex64;

// Double-precision complex data type compatible with the C99 definition.
typedef struct TfLiteComplex128 {
  double re, im;  // real and imaginary parts, respectively.
} TfLiteComplex128;

// Half precision data type compatible with the C99 definition.
typedef struct TfLiteFloat16 {
  uint16_t data;
} TfLiteFloat16;

// Return the name of a given type, for error reporting purposes.
const char* TfLiteTypeGetName(TfLiteType type);

// Supported quantization types.
typedef enum TfLiteQuantizationType {
  // No quantization.
  kTfLiteNoQuantization = 0,
  // Affine quantization (with support for per-channel quantization).
  // Corresponds to TfLiteAffineQuantization.
  kTfLiteAffineQuantization = 1,
} TfLiteQuantizationType;

// Structure specifying the quantization used by the tensor, if any.
typedef struct TfLiteQuantization {
  // The type of quantization held by params.
  TfLiteQuantizationType type;
  // Holds an optional reference to a quantization param structure. The actual
  // type depends on the value of the `type` field (see the comment there for
  // the values and corresponding types).
  void* params;
} TfLiteQuantization;

// Parameters for asymmetric quantization across a dimension (i.e.,
// per-output-channel quantization).
// `quantized_dimension` specifies which dimension the scales and zero_points
// correspond to.
// For a particular value in quantized_dimension, quantized values can be
// converted back to float using:
//   real_value = scale * (quantized_value - zero_point)
typedef struct TfLiteAffineQuantization {
  TfLiteFloatArray* scale;
  TfLiteIntArray* zero_point;
  int32_t quantized_dimension;
} TfLiteAffineQuantization;
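
// As a worked example of the formula above (with made-up numbers): if the
// channel `c` along `quantized_dimension` has scale 0.5 and zero_point 10,
// then the stored value 14 represents the real value 0.5 * (14 - 10) = 2.0.
// A minimal dequantization sketch for one element, assuming hypothetical
// variables `q` (a TfLiteAffineQuantization*), `c`, and `quantized_value`:
//
//   const float scale = q->scale->data[c];
//   const int32_t zero_point = q->zero_point->data[c];
//   const float real_value = scale * (quantized_value - zero_point);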

/* A union of pointers that points to memory for a given tensor. */
typedef union TfLitePtrUnion {
  /* Do not access these members directly; if possible, use
   * GetTensorData<TYPE>(tensor) instead. Otherwise only access .data, as
   * other members are deprecated. */
  int32_t* i32;
  uint32_t* u32;
  int64_t* i64;
  uint64_t* u64;
  float* f;
  TfLiteFloat16* f16;
  double* f64;
  char* raw;
  const char* raw_const;
  uint8_t* uint8;
  bool* b;
  int16_t* i16;
  uint16_t* ui16;
  TfLiteComplex64* c64;
  TfLiteComplex128* c128;
  int8_t* int8;
  /* Only use this member. */
  void* data;
} TfLitePtrUnion;

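// A short sketch of the intended access pattern: check the tensor's `type`,
// then read through `.data` with an explicit cast (the `tensor` variable is
// hypothetical):
//
//   if (tensor->type == kTfLiteFloat32) {
//     float* values = (float*)tensor->data.data;
//     // ... read or write `values` ...
//   }
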
// Memory allocation strategies.
//  * kTfLiteMmapRo: Read-only memory-mapped data, or data externally
//    allocated.
//  * kTfLiteArenaRw: Arena allocated with no guarantees about persistence,
//    and available during eval.
//  * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and
//    only available during eval.
//  * kTfLiteDynamic: Allocated during eval, or for string tensors.
//  * kTfLitePersistentRo: Allocated and populated during prepare. This is
//    useful for tensors that can be computed during prepare and treated
//    as constant inputs for downstream ops (also in prepare).
//  * kTfLiteCustom: Custom memory allocation provided by the user. See
//    TfLiteCustomAllocation below.
typedef enum TfLiteAllocationType {
  kTfLiteMemNone = 0,
  kTfLiteMmapRo,
  kTfLiteArenaRw,
  kTfLiteArenaRwPersistent,
  kTfLiteDynamic,
  kTfLitePersistentRo,
  kTfLiteCustom,
} TfLiteAllocationType;

// Delegates should use zero or positive integers to represent handles.
// -1 is reserved for the unallocated status.
typedef int TfLiteBufferHandle;
enum {
  kTfLiteNullBufferHandle = -1,
};

// Storage format of each dimension in a sparse tensor.
typedef enum TfLiteDimensionType {
  kTfLiteDimDense = 0,
  kTfLiteDimSparseCSR,
} TfLiteDimensionType;

// Metadata to encode each dimension in a sparse tensor.
typedef struct TfLiteDimensionMetadata {
  TfLiteDimensionType format;
  int dense_size;
  TfLiteIntArray* array_segments;
  TfLiteIntArray* array_indices;
} TfLiteDimensionMetadata;

// Parameters used to encode a sparse tensor. For a detailed explanation of
// each field please refer to lite/schema/schema.fbs.
typedef struct TfLiteSparsity {
  TfLiteIntArray* traversal_order;
  TfLiteIntArray* block_map;
  TfLiteDimensionMetadata* dim_metadata;
  int dim_metadata_size;
} TfLiteSparsity;

// Defines a custom memory allocation not owned by the runtime.
// `data` should be aligned to kDefaultTensorAlignment defined in
// lite/util.h. (Currently 64 bytes.)
// NOTE: See Interpreter::SetCustomAllocationForTensor for details on usage.
typedef struct TfLiteCustomAllocation {
  void* data;
  size_t bytes;
} TfLiteCustomAllocation;

// The flags used in `Interpreter::SetCustomAllocationForTensor`.
// Note that this is a bitmask, so the values should be 1, 2, 4, 8, etc.
typedef enum TfLiteCustomAllocationFlags {
  kTfLiteCustomAllocationFlagsNone = 0,
  // Skips checking whether allocation.data points to an aligned buffer as
  // expected by the TFLite runtime.
  // NOTE: Setting this flag can cause crashes when calling Invoke().
  // Use with caution.
  kTfLiteCustomAllocationFlagsSkipAlignCheck = 1,
} TfLiteCustomAllocationFlags;

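// A minimal sketch of providing a custom allocation through the C++
// Interpreter API (assuming `interpreter` is a tflite::Interpreter, and that
// `buffer` points to at least `num_bytes` of suitably aligned storage; both
// names are hypothetical):
//
//   TfLiteCustomAllocation allocation = {buffer, num_bytes};
//   interpreter.SetCustomAllocationForTensor(tensor_index, allocation);
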
// A tensor in the interpreter system which is a wrapper around a buffer of
// data including a dimensionality (or NULL if not currently defined).
#ifndef TF_LITE_STATIC_MEMORY
typedef struct TfLiteTensor {
  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;
  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have. NOTE: the product of elements of `dims`
  // and the element datatype size should be equal to `bytes` below.
  TfLiteIntArray* dims;
  // Quantization information.
  TfLiteQuantizationParams params;
  // How memory is mapped:
  //  kTfLiteMmapRo: Memory mapped read only (i.e. weights).
  //  kTfLiteArenaRw: Arena allocated read write memory
  //  (i.e. temporaries, outputs).
  TfLiteAllocationType allocation_type;
  // The number of bytes required to store the data of this Tensor. I.e.
  // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if
  // type is kTfLiteFloat32 and dims = {3, 2} then
  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
  size_t bytes;

  // An opaque pointer to a tflite::MMapAllocation.
  const void* allocation;

  // Null-terminated name of this tensor.
  const char* name;

  // The delegate which knows how to handle `buffer_handle`.
  // WARNING: This is an experimental interface that is subject to change.
  struct TfLiteDelegate* delegate;

  // An integer buffer handle that can be handled by `delegate`.
  // The value is valid only when `delegate` is not null.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteBufferHandle buffer_handle;

  // If the delegate uses its own buffer (e.g. GPU memory), the delegate is
  // responsible for setting data_is_stale to true.
  // `delegate->CopyFromBufferHandle` can be called to copy the data from the
  // delegate buffer.
  // WARNING: This is an experimental interface that is subject to change.
  bool data_is_stale;

  // True if the tensor is a variable.
  bool is_variable;

  // Quantization information. Replaces the params field above.
  TfLiteQuantization quantization;

  // Parameters used to encode a sparse tensor.
  // This is optional. The field is NULL if a tensor is dense.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteSparsity* sparsity;

  // Optional. Encodes shapes with unknown dimensions with -1. This field is
  // only populated when unknown dimensions exist in a read-write tensor (i.e.
  // an input or output tensor), e.g. `dims` contains [1, 1, 1, 3] and
  // `dims_signature` contains [1, -1, -1, 3]. Note that this field only
  // exists when TF_LITE_STATIC_MEMORY is not defined.
  const TfLiteIntArray* dims_signature;
} TfLiteTensor;

// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs, user defined data and some
// node properties (like statefulness), not other features like the type.
typedef struct TfLiteNode {
  // Inputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* inputs;

  // Outputs of this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* outputs;

  // Intermediate tensors of this node expressed as indices into the
  // simulator's tensors.
  TfLiteIntArray* intermediates;

  // Temporary tensors used during computation. This usually contains no
  // tensors, but ops are allowed to change that if they need scratch space of
  // any sort.
  TfLiteIntArray* temporaries;

  // Opaque data provided by the node implementer through `Registration.init`.
  void* user_data;

  // Opaque data provided to the node if the node is a builtin. This is usually
  // a structure defined in builtin_op_data.h.
  void* builtin_data;

  // Custom initial data. This is the opaque data provided in the flatbuffer.
  // WARNING: This is an experimental interface that is subject to change.
  const void* custom_initial_data;
  int custom_initial_data_size;

  // The pointer to the delegate. This is non-null only when the node is
  // created by calling `interpreter.ModifyGraphWithDelegate`.
  // WARNING: This is an experimental interface that is subject to change.
  struct TfLiteDelegate* delegate;

  // Whether this op might have a side effect (e.g. stateful op).
  bool might_have_side_effect;
} TfLiteNode;
#else   // defined(TF_LITE_STATIC_MEMORY)?
// NOTE: This flag is opt-in only at compile time.
//
// Specific reduced TfLiteTensor struct for the TF Micro runtime. This struct
// contains only the minimum fields required to initialize and prepare a micro
// inference graph. The fields in this struct have been ordered from
// largest-to-smallest for optimal struct sizeof.
//
// This struct does not use:
// - allocation
// - buffer_handle
// - data_is_stale
// - delegate
// - dims_signature
// - name
// - sparsity
typedef struct TfLiteTensor {
  // TODO(b/155784997): Consider consolidating these quantization fields.
  // Quantization information. Replaces the `params` field below.
  TfLiteQuantization quantization;

  // Quantization information.
  TfLiteQuantizationParams params;

  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;

  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have. NOTE: the product of elements of `dims`
  // and the element datatype size should be equal to `bytes` below.
  TfLiteIntArray* dims;

  // The number of bytes required to store the data of this Tensor. I.e.
  // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if
  // type is kTfLiteFloat32 and dims = {3, 2} then
  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
  size_t bytes;

  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;

  // How memory is mapped:
  //  kTfLiteMmapRo: Memory mapped read only (i.e. weights).
  //  kTfLiteArenaRw: Arena allocated read write memory
  //  (i.e. temporaries, outputs).
  TfLiteAllocationType allocation_type;

  // True if the tensor is a variable.
  bool is_variable;
} TfLiteTensor;

// Specific reduced TfLiteNode struct for the TF Micro runtime. This struct
// contains only the minimum fields required to represent a node.
//
// This struct does not use:
// - delegate
// - temporaries
typedef struct TfLiteNode {
  // Inputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* inputs;

  // Outputs of this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* outputs;

  // Intermediate tensors of this node expressed as indices into the
  // simulator's tensors.
  TfLiteIntArray* intermediates;

  // Opaque data provided by the node implementer through `Registration.init`.
  void* user_data;

  // Opaque data provided to the node if the node is a builtin. This is usually
  // a structure defined in builtin_op_data.h.
  void* builtin_data;

  // Custom initial data. This is the opaque data provided in the flatbuffer.
  // WARNING: This is an experimental interface that is subject to change.
  const void* custom_initial_data;
  int custom_initial_data_size;
} TfLiteNode;
#endif  // TF_LITE_STATIC_MEMORY

// Light-weight tensor struct for the TF Micro runtime. Provides the minimal
// amount of information required for a kernel to run during
// TfLiteRegistration::Eval.
// TODO(b/160955687): Move this struct into TF_LITE_STATIC_MEMORY when TFLM
// builds with this flag by default internally.
typedef struct TfLiteEvalTensor {
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;

  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have.
  TfLiteIntArray* dims;

  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;
} TfLiteEvalTensor;

#ifndef TF_LITE_STATIC_MEMORY
// Free data memory of tensor `t`.
void TfLiteTensorDataFree(TfLiteTensor* t);

// Free quantization data.
void TfLiteQuantizationFree(TfLiteQuantization* quantization);

// Free sparsity parameters.
void TfLiteSparsityFree(TfLiteSparsity* sparsity);

// Free memory of tensor `t`.
void TfLiteTensorFree(TfLiteTensor* t);

// Set all of a tensor's fields (and free any previously allocated data).
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor);

// Copies the contents of `src` into `dst`.
// The function does nothing and returns kTfLiteOk if either `src` or `dst` is
// passed as nullptr.
// Returns kTfLiteError if `src` and `dst` don't have matching data sizes.
// Note that this function copies contents, so it won't create a new data
// pointer or change the allocation type.
// All tensor-related properties will be copied from `src` to `dst`, like
// quantization, sparsity, ...
TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst);

// Change the size of the memory block owned by `tensor` to `num_bytes`.
// Tensors with allocation types other than kTfLiteDynamic will be ignored.
// `tensor`'s internal data buffer will be assigned a pointer
// which can safely be passed to free or realloc if `num_bytes` is zero.
// Behaviour is undefined if `tensor` is NULL.
// If `preserve_data` is true, tensor data will be unchanged in the range from
// the start of the region up to the minimum of the old and new sizes.
void TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor,
                                 bool preserve_data);

// Change the size of the memory block owned by `tensor` to `num_bytes`.
// Tensors with allocation types other than kTfLiteDynamic will be ignored.
// `tensor`'s internal data buffer will be assigned a pointer
// which can safely be passed to free or realloc if `num_bytes` is zero.
// Behaviour is undefined if `tensor` is NULL.
// Tensor data will be unchanged in the range from the start of the region up
// to the minimum of the old and new sizes.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
#endif  // TF_LITE_STATIC_MEMORY
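
// For kernels that own a kTfLiteDynamic output, a minimal resizing sketch
// (the variables `output` and `required_bytes` are hypothetical; error
// handling elided):
//
//   if (output->allocation_type == kTfLiteDynamic) {
//     // Reallocates the buffer; old contents are preserved up to the
//     // smaller of the old and new sizes.
//     TfLiteTensorRealloc(required_bytes, output);
//   }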

// WARNING: This is an experimental interface that is subject to change.
//
// Currently, TfLiteDelegateParams has to be allocated in a way that it's
// trivially destructible. It will be stored as the `builtin_data` field in
// the `TfLiteNode` of the delegate node.
//
// See also the `CreateDelegateParams` function in `interpreter.cc` for
// details.
typedef struct TfLiteDelegateParams {
  struct TfLiteDelegate* delegate;
  TfLiteIntArray* nodes_to_replace;
  TfLiteIntArray* input_tensors;
  TfLiteIntArray* output_tensors;
} TfLiteDelegateParams;

typedef struct TfLiteContext {
  // Number of tensors in the context.
  size_t tensors_size;

  // The execution plan contains a list of the node indices in execution
  // order. execution_plan->size is the current number of nodes. And,
  // execution_plan->data[0] is the first node that needs to be run.
  // TfLiteDelegates can traverse the current execution plan by iterating
  // through each member of this array and using GetNodeAndRegistration() to
  // access details about a node. i.e.
  //
  //   TfLiteIntArray* execution_plan;
  //   TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context,
  //                                                   &execution_plan));
  //   for (int exec_index = 0; exec_index < execution_plan->size;
  //        exec_index++) {
  //     int node_index = execution_plan->data[exec_index];
  //     TfLiteNode* node;
  //     TfLiteRegistration* reg;
  //     context->GetNodeAndRegistration(context, node_index, &node, &reg);
  //   }
  //
  // Note: the memory pointed to by `*execution_plan` is OWNED by the TfLite
  // runtime. Future calls to GetExecutionPlan invalidate earlier outputs. The
  // following code snippet shows the issue of such an invocation pattern.
  // After calling CheckNode, subsequent access to `plan_1st` is undefined.
  //
  //   void CheckNode(const TfLiteNode* node) {
  //     ...
  //     TfLiteIntArray* plan_2nd;
  //     TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_2nd));
  //     ...
  //   }
  //
  //   TfLiteIntArray* plan_1st;
  //   TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st));
  //   for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) {
  //     int node_index = plan_1st->data[exec_index];
  //     TfLiteNode* node;
  //     TfLiteRegistration* reg;
  //     context->GetNodeAndRegistration(context, node_index, &node, &reg);
  //     CheckNode(node);
  //   }
  //
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
                                   TfLiteIntArray** execution_plan);

  // An array of tensors in the interpreter context (of length `tensors_size`).
  TfLiteTensor* tensors;

  // Opaque full context ptr (an opaque C++ data structure).
  void* impl_;

  // Request memory pointer be resized. Updates dimensions on the tensor.
  // NOTE: ResizeTensor takes ownership of `new_size`.
  TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
                               TfLiteIntArray* new_size);
  // Request that an error be reported with format string msg.
  void (*ReportError)(struct TfLiteContext*, const char* msg, ...);

  // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If
  // non-null, the value pointed to by `first_new_tensor_index` will be set to
  // the index of the first new tensor.
  TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add,
                             int* first_new_tensor_index);

  // Get a Tensor node by node_index.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetNodeAndRegistration)(
      struct TfLiteContext*, int node_index, TfLiteNode** node,
      struct TfLiteRegistration** registration);

  // Replace ops with one or more stub delegate operations. This function
  // does not take ownership of `nodes_to_replace`.
  TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)(
      struct TfLiteContext*, struct TfLiteRegistration registration,
      const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate);

  // Number of threads that are recommended to subsystems like gemmlowp and
  // eigen.
  int recommended_num_threads;

  // Access external contexts by type.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*,
                                               TfLiteExternalContextType);
  // Set the value of an external context. Does not take ownership of the
  // pointer.
  // WARNING: This is an experimental interface that is subject to change.
  void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType,
                             TfLiteExternalContext*);

  // Flag for allowing float16 precision for FP32 calculation.
  // Default: false.
  // WARNING: This is an experimental API and subject to change.
  bool allow_fp32_relax_to_fp16;

  // Pointer to the op-level profiler, if set; nullptr otherwise.
  void* profiler;

  // Allocate a persistent buffer which has the same lifetime as the
  // interpreter. Returns nullptr on failure.
  // The memory is allocated from the heap in TFL, and from the tail in TFLM.
  // This method is only available in the Init or Prepare stage.
  // WARNING: This is an experimental interface that is subject to change.
  void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes);

  // Allocate a buffer which will be deallocated right after the invoke phase.
  // The memory is allocated from the heap in TFL, and from the volatile arena
  // in TFLM.
  // This method is only available in the invoke stage.
  // NOTE: If possible use the RequestScratchBufferInArena method to avoid
  // memory allocation during inference time.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes,
                                        void** ptr);

  // Request a scratch buffer in the arena through static memory planning.
  // This method is only available in the Prepare stage, and the buffer is
  // allocated by the interpreter between the Prepare and Eval stages. In the
  // Eval stage, the GetScratchBuffer API can be used to fetch the address
  // (see the sketch after this struct).
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx,
                                              size_t bytes, int* buffer_idx);

  // Get the scratch buffer pointer.
  // This method is only available in the Eval stage.
  // WARNING: This is an experimental interface that is subject to change.
  void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx);

  // Resize the memory pointer of the `tensor`. This method behaves the same as
  // `ResizeTensor`, except that it makes a copy of the shape array internally
  // so the shape array can be deallocated right afterwards.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx,
                                       TfLiteTensor* tensor, int dims,
                                       const int* shape);

  // This method provides a preview of post-delegation partitioning. Each
  // TfLiteDelegateParams in the referenced array corresponds to one instance
  // of the delegate kernel.
  // Example usage:
  //
  //   TfLiteIntArray* nodes_to_replace = ...;
  //   TfLiteDelegateParams* params_array;
  //   int num_partitions = 0;
  //   TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
  //       context, nodes_to_replace, &params_array, &num_partitions));
  //   for (int idx = 0; idx < num_partitions; idx++) {
  //     const auto& partition_params = params_array[idx];
  //     ...
  //   }
  //
  // NOTE: The context owns the memory referenced by partition_params_array. It
  // will be cleared with another call to PreviewDelegatePartitioning, or after
  // TfLiteDelegateParams::Prepare returns.
  //
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*PreviewDelegatePartitioning)(
      struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
      TfLiteDelegateParams** partition_params_array, int* num_partitions);

  // Returns a TfLiteTensor struct for a given index.
  // WARNING: This is an experimental interface that is subject to change.
  // WARNING: This method may not be available on all platforms.
  TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context,
                             int tensor_idx);

  // Returns a TfLiteEvalTensor struct for a given index.
  // WARNING: This is an experimental interface that is subject to change.
  // WARNING: This method may not be available on all platforms.
  TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
                                     int tensor_idx);

  // Retrieves a named metadata buffer from the TFLite model.
  // Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer
  // Model: that is, there exists a `metadata` entry with the given `name`
  // string (see TFLite's schema.fbs).
  // The corresponding `buffer` information is populated in `ptr` & `bytes`.
  // The data from `ptr` is valid for the lifetime of the Interpreter.
  //
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetModelMetadata)(const struct TfLiteContext* context,
                                   const char* name, const char** ptr,
                                   size_t* bytes);
} TfLiteContext;

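// As referenced above, a sketch of the scratch-buffer workflow split across
// the two stages (hypothetical kernel code; `op_data` is assumed to be the
// kernel's user_data struct with an `int scratch_index` member):
//
//   // In prepare:
//   TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
//       context, scratch_bytes, &op_data->scratch_index));
//
//   // In eval:
//   void* scratch =
//       context->GetScratchBuffer(context, op_data->scratch_index);
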
// `TfLiteRegistrationExternal` is an external version of `TfLiteRegistration`
// for the C API, which doesn't use internal types (such as `TfLiteContext`)
// but only uses stable API types (such as `TfLiteOpaqueContext`). The purpose
// of each field is exactly the same as with `TfLiteRegistration`.
typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal;

typedef struct TfLiteRegistration {
  // Initializes the op from serialized data.
  // Called only *once* for the lifetime of the op, so any one-time allocations
  // should be made here (unless they depend on tensor sizes).
  //
  // If a built-in op:
  //   `buffer` is the op's params data (TfLiteLSTMParams*).
  //   `length` is zero.
  // If custom op:
  //   `buffer` is the op's `custom_options`.
  //   `length` is the size of the buffer.
  //
  // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer
  // or an instance of a struct).
  //
  // The returned pointer will be stored with the node in the `user_data`
  // field, accessible within prepare and invoke functions below.
  // NOTE: if the data is already in the desired format, simply implement this
  // function to return `nullptr` and implement the free function to be a
  // no-op.
  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);

  // The pointer `buffer` is the data previously returned by an init
  // invocation.
  void (*free)(TfLiteContext* context, void* buffer);

  // prepare is called when the inputs this node depends on have been resized.
  // context->ResizeTensor() can be called to request output tensors to be
  // resized.
  // Can be called multiple times for the lifetime of the op.
  //
  // Returns kTfLiteOk on success.
  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);

  // Execute the node (should read node->inputs and output to node->outputs).
  // Returns kTfLiteOk on success.
  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);

  // profiling_string is called during summarization of profiling information
  // in order to group executions together. Providing a value here will cause a
  // given op to appear multiple times in the profiling report. This is
  // particularly useful for custom ops that can perform significantly
  // different calculations depending on their `user_data`.
  const char* (*profiling_string)(const TfLiteContext* context,
                                  const TfLiteNode* node);

  // Builtin codes. If this kernel refers to a builtin, this is the code
  // of the builtin. This is so we can do marshaling to other frameworks like
  // the NN API.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int32_t builtin_code;

  // Custom op name. If the op is a builtin, this will be null.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  // WARNING: This is an experimental interface that is subject to change.
  const char* custom_name;

  // The version of the op.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int version;

  // The external version of `TfLiteRegistration`. Since we can't use internal
  // types (such as `TfLiteContext`) in the C API while maintaining ABI
  // stability, C API users will provide a `TfLiteRegistrationExternal` to
  // implement custom ops. We keep it inside `TfLiteRegistration` and use it
  // to route callbacks properly.
  TfLiteRegistrationExternal* registration_external;
} TfLiteRegistration;

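// For illustration, a sketch of registering a custom op with this struct.
// The functions `Init`, `Free`, `Prepare`, and `Eval` are hypothetical
// implementations supplied by the op author; kTfLiteBuiltinCustom is assumed
// to come from tensorflow/lite/builtin_ops.h:
//
//   TfLiteRegistration* Register_MY_CUSTOM_OP() {
//     static TfLiteRegistration r = {Init, Free, Prepare, Eval};
//     r.builtin_code = kTfLiteBuiltinCustom;
//     r.custom_name = "MY_CUSTOM_OP";
//     r.version = 1;
//     return &r;
//   }
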
// Old version of `TfLiteRegistration` to maintain binary backward
// compatibility.
// WARNING: This structure is deprecated / not an official part of the API.
// It should only be used for binary backward compatibility.
typedef struct TfLiteRegistration_V1 {
  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
  void (*free)(TfLiteContext* context, void* buffer);
  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
  const char* (*profiling_string)(const TfLiteContext* context,
                                  const TfLiteNode* node);
  int32_t builtin_code;
  const char* custom_name;
  int version;
} TfLiteRegistration_V1;

// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
// values should be 1, 2, 4, 8, etc.
typedef enum TfLiteDelegateFlags {
  kTfLiteDelegateFlagsNone = 0,
  // The flag is set if the delegate can handle dynamic sized tensors.
  // For example, the output shape of a `Resize` op with non-constant shape
  // can only be inferred when the op is invoked.
  // In this case, the Delegate is responsible for calling
  // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling
  // `ResizeTensor` when invoking the op.
  //
  // If the delegate isn't capable of handling dynamic tensors, this flag needs
  // to be set to false.
  kTfLiteDelegateFlagsAllowDynamicTensors = 1,

  // This flag can be used by delegates (that allow dynamic tensors) to ensure
  // applicable tensor shapes are automatically propagated in the case of
  // tensor resizing.
  // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O
  // tensors of a delegate kernel will have correct shapes before its
  // Prepare() method is called. The runtime leverages TFLite builtin ops in
  // the original execution plan to propagate shapes.
  //
  // A few points to note:
  // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is
  //    false, this one is redundant since the delegate kernels are
  //    re-initialized every time tensors are resized.
  // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra
  //    work is required to prepare the original execution plan.
  // 3. This flag requires that the original execution plan only have ops with
  //    valid registrations (and not 'dummy' custom ops like with Flex).
  // WARNING: This feature is experimental and subject to change.
  kTfLiteDelegateFlagsRequirePropagatedShapes = 2
} TfLiteDelegateFlags;

// WARNING: This is an experimental interface that is subject to change.
typedef struct TfLiteDelegate {
  // Data that the delegate needs to identify itself. This data is owned by the
  // delegate. The delegate is owned in the user code, so the delegate is
  // responsible for deallocating this when it is destroyed.
  void* data_;

  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
  // delegate a view of the current graph through TfLiteContext*. It typically
  // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
  // to ask the TensorFlow Lite runtime to create macro-nodes to represent
  // delegated subgraphs of the original graph.
  TfLiteStatus (*Prepare)(TfLiteContext* context,
                          struct TfLiteDelegate* delegate);

  // Copy the data from the delegate buffer handle into raw memory of the given
  // `tensor`. Note that the delegate is allowed to allocate the raw bytes as
  // long as it follows the rules for kTfLiteDynamic tensors, in which case
  // this cannot be null.
  TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
                                       struct TfLiteDelegate* delegate,
                                       TfLiteBufferHandle buffer_handle,
                                       TfLiteTensor* tensor);

  // Copy the data from raw memory of the given `tensor` to the delegate buffer
  // handle. This can be null if the delegate doesn't use its own buffer.
  TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context,
                                     struct TfLiteDelegate* delegate,
                                     TfLiteBufferHandle buffer_handle,
                                     TfLiteTensor* tensor);

  // Free the delegate buffer handle. Note: This only frees the handle; it
  // doesn't release the underlying resource (e.g. textures). The resources
  // are either owned by the application layer or the delegate.
  // This can be null if the delegate doesn't use its own buffer.
  void (*FreeBufferHandle)(TfLiteContext* context,
                           struct TfLiteDelegate* delegate,
                           TfLiteBufferHandle* handle);

  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
  int64_t flags;

  // The opaque delegate builder associated with this object. If set, then the
  // TF Lite runtime will give precedence to this field. E.g. instead of
  // invoking `Prepare` via the function pointer inside the `TfLiteDelegate`
  // object, the runtime will first check if the corresponding function
  // pointer inside `opaque_delegate_builder` is set and if so invoke that.
  //
  // If this field is non-null, then the `Prepare` field (of the
  // `TfLiteDelegate`) should be null.
  struct TfLiteOpaqueDelegateBuilder* opaque_delegate_builder;
} TfLiteDelegate;

// Build a 'null' delegate, with all the fields properly set to their default
// values.
TfLiteDelegate TfLiteDelegateCreate(void);

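// A minimal sketch of filling in a delegate (the `MyDelegatePrepare` function
// and `my_delegate_state` object are hypothetical; a real Prepare would
// typically call ReplaceNodeSubsetsWithDelegateKernels):
//
//   TfLiteDelegate delegate = TfLiteDelegateCreate();
//   delegate.data_ = my_delegate_state;
//   delegate.Prepare = MyDelegatePrepare;
//   delegate.flags = kTfLiteDelegateFlagsNone;
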
// `TfLiteOpaqueDelegateBuilder` is used for constructing
// `TfLiteOpaqueDelegateStruct`; see `TfLiteOpaqueDelegateCreate` below. Note:
// This struct is not ABI stable.
//
// For forward source compatibility `TfLiteOpaqueDelegateBuilder` objects
// should be brace-initialized, so that all fields (including any that might be
// added in the future) get zero-initialized. The purpose of each field is
// exactly the same as with `TfLiteDelegate`.
//
// WARNING: This is an experimental interface that is subject to change.
typedef struct TfLiteOpaqueDelegateBuilder {
  // Data that the delegate needs to identify itself. This data is owned by the
  // delegate. The delegate is owned in the user code, so the delegate is
  // responsible for deallocating this when it is destroyed.
  void* data;
  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
  // delegate a view of the current graph through TfLiteOpaqueContext*. It
  // typically will look at the nodes and call
  // ReplaceNodeSubsetsWithDelegateKernels() to ask the TensorFlow Lite runtime
  // to create macro-nodes to represent delegated subgraphs of the original
  // graph.
  TfLiteStatus (*Prepare)(TfLiteOpaqueContext* context,  // NOLINT
                          struct TfLiteOpaqueDelegateStruct* delegate,
                          void* data);
  // Copies the data from the delegate buffer handle into raw memory of the
  // given `tensor`. Note that the delegate is allowed to allocate the raw
  // bytes as long as it follows the rules for kTfLiteDynamic tensors, in which
  // case this cannot be null.
  TfLiteStatus (*CopyFromBufferHandle)(  // NOLINT
      TfLiteOpaqueContext* context, struct TfLiteOpaqueDelegateStruct* delegate,
      void* data, TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor);
  // Copies the data from raw memory of the given `tensor` to the delegate
  // buffer handle. This can be null if the delegate doesn't use its own
  // buffer.
  TfLiteStatus (*CopyToBufferHandle)(  // NOLINT
      TfLiteOpaqueContext* context, struct TfLiteOpaqueDelegateStruct* delegate,
      void* data, TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor);
  // Frees the delegate buffer handle. Note: This only frees the handle; it
  // doesn't release the underlying resource (e.g. textures). The resources
  // are either owned by the application layer or the delegate.
  // This can be null if the delegate doesn't use its own buffer.
  void (*FreeBufferHandle)(TfLiteOpaqueContext* context,  // NOLINT
                           struct TfLiteOpaqueDelegateStruct* delegate,
                           void* data, TfLiteBufferHandle* handle);
  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
  int64_t flags;
} TfLiteOpaqueDelegateBuilder;

// Creates an opaque delegate and returns its address. The opaque delegate will
// behave according to the provided `opaque_delegate_builder`. The lifetime of
// the fields within the `opaque_delegate_builder` must outlive any interaction
// between the runtime and the returned `TfLiteOpaqueDelegateStruct`. The
// returned address should be passed to `TfLiteOpaqueDelegateDelete` for
// deletion. If `opaque_delegate_builder` is a null pointer, then a null
// pointer will be returned.
struct TfLiteOpaqueDelegateStruct* TfLiteOpaqueDelegateCreate(
    const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder);

// Deletes the provided opaque `delegate`. This function has no effect if the
// `delegate` is a null pointer.
void TfLiteOpaqueDelegateDelete(
    const struct TfLiteOpaqueDelegateStruct* delegate);

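// A minimal sketch of the intended usage, assuming a hypothetical
// `MyOpaqueDelegatePrepare` callback. Note the brace-initialization, which
// zero-initializes any fields added in the future:
//
//   TfLiteOpaqueDelegateBuilder builder = {0};
//   builder.Prepare = MyOpaqueDelegatePrepare;
//   struct TfLiteOpaqueDelegateStruct* delegate =
//       TfLiteOpaqueDelegateCreate(&builder);
//   // ... use the delegate with the runtime ...
//   TfLiteOpaqueDelegateDelete(delegate);
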
#ifdef __cplusplus
}  // extern "C"
#endif  // __cplusplus
#endif  // TENSORFLOW_LITE_C_COMMON_H_