/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/c/common.h"

#include "tensorflow/lite/c/c_api_types.h"
#ifdef TF_LITE_TENSORFLOW_PROFILER
#include "tensorflow/lite/tensorflow_profiler_logger.h"
#endif

#ifndef TF_LITE_STATIC_MEMORY
#include <stdlib.h>
#include <string.h>
#endif  // TF_LITE_STATIC_MEMORY

extern "C" {

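// Computes the size, in bytes, required to hold a TfLiteIntArray with `size`
// elements: the `size` header field plus the flexible array member payload.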
size_t TfLiteIntArrayGetSizeInBytes(int size) {
  static TfLiteIntArray dummy;

  size_t computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
  // Context for why this is needed is in http://b/189926408#comment21
  computed_size -= sizeof(dummy.data[0]);
#endif
  return computed_size;
}

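// Returns 1 if the two arrays are the same object or hold the same elements,
// and 0 otherwise; a NULL array only equals another NULL array.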
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) {
  if (a == b) return 1;
  if (a == nullptr || b == nullptr) return 0;
  return TfLiteIntArrayEqualsArray(a, b->size, b->data);
}

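// Returns 1 if `a` holds exactly the `b_size` elements of `b_data`; a NULL
// `a` is considered equal to an empty array.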
int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
                              const int b_data[]) {
  if (a == nullptr) return (b_size == 0);
  if (a->size != b_size) return 0;
  for (int i = 0; i < a->size; i++) {
    if (a->data[i] != b_data[i]) return 0;
  }
  return 1;
}

#ifndef TF_LITE_STATIC_MEMORY

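// Allocates a TfLiteIntArray of the given size on the heap. Returns NULL if
// the computed allocation size is zero or the allocation fails.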
TfLiteIntArray* TfLiteIntArrayCreate(int size) {
  size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size);
  if (alloc_size <= 0) return nullptr;
  TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size);
  if (!ret) return ret;
  ret->size = size;
  return ret;
}

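// Returns a heap-allocated copy of `src`, or NULL if `src` is NULL or the
// allocation fails.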
TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) {
  if (!src) return nullptr;
  TfLiteIntArray* ret = TfLiteIntArrayCreate(src->size);
  if (ret) {
    memcpy(ret->data, src->data, src->size * sizeof(int));
  }
  return ret;
}

void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); }

#endif  // TF_LITE_STATIC_MEMORY

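// Computes the size, in bytes, required to hold a TfLiteFloatArray with
// `size` elements.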
int TfLiteFloatArrayGetSizeInBytes(int size) {
  static TfLiteFloatArray dummy;

  int computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
  // Context for why this is needed is in http://b/189926408#comment21
  computed_size -= sizeof(dummy.data[0]);
#endif
  return computed_size;
}

#ifndef TF_LITE_STATIC_MEMORY

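// Allocates a TfLiteFloatArray of the given size on the heap; returns NULL
// if the allocation fails.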
TfLiteFloatArray* TfLiteFloatArrayCreate(int size) {
  TfLiteFloatArray* ret =
      (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size));
  // Guard against allocation failure before writing the header field.
  if (!ret) return nullptr;
  ret->size = size;
  return ret;
}

void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); }

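// Frees `t`'s data buffer if it is owned by the runtime (dynamic or
// persistent read-only allocations); buffers with other allocation types are
// owned elsewhere and left untouched.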
void TfLiteTensorDataFree(TfLiteTensor* t) {
  if (t->allocation_type == kTfLiteDynamic ||
      t->allocation_type == kTfLitePersistentRo) {
    if (t->data.raw) {
#ifdef TF_LITE_TENSORFLOW_PROFILER
      tflite::OnTfLiteTensorDealloc(t);
#endif
      free(t->data.raw);
    }
  }
  t->data.raw = nullptr;
}

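// Releases any heap-allocated affine quantization parameters and resets
// `quantization` to kTfLiteNoQuantization.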
void TfLiteQuantizationFree(TfLiteQuantization* quantization) {
  if (quantization->type == kTfLiteAffineQuantization) {
    TfLiteAffineQuantization* q_params =
        (TfLiteAffineQuantization*)(quantization->params);
    if (q_params->scale) {
      TfLiteFloatArrayFree(q_params->scale);
      q_params->scale = nullptr;
    }
    if (q_params->zero_point) {
      TfLiteIntArrayFree(q_params->zero_point);
      q_params->zero_point = nullptr;
    }
    free(q_params);
  }
  quantization->params = nullptr;
  quantization->type = kTfLiteNoQuantization;
}

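// Frees a TfLiteSparsity struct along with all of the index arrays it owns.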
void TfLiteSparsityFree(TfLiteSparsity* sparsity) {
  if (sparsity == nullptr) {
    return;
  }

  if (sparsity->traversal_order) {
    TfLiteIntArrayFree(sparsity->traversal_order);
    sparsity->traversal_order = nullptr;
  }

  if (sparsity->block_map) {
    TfLiteIntArrayFree(sparsity->block_map);
    sparsity->block_map = nullptr;
  }

  if (sparsity->dim_metadata) {
    for (int i = 0; i < sparsity->dim_metadata_size; i++) {
      // Take the entry by pointer so that clearing the freed pointers
      // updates the array itself rather than a local copy.
      TfLiteDimensionMetadata* metadata = &sparsity->dim_metadata[i];
      if (metadata->format == kTfLiteDimSparseCSR) {
        TfLiteIntArrayFree(metadata->array_segments);
        metadata->array_segments = nullptr;
        TfLiteIntArrayFree(metadata->array_indices);
        metadata->array_indices = nullptr;
      }
    }
    free(sparsity->dim_metadata);
    sparsity->dim_metadata = nullptr;
  }

  free(sparsity);
}

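// Frees all resources owned by `t`: its data buffer (when owned), shape
// arrays, quantization parameters, and sparsity metadata.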
void TfLiteTensorFree(TfLiteTensor* t) {
  TfLiteTensorDataFree(t);
  if (t->dims) TfLiteIntArrayFree(t->dims);
  t->dims = nullptr;

  if (t->dims_signature) {
    TfLiteIntArrayFree((TfLiteIntArray*)t->dims_signature);
  }
  t->dims_signature = nullptr;

  TfLiteQuantizationFree(&t->quantization);
  TfLiteSparsityFree(t->sparsity);
  t->sparsity = nullptr;
}

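// Sets all of a tensor's fields, freeing any resources the tensor
// previously owned.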
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor) {
  TfLiteTensorFree(tensor);
  tensor->type = type;
  tensor->name = name;
  tensor->dims = dims;
  tensor->params = quantization;
  tensor->data.raw = buffer;
  tensor->bytes = size;
  tensor->allocation_type = allocation_type;
  tensor->allocation = allocation;
  tensor->is_variable = is_variable;

  tensor->quantization.type = kTfLiteNoQuantization;
  tensor->quantization.params = nullptr;
}

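// Copies the contents of `src` into `dst`; the byte sizes must match. Does
// nothing (and reports success) if either tensor is NULL or both are the
// same tensor.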
TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst) {
  if (!src || !dst) return kTfLiteOk;
  if (src->bytes != dst->bytes) return kTfLiteError;
  if (src == dst) return kTfLiteOk;

  dst->type = src->type;
  if (dst->dims) TfLiteIntArrayFree(dst->dims);
  dst->dims = TfLiteIntArrayCopy(src->dims);
  memcpy(dst->data.raw, src->data.raw, src->bytes);
  dst->buffer_handle = src->buffer_handle;
  dst->data_is_stale = src->data_is_stale;
  dst->delegate = src->delegate;

  return kTfLiteOk;
}

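// Resizes the data buffer of a dynamic (or persistent read-only) tensor to
// `num_bytes`, optionally preserving the existing contents when growing.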
void TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor,
                                 bool preserve_data) {
  if (tensor->allocation_type != kTfLiteDynamic &&
      tensor->allocation_type != kTfLitePersistentRo) {
    return;
  }
  // TODO(b/145340303): Tensor data should be aligned.
  if (!tensor->data.data) {
    tensor->data.data = (char*)malloc(num_bytes);
#ifdef TF_LITE_TENSORFLOW_PROFILER
    tflite::OnTfLiteTensorAlloc(tensor, num_bytes);
#endif
  } else if (num_bytes > tensor->bytes) {
#ifdef TF_LITE_TENSORFLOW_PROFILER
    tflite::OnTfLiteTensorDealloc(tensor);
#endif
    if (preserve_data) {
      tensor->data.data = (char*)realloc(tensor->data.data, num_bytes);
    } else {
      // Calling free and then malloc can be more efficient than realloc,
      // since it avoids needlessly copying data that does not have to be
      // preserved.
      free(tensor->data.data);
      tensor->data.data = (char*)malloc(num_bytes);
    }
#ifdef TF_LITE_TENSORFLOW_PROFILER
    tflite::OnTfLiteTensorAlloc(tensor, num_bytes);
#endif
  }
  tensor->bytes = num_bytes;
}

void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
  return TfLiteTensorResizeMaybeCopy(num_bytes, tensor, true);
}
#endif  // TF_LITE_STATIC_MEMORY

const char* TfLiteTypeGetName(TfLiteType type) {
  switch (type) {
    case kTfLiteNoType:
      return "NOTYPE";
    case kTfLiteFloat32:
      return "FLOAT32";
    case kTfLiteUInt16:
      return "UINT16";
    case kTfLiteInt16:
      return "INT16";
    case kTfLiteInt32:
      return "INT32";
    case kTfLiteUInt32:
      return "UINT32";
    case kTfLiteUInt8:
      return "UINT8";
    case kTfLiteInt8:
      return "INT8";
    case kTfLiteInt64:
      return "INT64";
    case kTfLiteUInt64:
      return "UINT64";
    case kTfLiteBool:
      return "BOOL";
    case kTfLiteComplex64:
      return "COMPLEX64";
    case kTfLiteComplex128:
      return "COMPLEX128";
    case kTfLiteString:
      return "STRING";
    case kTfLiteFloat16:
      return "FLOAT16";
    case kTfLiteFloat64:
      return "FLOAT64";
    case kTfLiteResource:
      return "RESOURCE";
    case kTfLiteVariant:
      return "VARIANT";
  }
  return "Unknown type";
}

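// Returns a zero-initialized TfLiteDelegate with default flags.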
TfLiteDelegate TfLiteDelegateCreate() { return TfLiteDelegate{}; }

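// Allocates a delegate that wraps a copy of `opaque_delegate_builder`. The
// caller owns the result and must release it with TfLiteOpaqueDelegateDelete.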
struct TfLiteOpaqueDelegateStruct* TfLiteOpaqueDelegateCreate(
    const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder) {
  if (!opaque_delegate_builder) return nullptr;

  TfLiteDelegate* result = new TfLiteDelegate{};
  result->opaque_delegate_builder = new TfLiteOpaqueDelegateBuilder{};
  *(result->opaque_delegate_builder) = *opaque_delegate_builder;

  return reinterpret_cast<struct TfLiteOpaqueDelegateStruct*>(result);
}

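// Releases a delegate created with TfLiteOpaqueDelegateCreate, including the
// copied builder it owns.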
void TfLiteOpaqueDelegateDelete(
    const struct TfLiteOpaqueDelegateStruct* opaque_delegate) {
  if (!opaque_delegate) return;

  const TfLiteDelegate* tflite_delegate =
      reinterpret_cast<const TfLiteDelegate*>(opaque_delegate);
  delete tflite_delegate->opaque_delegate_builder;
  delete tflite_delegate;
}

}  // extern "C"