/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/kernel_util.h"

#include <stdint.h>
#include <stdlib.h>

#include <algorithm>
#include <complex>
#include <limits>
#include <memory>
#ifndef TF_LITE_STATIC_MEMORY
#include <string>
#endif  // TF_LITE_STATIC_MEMORY

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"

#if defined(__APPLE__)
#include "TargetConditionals.h"
#endif

namespace tflite {

namespace {

// Assumes tensor_index is a valid index (in bounds)
inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context,
                                      int tensor_index) {
  if (context->tensors != nullptr) {
    return &context->tensors[tensor_index];
  } else {
    return context->GetTensor(context, tensor_index);
  }
}

// Validate in a single place to reduce binary size
inline TfLiteStatus ValidateTensorIndexingSafe(const TfLiteContext* context,
                                               int index, int max_size,
                                               const int* tensor_indices,
                                               int* tensor_index) {
  if (index < 0 || index >= max_size) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Invalid tensor index %d (not in [0, %d))\n", index,
                       max_size);
    return kTfLiteError;
  }
  if (tensor_indices[index] == kTfLiteOptionalTensor) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Tensor at index %d was optional but was expected\n",
                       index);
    return kTfLiteError;
  }

  *tensor_index = tensor_indices[index];
  return kTfLiteOk;
}

// Same as above, but returns -1 for invalid inputs instead of logging an
// error and returning a status.
inline int ValidateTensorIndexing(const TfLiteContext* context, int index,
                                  int max_size, const int* tensor_indices) {
  if (index >= 0 && index < max_size) {
    const int tensor_index = tensor_indices[index];
    if (tensor_index != kTfLiteOptionalTensor) {
      return tensor_index;
    }
  }
  return -1;
}

inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->inputs->size, node->inputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

inline TfLiteStatus GetMutableInputSafe(const TfLiteContext* context,
                                        const TfLiteNode* node, int index,
                                        const TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->inputs->size,
                                          node->inputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

}  // anonymous namespace.

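// Input accessors: GetInput returns nullptr when the index is out of range or
// the input is optional; GetInputSafe instead logs an error and returns a
// status through `context`.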
const TfLiteTensor* GetInput(const TfLiteContext* context,
                             const TfLiteNode* node, int index) {
  return GetMutableInput(context, node, index);
}

TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
                          int index, const TfLiteTensor** tensor) {
  return GetMutableInputSafe(context, node, index, tensor);
}

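// Returns the input tensor at `index` only if it is a variable tensor;
// returns nullptr otherwise.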
TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  TfLiteTensor* tensor = GetMutableInput(context, node, index);
  if (tensor == nullptr) return nullptr;
  return tensor->is_variable ? tensor : nullptr;
}

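// Output accessors, mirroring the input accessors above: GetOutput returns
// nullptr on an invalid index, GetOutputSafe reports an error instead.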
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                        int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->outputs->size, node->outputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node,
                           int index, TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->outputs->size,
                                          node->outputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

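// Forwards to GetInput; GetInput already resolves optional inputs to nullptr.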
const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
                                           const TfLiteNode* node, int index) {
  return GetInput(context, node, index);
}

#ifndef TF_LITE_STATIC_MEMORY
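// Temporary-tensor accessors, analogous to the input/output accessors above.
// Not available in TF_LITE_STATIC_MEMORY builds.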
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
                           int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->temporaries->size, node->temporaries->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
                              const TfLiteNode* node, int index,
                              TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->temporaries->size,
                                 node->temporaries->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

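// Intermediate-tensor accessors, analogous to the accessors above.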
const TfLiteTensor* GetIntermediates(TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->intermediates->size, node->intermediates->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
                                  const TfLiteNode* node, int index,
                                  TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->intermediates->size,
                                 node->intermediates->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
#endif  // TF_LITE_STATIC_MEMORY

// Per-axis
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift) {
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  return PopulateConvolutionQuantizationParams(
      context, input, filter, bias, output, activation, multiplier, shift,
      output_activation_min, output_activation_max, per_channel_multiplier,
      per_channel_shift, affine_quantization->scale->size);
}

// Per-axis & per-tensor
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift,
    int num_channels) {
  TF_LITE_ENSURE_EQ(context, input->quantization.type,
                    kTfLiteAffineQuantization);
  TF_LITE_ENSURE_EQ(context, filter->quantization.type,
                    kTfLiteAffineQuantization);
  // TODO(jianlijianli): Enable bias type check and bias scale == input scale
  // * filter scale for each channel in affine quantization once bias
  // quantization is properly populated.
  // TF_LITE_ENSURE_EQ(context, bias->quantization.type,
  // kTfLiteAffineQuantization);

  // Check data type.
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  TF_LITE_ENSURE(context, affine_quantization);
  TF_LITE_ENSURE(context, affine_quantization->scale);
  const bool is_per_channel = affine_quantization->scale->size > 1;
  if (is_per_channel) {
    // Currently only Int8/Int16 is supported for per channel quantization.
    TF_LITE_ENSURE(context,
                   input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
    TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8);
    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels);
    TF_LITE_ENSURE_EQ(
        context, num_channels,
        filter->dims->data[affine_quantization->quantized_dimension]);
  }

  // Populate multiplier and shift using affine quantization.
  const float input_scale = input->params.scale;
  const float output_scale = output->params.scale;
  const float* filter_scales = affine_quantization->scale->data;
  for (int i = 0; i < num_channels; ++i) {
    // If a per-tensor quantization parameter is specified, broadcast it along
    // the quantization dimension (channels_out).
    const float scale = is_per_channel ? filter_scales[i] : filter_scales[0];
    const double filter_scale = static_cast<double>(scale);
    const double effective_output_scale = static_cast<double>(input_scale) *
                                          filter_scale /
                                          static_cast<double>(output_scale);
    int32_t significand;
    int channel_shift;
    QuantizeMultiplier(effective_output_scale, &significand, &channel_shift);
    per_channel_multiplier[i] = significand;
    per_channel_shift[i] = channel_shift;
  }

  // Populate scalar quantization parameters.
  // This check on legacy quantization parameters is kept only for backward
  // compatibility.
  if (input->type == kTfLiteUInt8) {
    // Check bias scale == input scale * filter scale.
    double real_multiplier = 0.0;
    TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
        context, input, filter, bias, output, &real_multiplier));
    int exponent;

    // Populate quantization parameters with multiplier and shift.
    QuantizeMultiplier(real_multiplier, multiplier, &exponent);
    *shift = -exponent;
  }
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
      input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, activation, output, output_activation_min,
        output_activation_max));
  }
  return kTfLiteOk;
}

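// Computes the effective output multiplier (input_scale * filter_scale /
// output_scale) and, when a bias is present, checks that the bias scale stays
// close enough to input_scale * filter_scale.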
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              const TfLiteTensor* bias,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  const double input_product_scale = static_cast<double>(input->params.scale) *
                                     static_cast<double>(filter->params.scale);
  // The following conditions must be guaranteed by the training pipeline.
  if (bias) {
    const double bias_scale = static_cast<double>(bias->params.scale);
    // Here we make sure that input_product_scale and bias_scale are roughly
    // equal. We have:
    //   (output - output_zp) * output_scale =
    //       input_product_scale * input_product + bias * bias_scale    ---- (0)
    //
    // (0) equals:
    //   (input_product + bias) * input_product_scale                   ---- (1)
    //   +
    //   bias * (bias_scale - input_product_scale)                      ---- (2)
    //
    // The real kernel computes (1), so we need (2) to have minimal impact on
    // the output: bias * (bias_scale - input_product_scale) / output_scale
    // should be small. Since bias normally falls within a small range, we
    // expect (bias_scale - input_product_scale) / output_scale to be a small
    // number, e.g. 0.02.
    const double scale_diff = std::abs(input_product_scale - bias_scale);
    const double output_scale = static_cast<double>(output->params.scale);

    TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02);
  }
  return GetQuantizedConvolutionMultipler(context, input, filter, output,
                                          multiplier);
}

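// Same as above, but without the bias-scale consistency check.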
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  const double input_product_scale =
      static_cast<double>(input->params.scale * filter->params.scale);
  TF_LITE_ENSURE(context, input_product_scale >= 0);
  *multiplier = input_product_scale / static_cast<double>(output->params.scale);

  return kTfLiteOk;
}

namespace {

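// Quantizes the float value `f` with the given scale and zero point, failing
// if the rounded value does not fit in an int32_t.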
inline TfLiteStatus Quantize(TfLiteContext* context, float scale,
                             int32_t zero_point, float f, int32_t& q) {
  const float tmp = TfLiteRound(f / scale);
  const bool no_integer_overflow_from_quantization =
      (tmp >= static_cast<float>(std::numeric_limits<int32_t>::min()) &&
       tmp <= static_cast<float>(std::numeric_limits<int32_t>::max()));
  TF_LITE_ENSURE(context, no_integer_overflow_from_quantization);
  q = zero_point + static_cast<int32_t>(tmp);
  return kTfLiteOk;
}

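// Narrows [qmin, qmax] to the quantized range implied by the fused activation,
// using the output tensor's scale and zero point.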
TfLiteStatus CalculateActivationRangeQuantizedImpl(
    TfLiteContext* context, TfLiteFusedActivation activation, int32_t qmin,
    int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) {
  const auto scale = output->params.scale;
  const auto zero_point = output->params.zero_point;

  int32_t tmp_q;
  if (activation == kTfLiteActRelu) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 0.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    *act_max = qmax;
  } else if (activation == kTfLiteActRelu6) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 0.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 6.0, tmp_q));
    *act_max = std::min(qmax, tmp_q);
  } else if (activation == kTfLiteActReluN1To1) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, -1.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 1.0, tmp_q));
    *act_max = std::min(qmax, tmp_q);
  } else {
    *act_min = qmin;
    *act_max = qmax;
  }
  return kTfLiteOk;
}
}  // namespace

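// Derives [qmin, qmax] from the output type (uint8/int8/int16) and then
// narrows the range according to the fused activation.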
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max) {
  int32_t qmin = 0;
  int32_t qmax = 0;
  if (output->type == kTfLiteUInt8) {
    qmin = std::numeric_limits<uint8_t>::min();
    qmax = std::numeric_limits<uint8_t>::max();
  } else if (output->type == kTfLiteInt8) {
    qmin = std::numeric_limits<int8_t>::min();
    qmax = std::numeric_limits<int8_t>::max();
  } else if (output->type == kTfLiteInt16) {
    qmin = std::numeric_limits<int16_t>::min();
    qmax = std::numeric_limits<int16_t>::max();
  } else {
    TF_LITE_ENSURE(context, false);
  }

  return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax,
                                               output, act_min, act_max);
}

bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
  return TfLiteIntArrayEqual(input1->dims, input2->dims);
}

#ifndef TF_LITE_STATIC_MEMORY
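// Converts a 1-D int32 shape tensor into a newly allocated TfLiteIntArray
// owned by the caller.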
TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context,
                                     const TfLiteTensor* input,
                                     TfLiteIntArray** output_shape) {
  if (NumDimensions(input) != 1) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Invalid %dD input tensor (must be a 1D tensor).",
                       NumDimensions(input));
    return kTfLiteError;
  }
  const int output_dims = SizeOfDimension(input, 0);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(output_dims), TfLiteIntArrayFree);
  for (int i = 0; i < output_dims; i++) {
    shape->data[i] = input->data.i32[i];
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}

// TODO(b/172067338): Having this function be part of the TF_LITE_STATIC_MEMORY
// build results in a 6KB size increase, even though the function is unused for
// that build. What appears to be happening is that while the linker drops the
// unused function, the string library that gets pulled in is not dropped,
// resulting in the increased binary size.
const std::string GetShapeDebugString(const TfLiteIntArray* shape) {
  std::string str;
  for (int d = 0; d < shape->size; ++d) {
    if (str.empty())
      str = "[" + std::to_string(shape->data[d]);
    else
      // Don't add space after "," to make the output consistent with
      // tensorflow::shape_inference::InferenceContext::DebugString()
      str += "," + std::to_string(shape->data[d]);
  }
  if (str.empty()) {
    str = "[]";
  } else {
    str += "]";
  }
  return str;
}

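// Computes the broadcast output shape for two inputs following NumPy-style
// broadcasting: corresponding dimensions must match or be 1, and a dimension
// of size 0 broadcasts to 0.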
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape) {
  const int dims1 = NumDimensions(input1);
  const int dims2 = NumDimensions(input2);
  const int out_dims = std::max(dims1, dims2);

  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
  for (int i = 0; i < out_dims; ++i) {
    const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    if (!(d1 == d2 || d1 == 1 || d2 == 1)) {
      TF_LITE_KERNEL_LOG(context,
                         "Given shapes, %s and %s, are not broadcastable.",
                         GetShapeDebugString(input1->dims).c_str(),
                         GetShapeDebugString(input2->dims).c_str());
      return kTfLiteError;
    }

    if (d1 == 0 || d2 == 0) {
      shape->data[out_dims - i - 1] = 0;
    } else {
      shape->data[out_dims - i - 1] = std::max(d1, d2);
    }
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}

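// Same as above, but broadcasts across three inputs.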
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        const TfLiteTensor* input3,
                                        TfLiteIntArray** output_shape) {
  const int dims1 = NumDimensions(input1);
  const int dims2 = NumDimensions(input2);
  const int dims3 = NumDimensions(input3);
  const int out_dims = std::max(std::max(dims1, dims2), dims3);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
  for (int i = 0; i < out_dims; ++i) {
    const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1);
    const int min_value = std::min(std::min(d1, d2), d3);
    int max_value = std::max(std::max(d1, d2), d3);
    // If one dimension is 0, the others must be 0 or 1.
    if (min_value == 0) max_value = 0;
    if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) ||
        !(d3 == 1 || d3 == max_value)) {
      TF_LITE_KERNEL_LOG(context,
                         "Given shapes, %s, %s and %s, are not broadcastable.",
                         GetShapeDebugString(input1->dims).c_str(),
                         GetShapeDebugString(input2->dims).c_str(),
                         GetShapeDebugString(input3->dims).c_str());
      return kTfLiteError;
    }
    shape->data[out_dims - i - 1] = max_value;
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}
#endif  // TF_LITE_STATIC_MEMORY

// Returns the size in bytes of one element of the given type. Strings do not
// have a fixed per-element size, so 0 is returned for them (and for any
// unhandled type).
int TfLiteTypeGetSize(TfLiteType type) {
  switch (type) {
    case kTfLiteUInt8:
      static_assert(sizeof(uint8_t) == 1, "");
      return 1;
    case kTfLiteInt8:
      static_assert(sizeof(int8_t) == 1, "");
      return 1;
    case kTfLiteBool:
      return sizeof(bool);
    case kTfLiteUInt16:
      static_assert(sizeof(uint16_t) == 2, "");
      return 2;
    case kTfLiteInt16:
      static_assert(sizeof(int16_t) == 2, "");
      return 2;
    case kTfLiteFloat16:
      static_assert(sizeof(int16_t) == 2, "");
      return 2;
    case kTfLiteFloat32:
      static_assert(sizeof(float) == 4, "");
      return 4;
    case kTfLiteInt32:
      static_assert(sizeof(int32_t) == 4, "");
      return 4;
    case kTfLiteUInt32:
      static_assert(sizeof(uint32_t) == 4, "");
      return 4;
    case kTfLiteInt64:
      static_assert(sizeof(int64_t) == 8, "");
      return 8;
    case kTfLiteUInt64:
      static_assert(sizeof(uint64_t) == 8, "");
      return 8;
    case kTfLiteFloat64:
      static_assert(sizeof(double) == 8, "");
      return 8;
    case kTfLiteComplex64:
      static_assert(sizeof(std::complex<float>) == 8, "");
      return 8;
    case kTfLiteComplex128:
      static_assert(sizeof(std::complex<double>) == 16, "");
      return 16;
    default:
      return 0;
  }
}

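// True on Android and on iOS devices/simulators; false elsewhere.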
bool IsMobilePlatform() {
#if defined(ANDROID) || defined(__ANDROID__)
  return true;
#elif defined(__APPLE__)
#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
  return true;
#endif
#endif
  return false;
}

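// Checks dims_signature for a -1 (dynamic) dimension; always false in
// TF_LITE_STATIC_MEMORY builds, where the check is compiled out.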
bool HasUnspecifiedDimension(const TfLiteTensor* tensor) {
#ifndef TF_LITE_STATIC_MEMORY
  if (tensor->dims_signature) {
    for (int i : TfLiteIntArrayView(tensor->dims_signature)) {
      if (i == -1) return true;
    }
  }
#endif  // TF_LITE_STATIC_MEMORY
  return false;
}

}  // namespace tflite