/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/tools/verifier.h"

#include <algorithm>
#include <climits>
#include <complex>
#include <cstdarg>
#include <cstdint>
#include <cstring>
#include <vector>

#include "absl/container/flat_hash_set.h"
#include "absl/types/optional.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/tools/verifier_internal.h"
#include "tensorflow/lite/util.h"
#include "tensorflow/lite/version.h"

namespace tflite {
namespace {

const char* NameOrEmptyString(const flatbuffers::String* str) {
  if (str == nullptr || str->c_str() == nullptr) {
    return "";
  }
  return str->c_str();
}

bool IsNullOrEmptyString(const flatbuffers::String* str) {
  return strcmp(NameOrEmptyString(str), "") == 0;
}

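// Forwards a printf-style error message to the error reporter, if one was
// provided.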
void ReportError(ErrorReporter* error_reporter, const char* format, ...) {
  if (error_reporter) {
    va_list args;
    va_start(args, format);
    TF_LITE_REPORT_ERROR(error_reporter, format, args);
    va_end(args);
  }
}

// Returns the uint32_t value pointed to by ptr, byte-swapping on big-endian
// hosts since flatbuffer data is stored little-endian.
uint32_t GetIntPtr(const char* ptr) {
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return flatbuffers::EndianScalar(*reinterpret_cast<const uint32_t*>(ptr));
#else
  return *reinterpret_cast<const uint32_t*>(ptr);
#endif
}

// Upper bound on the number of strings so that the header size,
// (num_strings + 2) * sizeof(int32_t) bytes, cannot overflow a uint32_t.
const uint32_t kMaxNumString = UINT_MAX / sizeof(int32_t) - 2;

// Verifies that a string tensor has legit buffer contents that follow the
// schema defined in lite/string_util.h.
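// Sketch of the string tensor buffer layout from lite/string_util.h:
//   [num_strings          : uint32]
//   [offset[0]            : uint32]  (equals the header size)
//   ...
//   [offset[num_strings]  : uint32]  (equals the total buffer size)
//   [string 0 bytes][string 1 bytes] ... [string num_strings-1 bytes]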
bool VerifyStringTensorBuffer(const Tensor& tensor, const Buffer& buffer,
                              ErrorReporter* error_reporter) {
  uint32_t buffer_size = buffer.data()->size();
  if (buffer_size < sizeof(uint32_t)) {
    ReportError(error_reporter, "String tensor %s is invalid (empty)",
                NameOrEmptyString(tensor.name()));
    return false;
  }
  const char* buffer_ptr = reinterpret_cast<const char*>(buffer.data()->data());

  uint32_t num_strings = GetIntPtr(buffer_ptr);
  if (num_strings > kMaxNumString) {
    ReportError(error_reporter,
                "String tensor %s has an invalid number of strings: %d",
                NameOrEmptyString(tensor.name()), num_strings);
    return false;
  }
  uint32_t header_offsets =
      static_cast<uint32_t>(num_strings + 2) * sizeof(int32_t);

  if (buffer_size < header_offsets) {
    ReportError(error_reporter,
                "String tensor %s buffer requires at least %d bytes, but is "
                "allocated with %d bytes",
                NameOrEmptyString(tensor.name()), header_offsets, buffer_size);
    return false;
  }

  uint32_t prev_ptr = header_offsets;
  uint32_t offset = sizeof(int32_t);

  if (GetIntPtr(buffer_ptr + offset) != header_offsets) {
    ReportError(error_reporter,
                "String tensor %s buffer initial offset must be: %d",
                NameOrEmptyString(tensor.name()), header_offsets);
    return false;
  }
  offset += sizeof(int32_t);
  for (int i = 1, end = num_strings; i <= end; i++, offset += sizeof(int32_t)) {
    int string_offset = GetIntPtr(buffer_ptr + offset);
    if (string_offset < static_cast<int>(prev_ptr) ||
        string_offset > static_cast<int>(buffer_size)) {
      ReportError(error_reporter,
                  "String tensor %s buffer is invalid: index %d",
                  NameOrEmptyString(tensor.name()), i);
      return false;
    }
  }
  if (GetIntPtr(buffer_ptr + offset - sizeof(int32_t)) != buffer_size) {
    ReportError(error_reporter,
                "String tensor %s buffer last offset must be %d",
                NameOrEmptyString(tensor.name()), buffer_size);
    return false;
  }
  return true;
}

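// Returns true if the array_segments union of the dimension metadata is set
// and its backing value vector is present.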
bool CheckArraySegments(const DimensionMetadata* dim_metadata) {
  if (dim_metadata->array_segments() == nullptr) {
    return false;
  }
  switch (dim_metadata->array_segments_type()) {
    case SparseIndexVector_Int32Vector:
      return (dim_metadata->array_segments_as_Int32Vector()->values() !=
              nullptr);
    case SparseIndexVector_Uint16Vector:
      return (dim_metadata->array_segments_as_Uint16Vector()->values() !=
              nullptr);
    case SparseIndexVector_Uint8Vector:
      return (dim_metadata->array_segments_as_Uint8Vector()->values() !=
              nullptr);
    default:
      return false;
  }
}

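// Returns the number of entries in the array_segments vector, or -1 if the
// vector type is unknown.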
int GetSizeOfSegments(const DimensionMetadata* dim_metadata) {
  switch (dim_metadata->array_segments_type()) {
    case SparseIndexVector_Int32Vector:
      return dim_metadata->array_segments_as_Int32Vector()->values()->size();
    case SparseIndexVector_Uint16Vector:
      return dim_metadata->array_segments_as_Uint16Vector()->values()->size();
    case SparseIndexVector_Uint8Vector:
      return dim_metadata->array_segments_as_Uint8Vector()->values()->size();
    default:
      return -1;
  }
}

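// Returns the i-th entry of the array_segments vector as an int, or -1 if
// the vector type is unknown.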
int GetValueOfSegmentsAt(const DimensionMetadata* dim_metadata, const int i) {
  switch (dim_metadata->array_segments_type()) {
    case SparseIndexVector_Int32Vector:
      return static_cast<int>(
          dim_metadata->array_segments_as_Int32Vector()->values()->Get(i));
    case SparseIndexVector_Uint16Vector:
      return static_cast<int>(
          dim_metadata->array_segments_as_Uint16Vector()->values()->Get(i));
    case SparseIndexVector_Uint8Vector:
      return static_cast<int>(
          dim_metadata->array_segments_as_Uint8Vector()->values()->Get(i));
    default:
      return -1;
  }
}

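// Returns true if the array_indices union of the dimension metadata is set
// and its backing value vector is present.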
bool CheckArrayIndices(const DimensionMetadata* dim_metadata) {
  if (dim_metadata->array_indices() == nullptr) {
    return false;
  }
  switch (dim_metadata->array_indices_type()) {
    case SparseIndexVector_Int32Vector:
      return (dim_metadata->array_indices_as_Int32Vector()->values() !=
              nullptr);
    case SparseIndexVector_Uint16Vector:
      return (dim_metadata->array_indices_as_Uint16Vector()->values() !=
              nullptr);
    case SparseIndexVector_Uint8Vector:
      return (dim_metadata->array_indices_as_Uint8Vector()->values() !=
              nullptr);
    default:
      return false;
  }
}

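// Returns the number of entries in the array_indices vector, or -1 if the
// vector type is unknown.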
int GetSizeOfIndices(const DimensionMetadata* dim_metadata) {
  switch (dim_metadata->array_indices_type()) {
    case SparseIndexVector_Int32Vector:
      return dim_metadata->array_indices_as_Int32Vector()->values()->size();
    case SparseIndexVector_Uint16Vector:
      return dim_metadata->array_indices_as_Uint16Vector()->values()->size();
    case SparseIndexVector_Uint8Vector:
      return dim_metadata->array_indices_as_Uint8Vector()->values()->size();
    default:
      return -1;
  }
}

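// Returns the i-th entry of the array_indices vector as an int, or -1 if
// the vector type is unknown.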
int GetValueOfIndicesAt(const DimensionMetadata* dim_metadata, const int i) {
  switch (dim_metadata->array_indices_type()) {
    case SparseIndexVector_Int32Vector:
      return static_cast<int>(
          dim_metadata->array_indices_as_Int32Vector()->values()->Get(i));
    case SparseIndexVector_Uint16Vector:
      return static_cast<int>(
          dim_metadata->array_indices_as_Uint16Vector()->values()->Get(i));
    case SparseIndexVector_Uint8Vector:
      return static_cast<int>(
          dim_metadata->array_indices_as_Uint8Vector()->values()->Get(i));
    default:
      return -1;
  }
}

// The sparsity parameter defines a tree structure that maps each non-zero
// element stored in the flattened buffer back to its index in the conceptual
// dense tensor.
// Traverse the tree level by level, count the total number of elements, and
// validate the sparsity parameters along the way.
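//
// For example, a 3x4 matrix with non-zeros at (0, 1), (0, 3), and (2, 2),
// traversed as (d0, d1) with d0 DENSE (dense_size = 3) and d1 SPARSE_CSR,
// would carry array_segments = {0, 2, 2, 3} and array_indices = {1, 3, 2}:
// segments are non-negative and non-decreasing, there is one more segment
// than the number of rows, and the last segment value equals the number of
// stored indices.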
absl::optional<uint64_t> VerifyAndCountElements(
    const SparsityParameters& sparsity, const std::vector<int>& dim_sizes) {
  const int total_level = sparsity.traversal_order()->size();
  uint64_t num_elements = 1;
  for (int i = 0; i < total_level; i++) {
    const int original_dim = sparsity.traversal_order()->Get(i);
    const auto* dim_metadata = sparsity.dim_metadata()->Get(i);
    if (dim_metadata->format() == DimensionType_DENSE) {
      if (dim_metadata->dense_size() != dim_sizes[original_dim]) {
        return absl::nullopt;
      }

      // Each index in a dense dimension is stored implicitly.
      num_elements *= dim_metadata->dense_size();
    } else {
      if (!CheckArraySegments(dim_metadata) ||
          !CheckArrayIndices(dim_metadata)) {
        return absl::nullopt;
      }

      int array_segments_size = GetSizeOfSegments(dim_metadata);
      int array_indices_size = GetSizeOfIndices(dim_metadata);

      for (int j = 0; j < array_segments_size - 1; j++) {
        if (GetValueOfSegmentsAt(dim_metadata, j) < 0 ||
            GetValueOfSegmentsAt(dim_metadata, j + 1) < 0 ||
            GetValueOfSegmentsAt(dim_metadata, j) >
                GetValueOfSegmentsAt(dim_metadata, j + 1)) {
          return absl::nullopt;
        }
      }

      if (static_cast<int>(num_elements) != array_segments_size - 1) {
        return absl::nullopt;
      }

      if (array_indices_size !=
          GetValueOfSegmentsAt(dim_metadata, array_segments_size - 1)) {
        return absl::nullopt;
      }

      for (int j = 0; j < array_indices_size; j++) {
        if (GetValueOfIndicesAt(dim_metadata, j) < 0 ||
            GetValueOfIndicesAt(dim_metadata, j) >= dim_sizes[original_dim]) {
          return absl::nullopt;
        }
      }

      // Need to reset num_elements when seeing a sparse dimension.
      num_elements = array_indices_size;
    }
  }

  return num_elements;
}

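// Verifies the sparsity metadata of a tensor against its dense shape and
// returns the number of elements stored in the flattened sparse buffer, or
// absl::nullopt if the metadata is inconsistent.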
absl::optional<uint64_t> VerifyAndCountSparseElements(const Tensor& tensor) {
  const auto* sparsity = tensor.sparsity();
  if (sparsity->traversal_order() == nullptr ||
      sparsity->dim_metadata() == nullptr) {
    return absl::nullopt;
  }

  const int total_dims = sparsity->traversal_order()->size();
  const int original_rank = tensor.shape()->size();
  const int sparsity_dim_metadata_size = sparsity->dim_metadata()->size();
  if (total_dims < original_rank || sparsity_dim_metadata_size != total_dims) {
    return absl::nullopt;
  }

  const int block_rank = total_dims - original_rank;
  if (block_rank > 0) {
    if (sparsity->block_map() == nullptr) {
      return absl::nullopt;
    }
    const int sparse_rank = sparsity->block_map()->size();
    if (sparse_rank != block_rank) {
      return absl::nullopt;
    }
  }

  // For an n-dimensional tensor (d0, ..., dn-1) with a k-dimensional block
  // (dn, ..., dn+k-1), the first n elements in the traversal order should be a
  // permutation of (d0, ..., dn-1), and the last k elements should be a
  // permutation of (dn, ..., dn+k-1).
  std::vector<int> traversal_order(total_dims);
  for (int i = 0; i < total_dims; i++) {
    traversal_order[i] = sparsity->traversal_order()->Get(i);
  }

  std::sort(traversal_order.begin(), traversal_order.begin() + original_rank);
  for (int i = 0; i < original_rank; i++) {
    if (traversal_order[i] != i) {
      return absl::nullopt;
    }
  }

  std::sort(traversal_order.begin() + original_rank, traversal_order.end());
  for (int i = original_rank; i < total_dims; i++) {
    if (traversal_order[i] != i) {
      return absl::nullopt;
    }
  }

  // For an n-dimensional tensor (d0, ..., dn-1) with a k-dimensional block
  // (dn, ..., dn+k-1), expanded_dim_sizes holds the size of each dimension in
  // the order (d0, ..., dn-1, dn, ..., dn+k-1), not the traversal order.
  // For example, a 4x4 tensor with a 2x2 block has expanded_dim_sizes =
  // {2, 2, 2, 2}.
  std::vector<int> expanded_dim_sizes;
  expanded_dim_sizes.resize(total_dims);
  // First go through the original tensor dimensions and populate their sizes.
  for (int i = 0; i < original_rank; i++) {
    expanded_dim_sizes[i] = tensor.shape()->Get(i);
  }
  // Then go through the block dimensions, and
  // 1. populate the block dimension size;
  // 2. block_map[i] holds the original dimension that block dimension i maps
  //    to, so divide the size of that original dimension by the size of the
  //    i-th block dimension.
  for (int i = 0; i < block_rank; i++) {
    int original_block_dim =
        sparsity->traversal_order()->Get(i + original_rank);
    if (original_block_dim < 0 || original_block_dim >= total_dims) {
      return absl::nullopt;
    }
    int block_dim_size =
        sparsity->dim_metadata()->Get(i + original_rank)->dense_size();
    // A block dimension size of <= 0 is invalid.
    if (block_dim_size <= 0) {
      return absl::nullopt;
    }

    expanded_dim_sizes[original_block_dim] = block_dim_size;

    int mapped_block_dim = sparsity->block_map()->Get(i);
    if (mapped_block_dim < 0 || mapped_block_dim >= total_dims) {
      return absl::nullopt;
    }
    expanded_dim_sizes[mapped_block_dim] /= block_dim_size;
  }

  return VerifyAndCountElements(*sparsity, expanded_dim_sizes);
}

// Verifies that a numeric tensor has a legit buffer of the expected size.
bool VerifyNumericTensorBuffer(const Tensor& tensor, const Buffer& buffer,
                               ErrorReporter* error_reporter) {
  uint64_t bytes_required = 1;
  if (!tensor.shape()) {
    // Empty tensor. Avoid further checks.
    return true;
  }
  if (tensor.sparsity() != nullptr) {
    const auto num_elements = VerifyAndCountSparseElements(tensor);
    if (!num_elements.has_value()) {
      ReportError(error_reporter, "Tensor %s has invalid sparsity parameters",
                  NameOrEmptyString(tensor.name()));
      return false;
    }
    bytes_required = num_elements.value();
    if (bytes_required > UINT_MAX) {
      ReportError(error_reporter, "Tensor %s dimension overflow",
                  NameOrEmptyString(tensor.name()));
      return false;
    }
  } else {
    for (int dim : *tensor.shape()) {
      bytes_required *= dim;
      if (bytes_required > UINT_MAX) {
        ReportError(error_reporter, "Tensor %s dimension overflow",
                    NameOrEmptyString(tensor.name()));
        return false;
      }
    }
  }

  switch (tensor.type()) {
    case TensorType_FLOAT32:
      bytes_required *= sizeof(float);
      break;
    case TensorType_FLOAT16:
      bytes_required *= sizeof(uint16_t);
      break;
    case TensorType_FLOAT64:
      bytes_required *= sizeof(double);
      break;
    case TensorType_INT32:
      bytes_required *= sizeof(int32_t);
      break;
    case TensorType_UINT32:
      bytes_required *= sizeof(uint32_t);
      break;
    case TensorType_UINT8:
      bytes_required *= sizeof(uint8_t);
      break;
    case TensorType_INT8:
      bytes_required *= sizeof(int8_t);
      break;
    case TensorType_INT64:
      bytes_required *= sizeof(int64_t);
      break;
    case TensorType_UINT64:
      bytes_required *= sizeof(uint64_t);
      break;
    case TensorType_BOOL:
      bytes_required *= sizeof(bool);
      break;
    case TensorType_INT16:
      bytes_required *= sizeof(uint16_t);
      break;
    case TensorType_UINT16:
      bytes_required *= sizeof(uint16_t);
      break;
    case TensorType_COMPLEX64:
      bytes_required *= sizeof(std::complex<float>);
      break;
    case TensorType_COMPLEX128:
      bytes_required *= sizeof(std::complex<double>);
      break;
    default:
      ReportError(error_reporter, "Tensor %s invalid type: %d",
                  NameOrEmptyString(tensor.name()), tensor.type());
      return false;
  }
  if (bytes_required > UINT_MAX) {
    ReportError(error_reporter, "Tensor %s dimension overflow",
                NameOrEmptyString(tensor.name()));
    return false;
  }

  if (bytes_required != buffer.data()->size()) {
    ReportError(
        error_reporter,
        "Tensor %s requires %d bytes, but is allocated with %d bytes buffer",
        NameOrEmptyString(tensor.name()), bytes_required,
        buffer.data()->size());
    return false;
  }
  return true;

  // TODO(yichengfan): verify quantized tensors.
}

using flatbuffers::Offset;
using flatbuffers::Vector;

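// Verifies that every operator has both an 'inputs' and an 'outputs' vector.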
bool VerifyOperators(const Vector<Offset<Operator>>& operators,
                     ErrorReporter* error_reporter) {
  for (const auto* op : operators) {
    if (!op->inputs()) {
      ReportError(error_reporter, "Missing 'inputs' for operator.");
      return false;
    }
    if (!op->outputs()) {
      ReportError(error_reporter, "Missing 'outputs' for operator.");
      return false;
    }
  }
  return true;
}

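// Returns true if the tensor is backed by a model buffer that actually
// contains data, i.e. the tensor is a constant.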
bool IsConstantTensor(const Tensor& tensor, const Model& model) {
  if (!tensor.buffer() || !model.buffers()) return false;
  if (tensor.buffer() > 0 && tensor.buffer() < model.buffers()->size()) {
    auto* buffer = model.buffers()->Get(tensor.buffer());
    if (buffer && buffer->data()) {
      return true;
    }
  }
  return false;
}

// Performs basic consistency checks on a subgraph.
bool VerifySubGraphConsistency(const Model& model, const SubGraph& subgraph,
                               ErrorReporter* error_reporter) {
  absl::flat_hash_set<int> subgraph_input_tensors, constant_tensors,
      variable_tensors, output_tensors;
  if (subgraph.tensors()) {
    for (int i = 0, end = subgraph.tensors()->size(); i < end; ++i) {
      const auto* tensor = subgraph.tensors()->Get(i);
      if (IsConstantTensor(*tensor, model)) {
        constant_tensors.insert(i);
      } else if (tensor->is_variable()) {
        variable_tensors.insert(i);
      }
    }
  }
  if (subgraph.inputs()) {
    for (const int tensor_idx : *subgraph.inputs()) {
      subgraph_input_tensors.insert(tensor_idx);
    }
  }

  if (subgraph.operators()) {
    for (int op_idx = 0, end = subgraph.operators()->size(); op_idx < end;
         ++op_idx) {
      const auto* op = subgraph.operators()->Get(op_idx);
      if (!model.operator_codes() ||
          (op->opcode_index() >= model.operator_codes()->size())) {
        ReportError(error_reporter,
                    "Operator %d does not exist in model op codes",
                    op->opcode_index());
        return false;
      }
      const auto& opcode = model.operator_codes()->Get(op->opcode_index());
      auto builtin_code = GetBuiltinCode(opcode);
      // Check for invalid inputs: every input must be a constant, a variable,
      // a subgraph input, or the output of a previously visited op.
      for (const int input_idx : *op->inputs()) {
        if (input_idx == kTfLiteOptionalTensor) continue;
        if (constant_tensors.find(input_idx) == constant_tensors.end() &&
            variable_tensors.find(input_idx) == variable_tensors.end() &&
            subgraph_input_tensors.find(input_idx) ==
                subgraph_input_tensors.end() &&
            output_tensors.find(input_idx) == output_tensors.end()) {
          ReportError(error_reporter,
                      "Input tensor %d to op %d (%s) is not produced",
                      input_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
          return false;
        }
      }
      // Check for cycles and invalid outputs: an output must not be a
      // constant, a variable, a subgraph input, or a tensor already produced
      // by another op.
      for (const int output_idx : *op->outputs()) {
        if (constant_tensors.find(output_idx) != constant_tensors.end()) {
          ReportError(
              error_reporter, "Output tensor %d to op %d (%s) is a constant",
              output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
          return false;
        } else if (variable_tensors.find(output_idx) !=
                   variable_tensors.end()) {
          ReportError(
              error_reporter, "Output tensor %d to op %d (%s) is a variable",
              output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
          return false;
        } else if (subgraph_input_tensors.find(output_idx) !=
                   subgraph_input_tensors.end()) {
          ReportError(error_reporter,
                      "Output tensor %d to op %d (%s) is a subgraph input",
                      output_idx, op_idx,
                      EnumNameBuiltinOperator(builtin_code));
          return false;
        } else if (output_tensors.find(output_idx) != output_tensors.end()) {
          ReportError(error_reporter,
                      "Output tensor %d to op %d (%s) is an output from "
                      "another op. There is a cycle in the graph",
                      output_idx, op_idx,
                      EnumNameBuiltinOperator(builtin_code));
          return false;
        }
        // This can be an input to a subsequent op.
        output_tensors.insert(output_idx);
      }
    }
  }
  return true;
}

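// Verifies that every subgraph has an 'operators' section, that its operators
// are well formed, and that the subgraph is internally consistent.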
bool VerifySubGraphs(const Model& model, ErrorReporter* error_reporter) {
  if (!model.subgraphs()) {
    ReportError(error_reporter, "Missing 'subgraphs' section.");
    return false;
  }
  for (const auto* subgraph : *model.subgraphs()) {
    if (!subgraph->operators()) {
      ReportError(error_reporter, "Missing 'operators' section in subgraph.");
      return false;
    }

    if (!VerifyOperators(*subgraph->operators(), error_reporter)) {
      return false;
    }

    if (!VerifySubGraphConsistency(model, *subgraph, error_reporter)) {
      return false;
    }
  }
  return true;
}

// Verifies that tensors have valid properties and a legit buffer, if one is
// set.
bool VerifyTensors(const Model& model, ErrorReporter* error_reporter) {
  if (!model.subgraphs()) {
    return true;
  }
  if (!model.buffers()) {
    ReportError(error_reporter, "Missing 'buffers' section.");
    return false;
  }

  for (const auto* subgraph : *model.subgraphs()) {
    if (!subgraph->tensors()) {
      continue;
    }
    for (const auto* tensor : *subgraph->tensors()) {
      if (!tensor->buffer()) {
        continue;
      }
      if (tensor->buffer() >= model.buffers()->size()) {
        ReportError(error_reporter, "Tensor %s invalid buffer index: %d",
                    NameOrEmptyString(tensor->name()), tensor->buffer());
        return false;
      }
      auto* buffer = model.buffers()->Get(tensor->buffer());
      if (!buffer) {
        ReportError(error_reporter, "Tensor %s buffer %d not set",
                    NameOrEmptyString(tensor->name()), tensor->buffer());
        return false;
      }

      // Many transient tensors don't have data in the flatbuffer. Their
      // buffers will be allocated by the interpreter at run-time.
      if (buffer->data()) {
        if (tensor->type() == TensorType_STRING) {
          if (!VerifyStringTensorBuffer(*tensor, *buffer, error_reporter)) {
            return false;
          }
        } else {
          if (!VerifyNumericTensorBuffer(*tensor, *buffer, error_reporter)) {
            return false;
          }
        }
      }
    }
  }
  return true;
}

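// Verifies that every operator code in the model resolves to an op known to
// the resolver, except for custom ops used only in validation subgraphs.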
bool VerifyOps(const Model& model, const OpResolver& resolver,
               ErrorReporter* error_reporter) {
  if (!model.operator_codes()) {
    return true;
  }

  // Track which op codes are used only in the validation subgraphs. Validation
  // subgraphs are allowed to contain custom ops that are not in the resolver,
  // as they will be run with a custom resolver.
  absl::flat_hash_set<int> regular_code_indices;
  absl::flat_hash_set<int> validation_code_indices;
  for (const auto* subgraph : *model.subgraphs()) {
    if (!subgraph->operators()) {
      continue;
    }
    if (subgraph->name() && IsValidationSubgraph(subgraph->name()->c_str())) {
      for (const auto* op : *(subgraph->operators())) {
        validation_code_indices.insert(op->opcode_index());
      }
    } else {
      for (const auto* op : *(subgraph->operators())) {
        regular_code_indices.insert(op->opcode_index());
      }
    }
  }
  for (int i = 0; i < model.operator_codes()->size(); i++) {
    const auto* opcode = model.operator_codes()->Get(i);
    auto builtin_code = GetBuiltinCode(opcode);
    if (builtin_code < BuiltinOperator_MIN ||
        builtin_code > BuiltinOperator_MAX) {
      ReportError(error_reporter, "Operator id '%d' is out of range.",
                  builtin_code);
      return false;
    }

    if (builtin_code == BuiltinOperator_CUSTOM) {
      if (IsNullOrEmptyString(opcode->custom_code())) {
        ReportError(error_reporter,
                    "Invalid custom op name, cannot be null/empty.");
        return false;
      } else if (!resolver.FindOp(opcode->custom_code()->c_str(),
                                  opcode->version())) {
        if (regular_code_indices.contains(i) ||
            !validation_code_indices.contains(i)) {
          ReportError(error_reporter, "Unsupported custom op: %s, version: %d",
                      opcode->custom_code()->c_str(), opcode->version());
          return false;
        }
      }
    } else {
      if (!resolver.FindOp(builtin_code, opcode->version())) {
        ReportError(error_reporter, "Unsupported builtin op: %s, version: %d",
                    EnumNameBuiltinOperator(builtin_code), opcode->version());
        return false;
      }
    }
  }
  return true;
}

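// Verifies the model's schema version and the structural validity of its
// subgraphs and tensors. Op availability is checked separately by VerifyOps.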
bool VerifyModel(const Model* model, ErrorReporter* error_reporter) {
  if (model == nullptr) {
    ReportError(error_reporter, "Invalid flatbuffer format");
    return false;
  }
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    ReportError(error_reporter, "Invalid model version %d", model->version());
    return false;
  }
  if (!VerifySubGraphs(*model, error_reporter)) {
    return false;
  }
  if (!VerifyTensors(*model, error_reporter)) {
    return false;
  }
  return true;
}

}  // namespace

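// Verifies that the buffer holds a well-formed TF Lite Model flatbuffer and
// that the model passes the structural checks above. This overload does not
// check op availability against a resolver.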
bool Verify(const void* buf, size_t len, ErrorReporter* error_reporter) {
  const Model* model = internal::VerifyFlatBufferAndGetModel(buf, len);
  return VerifyModel(model, error_reporter);
}

// Deprecated: see comments in header.
bool Verify(const void* buf, size_t len, const OpResolver& resolver,
            ErrorReporter* error_reporter) {
  const Model* model = internal::VerifyFlatBufferAndGetModel(buf, len);
  if (!VerifyModel(model, error_reporter)) {
    return false;
  }
  if (!VerifyOps(*model, resolver, error_reporter)) {
    return false;
  }
  return true;
}

}  // namespace tflite