/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/// \file
/// Provides options to an interpreter.
///
#ifndef TENSORFLOW_LITE_INTERPRETER_OPTIONS_H_
#define TENSORFLOW_LITE_INTERPRETER_OPTIONS_H_

namespace tflite {

/// Options class for `Interpreter`.
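/// A minimal usage sketch, assuming the experimental `InterpreterBuilder`
/// overload that accepts an `InterpreterOptions*`, which may itself change;
/// `model` is a loaded `FlatBufferModel` and `resolver` an `OpResolver`:
///
/// <pre><code>
/// tflite::InterpreterOptions options;
/// options.SetPreserveAllTensors();
///
/// std::unique_ptr<tflite::Interpreter> interpreter;
/// tflite::InterpreterBuilder(*model, resolver, &options)(&interpreter);
/// </code></pre>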
/// WARNING: This is an experimental API and subject to change.
class InterpreterOptions {
 public:
  InterpreterOptions()
      : experimental_preserve_all_tensors_(false),
        experimental_ensure_dynamic_tensors_are_released_(false),
        experimental_optimize_memory_for_large_tensors_(0) {}

  /// Preserves all intermediate tensors for debugging.
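  /// A sketch of the intended debugging flow, assuming `interpreter` was
  /// built with these options and `tensor_index` (hypothetical) names an
  /// intermediate tensor of interest:
  ///
  /// <pre><code>
  /// interpreter->Invoke();
  /// // Intermediate results are still readable because they were preserved.
  /// const TfLiteTensor* t = interpreter->tensor(tensor_index);
  /// </code></pre>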
  /// WARNING: This is an experimental API and subject to change.
  void SetPreserveAllTensors(bool value = true) {
    experimental_preserve_all_tensors_ = value;
  }

  /// Returns true if the `experimental_preserve_all_tensors_` feature is
  /// enabled.
  /// WARNING: This is an experimental API and subject to change.
  bool GetPreserveAllTensors() { return experimental_preserve_all_tensors_; }

  /// Forces all intermediate dynamic tensors to be released as soon as the
  /// model no longer uses them. Use this option with caution: it may reduce
  /// the model's peak memory usage, but at the cost of slower inference.
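  /// A minimal sketch of enabling this trade-off:
  ///
  /// <pre><code>
  /// tflite::InterpreterOptions options;
  /// options.SetEnsureDynamicTensorsAreReleased();
  /// // GetEnsureDynamicTensorsAreReleased() now returns true.
  /// </code></pre>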
  /// WARNING: This is an experimental API and subject to change.
  void SetEnsureDynamicTensorsAreReleased(bool value = true) {
    experimental_ensure_dynamic_tensors_are_released_ = value;
  }

  /// Returns true if the `experimental_ensure_dynamic_tensors_are_released_`
  /// feature is enabled.
  /// WARNING: This is an experimental API and subject to change.
  bool GetEnsureDynamicTensorsAreReleased() {
    return experimental_ensure_dynamic_tensors_are_released_;
  }

  /// Uses dynamic tensor allocation and deallocation for large tensors
  /// instead of the static memory planner. Dynamic tensors are allocated just
  /// before they are needed and released as soon as they are no longer
  /// needed. This reduces peak memory usage, but it may add some inference
  /// latency. `value` is the size threshold (in bytes, default 1 << 20) used
  /// to decide which tensors count as large. Enabling this option also
  /// enables `experimental_ensure_dynamic_tensors_are_released_`.
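  /// For example, this sketch enables the dynamic allocation path for
  /// tensors larger than the default 1 MiB threshold:
  ///
  /// <pre><code>
  /// tflite::InterpreterOptions options;
  /// options.OptimizeMemoryForLargeTensors();
  /// // GetDynamicAllocationForLargeTensors() == 1 << 20
  /// // GetEnsureDynamicTensorsAreReleased() == true
  /// </code></pre>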
  /// WARNING: This is an experimental API and subject to change.
  void OptimizeMemoryForLargeTensors(int value = 1 << 20) {
    if (value > 0) {
      experimental_optimize_memory_for_large_tensors_ = value;
      experimental_ensure_dynamic_tensors_are_released_ = true;
    }
  }

  /// Returns the size threshold (in bytes) for the dynamic tensor allocation
  /// method, or zero if the feature is not enabled.
  /// WARNING: This is an experimental API and subject to change.
  int GetDynamicAllocationForLargeTensors() {
    return experimental_optimize_memory_for_large_tensors_;
  }

 private:
  bool experimental_preserve_all_tensors_;
  bool experimental_ensure_dynamic_tensors_are_released_;
  int experimental_optimize_memory_for_large_tensors_;
};

}  // namespace tflite

#endif  // TENSORFLOW_LITE_INTERPRETER_OPTIONS_H_