1 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved. |
2 | |
3 | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | you may not use this file except in compliance with the License. |
5 | You may obtain a copy of the License at |
6 | |
7 | http://www.apache.org/licenses/LICENSE-2.0 |
8 | |
9 | Unless required by applicable law or agreed to in writing, software |
10 | distributed under the License is distributed on an "AS IS" BASIS, |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | See the License for the specific language governing permissions and |
13 | limitations under the License. |
14 | ==============================================================================*/ |
15 | #ifndef TENSORFLOW_LITE_KERNELS_LSTM_SHARED_H_ |
16 | #define TENSORFLOW_LITE_KERNELS_LSTM_SHARED_H_ |
17 | |
namespace tflite {
namespace ops {
namespace builtin {
namespace lstm {
// Tensor-index constants shared by LSTM kernel implementations.
//
// For full inputs kernel (24-inputs).
// Please note the 20-input full kernel is deprecated and only kept
// here for backward compatibility.
//
// Each constant below is an index into the op's input tensor list (as laid
// out in the model's flatbuffer). Tensors marked "Optional" may be absent;
// implementations must check for a disconnected/null tensor at that index.
namespace full {

// Input Tensors of size {n_batch, n_input}
constexpr int kInputTensor = 0;

// Input weight tensors of size: {n_cell, n_input}
// The input-gate weights are optional because CIFG (coupled input and
// forget gate) variants omit the input gate entirely.
constexpr int kInputToInputWeightsTensor = 1;  // Optional
constexpr int kInputToForgetWeightsTensor = 2;
constexpr int kInputToCellWeightsTensor = 3;
constexpr int kInputToOutputWeightsTensor = 4;

// Recurrent weight tensors of size {n_cell, n_output}
constexpr int kRecurrentToInputWeightsTensor = 5;  // Optional
constexpr int kRecurrentToForgetWeightsTensor = 6;
constexpr int kRecurrentToCellWeightsTensor = 7;
constexpr int kRecurrentToOutputWeightsTensor = 8;

// Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
// All three are optional: models without peephole connections omit them.
constexpr int kCellToInputWeightsTensor = 9;    // Optional
constexpr int kCellToForgetWeightsTensor = 10;  // Optional
constexpr int kCellToOutputWeightsTensor = 11;  // Optional

// Gates bias tensors of size {n_cell}
constexpr int kInputGateBiasTensor = 12;  // Optional
constexpr int kForgetGateBiasTensor = 13;
constexpr int kCellGateBiasTensor = 14;
constexpr int kOutputGateBiasTensor = 15;

// Projection weight tensor of size {n_output, n_cell}
constexpr int kProjectionWeightsTensor = 16;  // Optional
// Projection bias tensor of size {n_output}
constexpr int kProjectionBiasTensor = 17;  // Optional

// These state tensors are defined as variable tensors, and will be modified by
// this op.
constexpr int kOutputStateTensor = 18;
constexpr int kCellStateTensor = 19;

// Layer norm coefficient tensors of size {n_cell}, representing a diagonal
// matrix. Present only for layer-normalized LSTM variants (inputs 20-23
// distinguish the 24-input signature from the deprecated 20-input one).
constexpr int kInputLayerNormCoefficientsTensor = 20;   // Optional
constexpr int kForgetLayerNormCoefficientsTensor = 21;  // Optional
constexpr int kCellLayerNormCoefficientsTensor = 22;    // Optional
constexpr int kOutputLayerNormCoefficientsTensor = 23;  // Optional

// Output tensors.
// NOTE: this is an index into the op's *output* tensor list, a separate
// index space from the input indices above (hence it can also be 0).
constexpr int kOutputTensor = 0;
}  // namespace full

}  // namespace lstm
}  // namespace builtin
}  // namespace ops
}  // namespace tflite
78 | #endif // TENSORFLOW_LITE_KERNELS_LSTM_SHARED_H_ |
79 | |