1 | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. |
2 | |
3 | Licensed under the Apache License, Version 2.0 (the "License"); |
4 | you may not use this file except in compliance with the License. |
5 | You may obtain a copy of the License at |
6 | |
7 | http://www.apache.org/licenses/LICENSE-2.0 |
8 | |
9 | Unless required by applicable law or agreed to in writing, software |
10 | distributed under the License is distributed on an "AS IS" BASIS, |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | See the License for the specific language governing permissions and |
13 | limitations under the License. |
14 | ==============================================================================*/ |
15 | |
16 | #include "tensorflow/lite/kernels/internal/reference/floor.h" |
17 | |
18 | #include "tensorflow/lite/c/common.h" |
19 | #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" |
20 | #include "tensorflow/lite/kernels/internal/tensor.h" |
21 | #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" |
22 | #include "tensorflow/lite/kernels/kernel_util.h" |
23 | |
24 | namespace tflite { |
25 | namespace ops { |
26 | namespace builtin { |
27 | namespace floor { |
28 | |
// Tensor indices within the node's input/output arrays.
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;

// Selects which Floor implementation Eval dispatches to.
enum KernelType {
  kReference,         // portable reference_ops implementation
  kGenericOptimized,  // optimized_ops implementation
};
36 | |
37 | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { |
38 | const TfLiteTensor* input; |
39 | TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); |
40 | TfLiteTensor* output; |
41 | TF_LITE_ENSURE_OK(context, |
42 | GetOutputSafe(context, node, kOutputTensor, &output)); |
43 | TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); |
44 | TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); |
45 | TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); |
46 | output->type = input->type; |
47 | TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims); |
48 | return context->ResizeTensor(context, output, output_size); |
49 | } |
50 | |
51 | template <KernelType type> |
52 | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { |
53 | const TfLiteTensor* input; |
54 | TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); |
55 | TfLiteTensor* output; |
56 | TF_LITE_ENSURE_OK(context, |
57 | GetOutputSafe(context, node, kOutputTensor, &output)); |
58 | |
59 | if (type == kGenericOptimized) { |
60 | optimized_ops::Floor(GetTensorShape(input), GetTensorData<float>(input), |
61 | GetTensorShape(output), GetTensorData<float>(output)); |
62 | } else { |
63 | reference_ops::Floor(GetTensorShape(input), GetTensorData<float>(input), |
64 | GetTensorShape(output), GetTensorData<float>(output)); |
65 | } |
66 | |
67 | return kTfLiteOk; |
68 | } |
69 | } // namespace floor |
70 | |
71 | TfLiteRegistration* Register_FLOOR_REF() { |
72 | static TfLiteRegistration r = {/*init=*/nullptr, |
73 | /*free=*/nullptr, floor::Prepare, |
74 | floor::Eval<floor::kReference>}; |
75 | return &r; |
76 | } |
77 | |
78 | TfLiteRegistration* Register_FLOOR() { |
79 | static TfLiteRegistration r = {/*init=*/nullptr, |
80 | /*free=*/nullptr, floor::Prepare, |
81 | floor::Eval<floor::kGenericOptimized>}; |
82 | return &r; |
83 | } |
84 | |
85 | } // namespace builtin |
86 | } // namespace ops |
87 | } // namespace tflite |
88 | |