/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <memory>
#include <vector>

#include "ruy/profiler/instrumentation.h"  // from @ruy
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace mirror_pad {
namespace {

// Nil value for paddingMode/offset.
const int kUnsetOffset = -1;

// Wrapper for params passed to the Eval<T> function.
template <typename T>
struct EvalData {
  const TfLiteTensor* padding_matrix = nullptr;
  const TfLiteIntArray* input_dims = nullptr;
  // Holds the number of elements covered by one step along the nth dimension
  // (a row-major stride): the value at the last dimension is 1, and at the
  // second-to-last dimension it is the size of the last dimension.
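  // For example (illustrative values): for dims {2, 3, 4} the corresponding
  // vector holds {12, 4, 1}.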
  const std::vector<int>* output_dims_num_elements = nullptr;
  const std::vector<int>* input_dims_num_elements = nullptr;
  const T* input_data = nullptr;

  int offset = kUnsetOffset;
  T* output_data = nullptr;
  int num_dims = 0;
};

// Helper method that fills the left and right pads.
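// The padding matrix is a [num_dims, 2] tensor flattened row-major, so the
// left pad for dimension `offset` lives at data[2 * offset] and the right
// pad at data[2 * offset + 1].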
template <typename T>
inline void GetPadding(const T* data, int offset, int64_t* left_pad,
                       int64_t* right_pad) {
  *left_pad = static_cast<int64_t>(*(data + offset * 2));
  *right_pad = static_cast<int64_t>(*(data + offset * 2 + 1));
}

inline void GetPadding(const TfLiteTensor* padding_matrix, int dimension,
                       int64_t* left_pad, int64_t* right_pad) {
  switch (padding_matrix->type) {
    case kTfLiteInt32:
      GetPadding(padding_matrix->data.i32, dimension, left_pad, right_pad);
      break;
    case kTfLiteInt64:
      GetPadding(padding_matrix->data.i64, dimension, left_pad, right_pad);
      break;
    default:
      // Unsupported padding type: leave the pads unchanged.
      return;
  }
}

// Returns the shape of the final output after padding.
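// For example (illustrative values): an input of shape [2, 3] with a padding
// matrix [[1, 1], [2, 2]] yields an output of shape [4, 7].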
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> GetPaddedOutputShape(
    const TfLiteTensor* input, const TfLiteTensor* padding_matrix) {
  const int input_dims = NumDimensions(input);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(input_dims), TfLiteIntArrayFree);

  int64_t left_pad = 0, right_pad = 0;
  for (int i = 0; i < input_dims; ++i) {
    GetPadding(padding_matrix, i, &left_pad, &right_pad);
    shape->data[i] = SizeOfDimension(input, i) + left_pad + right_pad;
  }
  return shape;
}

// Given an index along a padded output dimension and the left/right padding,
// returns the corresponding index along the input dimension.
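// For example (illustrative values): with input [1 2 3] and
// left_pad = right_pad = 2, REFLECT mode (offset = 1) produces the padded
// row [3 2 1 2 3 2 1], while SYMMETRIC mode (offset = 0) produces
// [2 1 1 2 3 3 2].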
inline int GetInputDimension(int padded_dimension, int left_pad, int right_pad,
                             int input_dim_size, int offset) {
  if (padded_dimension < left_pad) {
    const int original_ind = left_pad + offset - 1;
    return original_ind - (std::min(padded_dimension, original_ind - offset));
  }
  padded_dimension -= left_pad;
  if (padded_dimension >= input_dim_size) {
    padded_dimension -= input_dim_size;
    const int original_ind = input_dim_size - (1 + offset);
    return original_ind - std::min(padded_dimension, original_ind);
  }
  return padded_dimension;
}

// Given a flat index into the output array, returns the flat index of the
// corresponding value in the input array.
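// For example (illustrative values): for a 1-D input [1 2 3] padded with
// REFLECT mode by 2 on each side, GetFlatIndex(0, ...) returns 2, since the
// first padded element mirrors input[2].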
template <typename T>
int GetFlatIndex(int index, EvalData<T>* eval_data) {
  int flat_index = 0;
  int64_t left_pad = 0, right_pad = 0, dimension_index, index_in_input;
  for (int i = 0; i < eval_data->num_dims; ++i) {
    GetPadding(eval_data->padding_matrix, i, &left_pad, &right_pad);
    dimension_index = index / (*eval_data->output_dims_num_elements)[i];
    index_in_input =
        GetInputDimension(dimension_index, left_pad, right_pad,
                          eval_data->input_dims->data[i], eval_data->offset);
    flat_index += index_in_input * (*eval_data->input_dims_num_elements)[i];
    index %= (*eval_data->output_dims_num_elements)[i];
  }
  return flat_index;
}

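// Task that computes a contiguous [start, end) range of the flat output
// index space, so the work can be split across threadpool threads.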
template <typename T>
struct MirrorPadWorkerTask : cpu_backend_threadpool::Task {
  MirrorPadWorkerTask(EvalData<T>* eval_data, int start, int end)
      : eval_data(eval_data), start(start), end(end) {}
  void Run() override {
    auto* input_data = eval_data->input_data;
    auto* output_data = eval_data->output_data;
    for (int i = start; i < end; ++i) {
      output_data[i] = input_data[GetFlatIndex(i, eval_data)];
    }
  }

 private:
  EvalData<T>* eval_data;
  int start;
  int end;
};

}  // namespace

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  ruy::profiler::ScopeLabel label("MirrorPad");
  const TfLiteTensor* input_tensor;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
  const TfLiteTensor* padding_matrix;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding_matrix));
  auto* params =
      reinterpret_cast<TfLiteMirrorPaddingParams*>(node->builtin_data);

  if (params == nullptr) {
    return kTfLiteError;
  }
  const int input_dims = NumDimensions(input_tensor);

  TfLiteTensor* output_tensor;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
  if (IsDynamicTensor(output_tensor)) {
    auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
    if (output_size == nullptr) {
      return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(
        context->ResizeTensor(context, output_tensor, output_size.release()));
  }

  // Precompute row-major strides for the input and output shapes.
  std::vector<int> output_dims_num_elements(input_dims, 1);
  std::vector<int> input_dims_num_elements(input_dims, 1);
  for (int i = input_dims - 2; i >= 0; i--) {
    output_dims_num_elements[i] =
        output_dims_num_elements[i + 1] * output_tensor->dims->data[i + 1];
    input_dims_num_elements[i] =
        input_dims_num_elements[i + 1] * input_tensor->dims->data[i + 1];
  }

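  // REFLECT mode mirrors around the border element without repeating it
  // (offset = 1); SYMMETRIC mode repeats the border element (offset = 0).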
  const int offset =
      params->mode != TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect ? 0
                                                                           : 1;

  CpuBackendContext* cpu_backend_context =
      CpuBackendContext::GetFromContext(context);
  const int thread_count = cpu_backend_context->max_num_threads();
  TfLiteStatus status = kTfLiteOk;
  const int output_size = NumElements(output_tensor);
#define TF_LITE_MIRROR_PAD(type)                                          \
  EvalData<type> eval_data;                                               \
  eval_data.input_data = GetTensorData<type>(input_tensor);               \
  eval_data.input_dims = input_tensor->dims;                              \
  eval_data.output_dims_num_elements = &output_dims_num_elements;         \
  eval_data.input_dims_num_elements = &input_dims_num_elements;           \
  eval_data.num_dims = input_dims;                                        \
  eval_data.offset = offset;                                              \
  eval_data.output_data = GetTensorData<type>(output_tensor);             \
  eval_data.padding_matrix = padding_matrix;                              \
  std::vector<MirrorPadWorkerTask<type>> tasks;                           \
  tasks.reserve(thread_count);                                            \
  int start = 0;                                                          \
  /* Split the flat output range evenly across the available threads. */  \
  for (int i = 0; i < thread_count; ++i) {                                \
    int end = start + (output_size - start) / (thread_count - i);         \
    tasks.emplace_back(&eval_data, start, end);                           \
    start = end;                                                          \
  }                                                                       \
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),             \
                                  cpu_backend_context);

  switch (output_tensor->type) {
    case kTfLiteFloat32: {
      TF_LITE_MIRROR_PAD(float);
      break;
    }
    case kTfLiteInt32: {
      TF_LITE_MIRROR_PAD(int32_t);
      break;
    }
    case kTfLiteUInt8: {
      TF_LITE_MIRROR_PAD(uint8_t);
      break;
    }
    case kTfLiteInt8: {
      TF_LITE_MIRROR_PAD(int8_t);
      break;
    }
    case kTfLiteInt64: {
      TF_LITE_MIRROR_PAD(int64_t);
      break;
    }
    default:
      status = kTfLiteError;
      break;
  }
#undef TF_LITE_MIRROR_PAD
  return status;
}

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return nullptr;
}

void Free(TfLiteContext* context, void* buffer) {}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input_tensor;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
  const TfLiteTensor* padding_matrix;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding_matrix));
  TfLiteTensor* output_tensor;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));

  // The padding matrix must have shape [num_input_dims, 2].
  TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2);
  TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0),
                    NumDimensions(input_tensor));

  if (!IsConstantTensor(padding_matrix)) {
    SetTensorToDynamic(output_tensor);
    return kTfLiteOk;
  }
  // We have constant padding, so we can infer output size.
  auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
  if (output_size == nullptr) {
    return kTfLiteError;
  }
  return context->ResizeTensor(context, output_tensor, output_size.release());
}

}  // namespace mirror_pad

TfLiteRegistration* Register_MIRROR_PAD() {
  static TfLiteRegistration r = {mirror_pad::Init, mirror_pad::Free,
                                 mirror_pad::Prepare, mirror_pad::Eval};
  return &r;
}

}  // namespace builtin
}  // namespace ops
}  // namespace tflite