// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/log.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>

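// Defines a dense tensor Value in the subgraph and writes its ID to id_out.
// If external_id is XNN_INVALID_VALUE_ID, a new internal Value is allocated;
// otherwise the reserved external Value slot with that ID is used.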
enum xnn_status xnn_define_tensor_value(
  xnn_subgraph_t subgraph,
  enum xnn_datatype datatype,
  size_t num_dims,
  const size_t* dims,
  const void* data,
  uint32_t external_id,
  uint32_t flags,
  uint32_t* id_out)
{
  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to create Dense Tensor value: XNNPACK is not initialized");
    return xnn_status_uninitialized;
  }

  if (external_id != XNN_INVALID_VALUE_ID && external_id >= subgraph->external_value_ids) {
    xnn_log_error(
      "failed to create Dense Tensor value: "
      "external ID %" PRIu32 " exceeds the number of reserved external IDs in subgraph (%" PRIu32 ")",
      external_id, subgraph->external_value_ids);
    return xnn_status_invalid_parameter;
  }

  if (num_dims > XNN_MAX_TENSOR_DIMS) {
    xnn_log_error("failed to create Dense Tensor value: num of dimensions exceeds XNNPACK limit (%d)",
      XNN_MAX_TENSOR_DIMS);
    return xnn_status_unsupported_parameter;
  }

  switch (datatype) {
    case xnn_datatype_fp32:
    case xnn_datatype_fp16:
      break;
    default:
      xnn_log_error("failed to create Dense Tensor value: unsupported datatype %s (%d)",
        xnn_datatype_to_string(datatype), datatype);
      return xnn_status_unsupported_parameter;
  }

  struct xnn_value* value = subgraph->values + external_id;
  if (external_id == XNN_INVALID_VALUE_ID) {
    value = xnn_subgraph_new_internal_value(subgraph);
    if (value == NULL) {
      return xnn_status_out_of_memory;
    }
  }
  value->type = xnn_value_type_dense_tensor;
  value->datatype = datatype;
  value->shape.num_dims = num_dims;
  memcpy(value->shape.dim, dims, num_dims * sizeof(size_t));
  value->flags = flags;
  value->data = data;

  *id_out = value->id;
  return xnn_status_success;
}

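// Defines a per-tensor quantized dense tensor Value in the subgraph. The zero
// point must fit the target datatype ([-128, 127] for QINT8, [0, 255] for
// QUINT8, exactly 0 for QINT32), and the scale must be finite, normalized,
// and positive.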
enum xnn_status xnn_define_quantized_tensor_value(
  xnn_subgraph_t subgraph,
  enum xnn_datatype datatype,
  int32_t zero_point,
  float scale,
  size_t num_dims,
  const size_t* dims,
  const void* data,
  uint32_t external_id,
  uint32_t flags,
  uint32_t* id_out)
{
  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to create Quantized Dense Tensor value: XNNPACK is not initialized");
    return xnn_status_uninitialized;
  }

  if (external_id != XNN_INVALID_VALUE_ID && external_id >= subgraph->external_value_ids) {
    xnn_log_error(
      "failed to create Quantized Dense Tensor value: "
      "external ID %" PRIu32 " exceeds the number of reserved external IDs in subgraph (%" PRIu32 ")",
      external_id, subgraph->external_value_ids);
    return xnn_status_invalid_parameter;
  }

  if (num_dims > XNN_MAX_TENSOR_DIMS) {
    xnn_log_error(
      "failed to create Quantized Dense Tensor value: num of dimensions exceeds XNNPACK limit (%d)",
      XNN_MAX_TENSOR_DIMS);
    return xnn_status_unsupported_parameter;
  }

  switch (datatype) {
    case xnn_datatype_qint8:
      if ((int32_t) (int8_t) zero_point != zero_point) {
        xnn_log_error(
          "failed to create Quantized Dense Tensor value: invalid zero point %" PRId32 " outside the [-128, 127] range",
          zero_point);
        return xnn_status_invalid_parameter;
      }
      break;
    case xnn_datatype_quint8:
      if ((int32_t) (uint8_t) zero_point != zero_point) {
        xnn_log_error(
          "failed to create Quantized Dense Tensor value: invalid zero point %" PRId32 " outside the [0, 255] range",
          zero_point);
        return xnn_status_invalid_parameter;
      }
      break;
    case xnn_datatype_qint32:
      if (zero_point != 0) {
        xnn_log_error(
          "failed to create Quantized Dense Tensor value: invalid non-zero zero point %" PRId32,
          zero_point);
        return xnn_status_invalid_parameter;
      }
      break;
    default:
      xnn_log_error("failed to create Quantized Dense Tensor value: unsupported datatype %s (%d)",
        xnn_datatype_to_string(datatype), datatype);
      return xnn_status_unsupported_parameter;
  }

  if (scale <= 0.0f || !isnormal(scale)) {
    xnn_log_error(
      "failed to create Quantized Dense Tensor value with %.7g scale: scale must be finite, normalized, and positive",
      scale);
    return xnn_status_invalid_parameter;
  }

  struct xnn_value* value = subgraph->values + external_id;
  if (external_id == XNN_INVALID_VALUE_ID) {
    value = xnn_subgraph_new_internal_value(subgraph);
    if (value == NULL) {
      return xnn_status_out_of_memory;
    }
  }
  value->type = xnn_value_type_dense_tensor;
  value->datatype = datatype;
  value->quantization.zero_point = zero_point;
  value->quantization.scale = scale;
  value->shape.num_dims = num_dims;
  memcpy(value->shape.dim, dims, num_dims * sizeof(size_t));
  value->flags = flags;
  value->data = data;

  *id_out = value->id;
  return xnn_status_success;
}

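// Defines a channelwise quantized dense tensor Value in the subgraph. A
// separate scale is supplied for each entry along channel_dim, so the scale
// array is expected to hold dims[channel_dim] elements; the zero point is
// fixed at 0.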
enum xnn_status xnn_define_channelwise_quantized_tensor_value(
  xnn_subgraph_t subgraph,
  enum xnn_datatype datatype,
  const float* scale,
  size_t num_dims,
  size_t channel_dim,
  const size_t* dims,
  const void* data,
  uint32_t external_id,
  uint32_t flags,
  uint32_t* id_out)
{
  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to create Channelwise Quantized Dense Tensor value: XNNPACK is not initialized");
    return xnn_status_uninitialized;
  }

  if (external_id != XNN_INVALID_VALUE_ID && external_id >= subgraph->external_value_ids) {
    xnn_log_error(
      "failed to create Channelwise Quantized Dense Tensor value: "
      "external ID %" PRIu32 " exceeds the number of reserved external IDs in subgraph (%" PRIu32 ")",
      external_id, subgraph->external_value_ids);
    return xnn_status_invalid_parameter;
  }

  if (num_dims == 0) {
    xnn_log_error(
      "failed to create Channelwise Quantized Dense Tensor value: no channel dimension exists");
    return xnn_status_invalid_parameter;
  }

  if (num_dims > XNN_MAX_TENSOR_DIMS) {
    xnn_log_error(
      "failed to create Channelwise Quantized Dense Tensor value: num of dimensions exceeds XNNPACK limit (%d)",
      XNN_MAX_TENSOR_DIMS);
    return xnn_status_unsupported_parameter;
  }

  if (channel_dim >= num_dims) {
    xnn_log_error(
      "failed to create Channelwise Quantized Dense Tensor value: "
      "channel dimension index %zu is out of range for %zu-dimensional tensor",
      channel_dim, num_dims);
    return xnn_status_invalid_parameter;
  }

  switch (datatype) {
    case xnn_datatype_qcint8:
    case xnn_datatype_qcint32:
      break;
    default:
      xnn_log_error("failed to create Channelwise Quantized Dense Tensor value: unsupported datatype %s (%d)",
        xnn_datatype_to_string(datatype), datatype);
      return xnn_status_unsupported_parameter;
  }

  // One scale per element along the channel dimension.
  const size_t channels = dims[channel_dim];
  for (size_t channel = 0; channel < channels; channel++) {
    if (scale[channel] <= 0.0f || !isnormal(scale[channel])) {
      xnn_log_error(
        "failed to create Channelwise Quantized Dense Tensor value with %.7g scale in channel #%zu: "
        "scale must be finite, normalized, and positive",
        scale[channel], channel);
      return xnn_status_invalid_parameter;
    }
  }

  struct xnn_value* value = subgraph->values + external_id;
  if (external_id == XNN_INVALID_VALUE_ID) {
    value = xnn_subgraph_new_internal_value(subgraph);
    if (value == NULL) {
      return xnn_status_out_of_memory;
    }
  }
  value->type = xnn_value_type_dense_tensor;
  value->datatype = datatype;
  value->quantization.zero_point = 0;
  value->quantization.channelwise_scale = scale;
  value->quantization.channel_dimension = channel_dim;
  value->shape.num_dims = num_dims;
  memcpy(value->shape.dim, dims, num_dims * sizeof(size_t));
  value->flags = flags;
  value->data = data;

  *id_out = value->id;
  return xnn_status_success;
}

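// Returns the total number of elements in the shape, i.e. the product of all
// of its dimensions.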
size_t xnn_shape_multiply_all_dims(
  const struct xnn_shape shape[restrict XNN_MIN_ELEMENTS(1)])
{
  size_t batch_size = 1;
  for (size_t i = 0; i < shape->num_dims; i++) {
    batch_size *= shape->dim[i];
  }
  return batch_size;
}

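// Returns the product of the leading (batch) dimensions, i.e. all dimensions
// except the trailing num_nonbatch_dims dimensions.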
size_t xnn_shape_multiply_batch_dims(
  const struct xnn_shape shape[restrict XNN_MIN_ELEMENTS(1)],
  size_t num_nonbatch_dims)
{
  size_t batch_size = 1;
  for (size_t i = 0; i + num_nonbatch_dims < shape->num_dims; i++) {
    batch_size *= shape->dim[i];
  }
  return batch_size;
}

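// Returns the product of all dimensions except the last one, which is treated
// as the channel dimension.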
size_t xnn_shape_multiply_non_channel_dims(
  const struct xnn_shape shape[restrict XNN_MIN_ELEMENTS(1)])
{
  size_t batch_size = 1;
  for (size_t i = 0; i + 1 < shape->num_dims; i++) {
    batch_size *= shape->dim[i];
  }
  return batch_size;
}

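// Returns the size of the tensor data in bytes: the per-element size of the
// Value's datatype multiplied by the total number of elements in its shape.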
size_t xnn_tensor_get_size(
  xnn_subgraph_t subgraph,
  uint32_t value_id)
{
  assert(value_id < subgraph->num_values);

  const struct xnn_value* value = subgraph->values + value_id;
  assert(value->type == xnn_value_type_dense_tensor);
  assert(value->datatype != xnn_datatype_invalid);

  size_t size = 0;
  switch (value->datatype) {
    case xnn_datatype_fp16:
      size = 2;
      break;
    case xnn_datatype_fp32:
      size = 4;
      break;
    case xnn_datatype_qint8:
    case xnn_datatype_quint8:
    case xnn_datatype_qcint8:
      size = 1;
      break;
    case xnn_datatype_qint32:
    case xnn_datatype_qcint32:
      size = 4;
      break;
    case xnn_datatype_invalid:
      XNN_UNREACHABLE;
  }

  return size * xnn_shape_multiply_all_dims(&value->shape);
}