/**
 * Copyright (c) Glow Contributors. See CONTRIBUTORS file.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "glow/Base/Tensor.h"
#include "glow/Base/TensorSerialization.h"
#include "glow/Graph/Graph.h"
#include "glow/Quantization/Base/Base.h"

#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"

#include "gtest/gtest.h"

using namespace glow;

TEST(Tensor, iteration) {
  auto content = {1.2f, 12.1f, 51.0f, 1515.2f};
  Tensor T = content;

  auto H = T.getHandle<float>();

  std::vector<float> elems;
  for (auto e : H) {
    elems.push_back(e);
  }

  EXPECT_TRUE(elems == std::vector<float>(content));
}

TEST(Tensor, init) {
  Tensor T = {1.2f, 12.1f, 51.0f, 1515.2f};

  auto H = T.getHandle<>();

  H.dump();

  EXPECT_EQ(int(H.at({2})), 51);

  H = {1.1f, 1.2f, 1.3f, 1.4f};

  EXPECT_EQ(int(H.at({0})), 1);

  H.dump();
}
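
// Note on the handle API as used throughout this file: getHandle<>() with no
// template argument is only used on float tensors, while other element kinds
// are accessed with an explicit type (e.g. getHandle<int8_t>() or
// getHandle<float16_t>()). This is an observation about the usage in these
// tests, not a formal statement of the Tensor API contract.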

/// Test that Tensors with zero-dimensions work as expected.
TEST(Tensor, zeroDimTensors) {
  Tensor T0(ElemKind::FloatTy, {0});
  Tensor T1(ElemKind::FloatTy, {0, 100});
  Tensor T2(ElemKind::FloatTy, {100, 0});

  EXPECT_EQ(T0.getUnpaddedSizeInBytes(), 0);
  EXPECT_EQ(T1.getUnpaddedSizeInBytes(), 0);
  EXPECT_EQ(T2.getUnpaddedSizeInBytes(), 0);
  EXPECT_EQ(T0.getSizeInBytes(), 0);
  EXPECT_EQ(T1.getSizeInBytes(), 0);
  EXPECT_EQ(T2.getSizeInBytes(), 0);
  EXPECT_EQ(T0.size(), 0);
  EXPECT_EQ(T1.size(), 0);
  EXPECT_EQ(T2.size(), 0);

  // Nothing is allocated for these tensors.
  EXPECT_EQ(T0.getUnsafePtr(), nullptr);
  EXPECT_EQ(T1.getUnsafePtr(), nullptr);
  EXPECT_EQ(T2.getUnsafePtr(), nullptr);

  T0.getHandle<>().dump();
  T1.getHandle<>().dump();
  T2.getHandle<>().dump();

  // Now test getting unowned views of partial tensors that are zero sized.
  Tensor T4(ElemKind::FloatTy, {10, 0, 10});
  Type ty(ElemKind::FloatTy, {10, 5, 10});
  Tensor T5(T4.getUnsafePtr(), &ty, T4.getSizeInBytes());
  EXPECT_EQ(T4.getUnsafePtr(), T5.getUnsafePtr());
  EXPECT_EQ(T5.getUnpaddedSizeInBytes(), 0);
  EXPECT_EQ(T5.getSizeInBytes(), ty.getSizeInBytes());
  EXPECT_EQ(T5.size(), ty.size());
  T5.getHandle<>().dump();
}

TEST(Tensor, getSliceSize) {
  // Test the Type::getSliceSize() function.

  Tensor X(ElemKind::FloatTy, {3, 2, 10, 4});
  Tensor Y(ElemKind::FloatTy, {1, 2, 3, 4});

  EXPECT_EQ(X.getType().getSliceSize(0), 3 * 2 * 10 * 4);
  EXPECT_EQ(X.getType().getSliceSize(1), 2 * 10 * 4);
  EXPECT_EQ(X.getType().getSliceSize(2), 10 * 4);
  EXPECT_EQ(X.getType().getSliceSize(3), 4);
  EXPECT_EQ(Y.getType().getSliceSize(0), 1 * 2 * 3 * 4);
  EXPECT_EQ(Y.getType().getSliceSize(3), 4);
}
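
// As the expectations above suggest, getSliceSize(n) is the number of
// elements in a slice taken at dimension n, i.e. the product of the
// dimensions from index n through the last one. A minimal follow-on sketch of
// that reading (hypothetical extra test, using only APIs already exercised in
// this file):
TEST(Tensor, getSliceSizeSketch) {
  Tensor Z(ElemKind::FloatTy, {4, 6});
  // Slicing at dimension 0 covers the whole tensor; slicing at the last
  // dimension covers a single row of 6 elements.
  EXPECT_EQ(Z.getType().getSliceSize(0), Z.size());
  EXPECT_EQ(Z.getType().getSliceSize(1), 6);
}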

TEST(Tensor, randomizeInt) {
  PseudoRNG PRNG;
  Tensor T(ElemKind::Int8QTy, {10, 10}, 1.0, 0);
  auto H = T.getHandle<int8_t>();
  H.randomize(-50, 50, PRNG);

  // Check that all of the numbers fall in the range -50 to 50.
  for (auto elem : H) {
    EXPECT_NEAR(elem, 0, 50);
  }
}

TEST(Tensor, randomizeFloat16) {
  PseudoRNG PRNG;
  Tensor T(ElemKind::Float16Ty, {10, 10});
  auto H = T.getHandle<float16_t>();
  H.randomize(-50, 50, PRNG);

  // Check that all of the numbers fall in the range -50 to 50.
  for (auto elem : H) {
    EXPECT_NEAR(elem, 0, 50);
  }
}

TEST(Tensor, randomizeBFloat16) {
  PseudoRNG PRNG;
  Tensor T(ElemKind::BFloat16Ty, {10, 10});
  auto H = T.getHandle<bfloat16_t>();
  H.randomize(-50, 50, PRNG);

  // Check that all of the numbers fall in the range -50 to 50.
  for (auto elem : H) {
    EXPECT_NEAR(elem, 0, 50);
  }
}

TEST(Tensor, clone) {
  Tensor T = {1.2f, 12.1f, 51.0f, 1515.2f};
  auto H = T.getHandle<>();

  Tensor v;
  v.assign(&T);
  auto vH = v.getHandle<>();

  EXPECT_EQ(int(vH.at({0})), 1);

  // Update the original tensor
  H = {0.11f, 0.22f, 0.33f, 0.44f};

  // The cloned vector is unmodified.
  EXPECT_EQ(int(vH.at({1})), 12);
}

TEST(Tensor, minMaxArg) {
  {
    Tensor T = {1, 10, 20, -1, 30};
    auto res = T.getHandle().minMaxArg();
    EXPECT_EQ(3, res.first);
    EXPECT_EQ(4, res.second);
  }

  {
    Tensor T = {1, 1, 1, 1, 1, 1};
    auto res = T.getHandle().minMaxArg();
    EXPECT_EQ(0, res.first);
    EXPECT_EQ(0, res.second);
  }
}

TEST(Tensor, isZero) {
  {
    Tensor T = {4, 0, 0, 0, 0};
    EXPECT_FALSE(T.getHandle<>().isZero());
  }

  {
    Tensor T = {0, 0, 0, 0, 0};
    EXPECT_TRUE(T.getHandle<>().isZero());
  }

  {
    Tensor T = {0, 0, 0, 0, 0, 5};
    EXPECT_FALSE(T.getHandle<>().isZero());
  }
}

TEST(Tensor, isTiled) {
  // Single axis testing.
  {
    Tensor T(ElemKind::FloatTy, {2, 3});
    T.getHandle() = {
        1, 2, 3, 1, 2, 3,
    };
    EXPECT_TRUE(T.isTiled(0, 1));
    EXPECT_TRUE(T.isTiled(0, 2));
    EXPECT_FALSE(T.isTiled(1, 1));
    EXPECT_FALSE(T.isTiled(1, 2));
    EXPECT_TRUE(T.isTiled(1, 3));
  }
  {
    Tensor T(ElemKind::FloatTy, {2, 4});
    T.getHandle() = {1, 2, 1, 2, 3, 4, 3, 4};
    EXPECT_FALSE(T.isTiled(0, 1));
    EXPECT_TRUE(T.isTiled(0, 2));
    EXPECT_FALSE(T.isTiled(1, 1));
    EXPECT_TRUE(T.isTiled(1, 2));
    EXPECT_FALSE(T.isTiled(1, 3));
    EXPECT_TRUE(T.isTiled(1, 4));
  }
  {
    Tensor T(ElemKind::FloatTy, {2, 4});
    T.getHandle() = {1, 2, 1, 2, 1, 2, 1, 2};
    EXPECT_TRUE(T.isTiled(0, 1));
    EXPECT_TRUE(T.isTiled(0, 2));
    EXPECT_FALSE(T.isTiled(1, 1));
    EXPECT_TRUE(T.isTiled(1, 2));
    EXPECT_FALSE(T.isTiled(1, 3));
    EXPECT_TRUE(T.isTiled(1, 4));
  }
  {
    Tensor T(ElemKind::FloatTy, {2, 4});
    T.getHandle() = {1, 2, 1, 2, 3, 4, 3, 44};
    EXPECT_FALSE(T.isTiled(0, 1));
    EXPECT_TRUE(T.isTiled(0, 2));
    EXPECT_FALSE(T.isTiled(1, 1));
    EXPECT_FALSE(T.isTiled(1, 2));
    EXPECT_FALSE(T.isTiled(1, 3));
    EXPECT_TRUE(T.isTiled(1, 4));
  }
  {
    Tensor T(ElemKind::FloatTy, {5});
    T.getHandle() = {1, 2, 3, 1, 2};
    EXPECT_FALSE(T.isTiled(0, 3));
    EXPECT_TRUE(T.isTiled(0, 3, /* fractional */ true));
  }
  // Multiple axis testing.
  {
    Tensor T(ElemKind::FloatTy, {2, 3});
    T.getHandle() = {
        1, 2, 1, 1, 2, 1,
    };
    EXPECT_FALSE(T.isTiled({0, 1}, {1, 1}));
    EXPECT_FALSE(T.isTiled({0, 1}, {1, 2}));
    EXPECT_TRUE(T.isTiled({0, 1}, {1, 2}, /* fractional */ true));
    EXPECT_TRUE(T.isTiled({0, 1}, {1, 3}));
    EXPECT_FALSE(T.isTiled({0, 1}, {2, 1}));
    EXPECT_FALSE(T.isTiled({0, 1}, {2, 2}));
    EXPECT_TRUE(T.isTiled({0, 1}, {2, 2}, /* fractional */ true));
    EXPECT_TRUE(T.isTiled({0, 1}, {2, 3}));
  }
  {
    Tensor T(ElemKind::FloatTy, {2, 4});
    T.getHandle() = {
        1, 2, 1, 2, 1, 2, 1, 2,
    };
    EXPECT_FALSE(T.isTiled({0, 1}, {1, 1}));
    EXPECT_TRUE(T.isTiled({0, 1}, {1, 2}));
    EXPECT_FALSE(T.isTiled({0, 1}, {1, 3}));
    EXPECT_TRUE(T.isTiled({0, 1}, {1, 4}));
    EXPECT_FALSE(T.isTiled({0, 1}, {2, 1}));
    EXPECT_TRUE(T.isTiled({0, 1}, {2, 2}));
    EXPECT_FALSE(T.isTiled({0, 1}, {2, 3}));
    EXPECT_TRUE(T.isTiled({0, 1}, {2, 4}));
  }
}
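
// Reading of the isTiled() checks above: T.isTiled(axis, size) asks whether
// the tensor content repeats with period `size` along that axis, and the
// optional `fractional` flag additionally accepts a partial repetition at the
// end. A minimal sketch restating the single-axis case (hypothetical extra
// test, not part of the original suite):
TEST(Tensor, isTiledSketch) {
  Tensor T(ElemKind::FloatTy, {3, 2});
  T.getHandle() = {7, 8, 7, 8, 7, 8};
  EXPECT_TRUE(T.isTiled(0, 1));  // Every row repeats the first row.
  EXPECT_FALSE(T.isTiled(1, 1)); // Columns differ, so no tile of width 1.
  EXPECT_TRUE(T.isTiled(1, 2));  // A tile as wide as the axis always matches.
}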

TEST(Tensor, inBounds) {
  Tensor A(ElemKind::FloatTy, {15, 5, 3});

  EXPECT_TRUE(A.isInBounds({14, 4, 2}));
  EXPECT_TRUE(A.isInBounds({0, 0, 0}));
  EXPECT_FALSE(A.isInBounds({15, 4, 2}));
  EXPECT_FALSE(A.isInBounds({5, 4, 3}));
}

TEST(Tensor, equalHandles) {
  {
    Tensor A = {1.0, 20};
    Tensor B = {1.0};
    EXPECT_FALSE(A.isEqual(B));
  }

  {
    Tensor A = {1.0, 20};
    Tensor B = {1.0, 20};
    EXPECT_TRUE(A.isEqual(B));
  }

  {
    Tensor A = {1.0, 20};
    Tensor B = {1.0, 30};
    EXPECT_FALSE(A.isEqual(B));
  }
}

TEST(Tensor, equalNAN) {
  {
    Tensor A = {0.5, 0, 0, 25};
    Tensor B = {NAN, 0, NAN, NAN};
    EXPECT_FALSE(A.isEqual(B));
  }
  {
    Tensor A = {NAN, 0, NAN, NAN};
    Tensor B = {0.5, 0, 0, 25};
    EXPECT_FALSE(A.isEqual(B));
  }
  {
    Tensor A = {NAN, 0, NAN, NAN};
    Tensor B = {NAN, 0, NAN, NAN};
    EXPECT_FALSE(A.isEqual(B));
  }
}

template <typename Ty> void testAssignment(const Type &ty) {
  // Testing some tensor operations.
  Tensor T(ty);

  auto Handle = T.getHandle<Ty>();

  for (unsigned i = 0; i < 10; i++) {
    for (unsigned x = 0; x < 32; x++) {
      for (unsigned y = 0; y < 20; y++) {
        for (unsigned z = 0; z < 64; z++) {
          Handle.at({x, y, z}) = x + y + z;
        }
      }
    }
  }

  EXPECT_EQ(Handle.at({10, 10, 10}), 10 + 10 + 10);

  Tensor TT = Handle.extractSlice(1);
  auto H2 = TT.getHandle<Ty>();

  EXPECT_EQ(H2.at({10, 10}), 1 + 10 + 10);

  for (unsigned y = 0; y < 20; y++) {
    for (unsigned z = 0; z < 64; z++) {
      H2.at({y, z}) = 2;
    }
  }

  EXPECT_EQ(H2.at({10, 10}), 2);
}

TEST(Tensor, assignment) {
  dim_t dim[] = {320, 200, 64};
  testAssignment<float>(Type{ElemKind::FloatTy, dim});
  testAssignment<double>(Type{ElemKind::Float64Ty, dim});
  testAssignment<int8_t>(Type{ElemKind::Int8QTy, dim, 1., 0});
  testAssignment<uint8_t>(Type{ElemKind::UInt8QTy, dim, 1., 0});
  testAssignment<int16_t>(Type{ElemKind::Int16QTy, dim, 1., 0});
  testAssignment<int32_t>(Type{ElemKind::Int32QTy, dim, 1., 0});
  testAssignment<uint8_t>(Type{ElemKind::UInt8ITy, dim});
  testAssignment<int32_t>(Type{ElemKind::Int32ITy, dim});
  testAssignment<int64_t>(Type{ElemKind::Int64ITy, dim});
}

TEST(Tensor, concatTensors1D) {
  Tensor X = {1.1f, 2.1f, 3.1f, 4.1f};
  Tensor Y = {5.2f, 6.2f, 7.2f, 8.2f};
  Tensor Z = {0.3f, 0.3f, 0.3f, 0.3f, 0.3f, 0.3f, 0.3f, 0.3f};
  Tensor expected = {5.2f, 6.2f, 7.2f, 8.2f, 1.1f, 2.1f, 3.1f, 4.1f};

  auto xH = X.getHandle<>();
  auto yH = Y.getHandle<>();
  auto zH = Z.getHandle<>();
  auto eH = expected.getHandle<>();

  zH.insertTensors(xH, {4});
  zH.insertTensors(yH, {0});

  for (dim_t i = 0, e = eH.size(); i < e; i++) {
    EXPECT_EQ(eH.at({i}), zH.at({i}));
  }
}

TEST(Tensor, concatTensors2D) {
  Tensor X(ElemKind::FloatTy, {10, 10});
  Tensor Y(ElemKind::FloatTy, {10, 10});
  Tensor Z(ElemKind::FloatTy, {20, 20});

  auto xH = X.getHandle<>();
  auto yH = Y.getHandle<>();
  auto zH = Z.getHandle<>();

  // Zero Y and Z but not X.
  Y.zero();
  Z.zero();

  // Create a nice picture:
  for (size_t i = 0, e = xH.size(); i < e; i++) {
    xH.raw(i) = (float(i) - 30) / 50;
  }

  // Insert the tensors and create a picture of three cards one on top of the
  // other.
  zH.insertTensors(xH, {0, 0});
  zH.insertTensors(xH, {5, 5});
  zH.insertTensors(xH, {10, 10});

  zH.dumpAscii();

  /// Check some pixels in the image:
  EXPECT_EQ(zH.at({0, 0}), xH.at({0, 0}));
  EXPECT_EQ(zH.at({19, 0}), 0);
  EXPECT_EQ(zH.at({0, 19}), 0);
  EXPECT_EQ(zH.at({19, 19}), xH.at({9, 9}));
  EXPECT_EQ(zH.at({10, 10}), xH.at({0, 0}));

  // Extract an image from the tensor.
  zH.extractTensors(yH, {10, 10});

  // Make sure that what we've extracted is equal to what we've inserted.
  for (size_t i = 0, e = xH.size(); i < e; i++) {
    EXPECT_EQ(yH.raw(i), xH.raw(i));
  }
}

TEST(Tensor, meanAndVariance) {

  Tensor T1 = {3, 4, 4, 5, 6, 8};
  Tensor T2 = {1, 2, 4, 5, 7, 11};

  auto H1 = T1.getHandle<>();
  auto H2 = T2.getHandle<>();

  auto MV1 = H1.calculateMeanVariance();
  auto MV2 = H2.calculateMeanVariance();

  EXPECT_EQ(int(MV1.first), 5);
  EXPECT_NEAR(MV1.second, 3.2, 0.01);
  EXPECT_EQ(int(MV2.first), 5);
  EXPECT_NEAR(MV2.second, 13.2, 0.01);
}

TEST(Tensor, getDimForPtr) {
  // Testing some tensor operations.
  Tensor T(ElemKind::FloatTy, {10, 5, 3});
  auto H = T.getHandle<>();

  for (unsigned x = 0; x < 10; x++) {
    for (unsigned y = 0; y < 5; y++) {
      for (unsigned z = 0; z < 3; z++) {
        dim_t ptr = H.getElementPtr({x, y, z});
        EXPECT_EQ(x, H.getDimForPtr(0, ptr));
        EXPECT_EQ(y, H.getDimForPtr(1, ptr));
        EXPECT_EQ(z, H.getDimForPtr(2, ptr));
      }
    }
  }
}

TEST(Tensor, copySlice) {
  PseudoRNG PRNG;
  // Testing some tensor operations.
  Tensor A(ElemKind::FloatTy, {10, 5, 3});
  Tensor B(ElemKind::FloatTy, {5, 3});

  auto AH = A.getHandle<>();
  auto BH = B.getHandle<>();

  AH.randomize(-2.0, 2.0, PRNG);

  B.copySlice(&A, 0);

  for (unsigned y = 0; y < 5; y++) {
    for (unsigned z = 0; z < 3; z++) {
      EXPECT_EQ(AH.at({0, y, z}), BH.at({y, z}));
    }
  }
}

/// Check that we can copy tensors across different types.
TEST(Tensor, copyWithCast) {
  PseudoRNG PRNG;
  Tensor A(ElemKind::Float16Ty, {10, 5, 3});
  Tensor B(ElemKind::FloatTy, {10, 5, 3});

  auto AH = A.getHandle<float16_t>();
  auto BH = B.getHandle<>();

  AH.randomize(-2.0, 2.0, PRNG);

  B.copyWithCast<float, float16_t>(&A);

  EXPECT_EQ(A.size(), B.size());
  for (size_t idx = 0, end = A.size(); idx != end; ++idx) {
    EXPECT_NEAR(AH.raw(idx), BH.raw(idx), 0.0001);
  }
}

/// Check that we can copy tensors across different types.
TEST(Tensor, copyWithCastBFloat16) {
  PseudoRNG PRNG;
  Tensor A(ElemKind::BFloat16Ty, {10, 5, 3});
  Tensor B(ElemKind::FloatTy, {10, 5, 3});

  auto AH = A.getHandle<bfloat16_t>();
  auto BH = B.getHandle<>();

  AH.randomize(-2.0, 2.0, PRNG);

  B.copyWithCast<float, bfloat16_t>(&A);

  EXPECT_EQ(A.size(), B.size());
  for (size_t idx = 0, end = A.size(); idx != end; ++idx) {
    EXPECT_NEAR(AH.raw(idx), BH.raw(idx), 0.0001);
  }
}

/// Check that we can convert a tensor from float to float16_t and the other
/// way around.
TEST(Tensor, convertToType) {
  PseudoRNG PRNG;
  Tensor A(ElemKind::FloatTy, {10, 5, 3});
  Tensor B(ElemKind::FloatTy, {10, 5, 3});

  auto AH = A.getHandle<>();

  AH.randomize(-2.0, 2.0, PRNG);

  B.copyRawFrom(&A);
  ASSERT_EQ(B.getElementType(), ElemKind::FloatTy);

  // Cast B from float to float16_t.
  B.convertToType(ElemKind::Float16Ty);
  ASSERT_EQ(B.getElementType(), ElemKind::Float16Ty);
  {
    auto BH = B.getHandle<float16_t>();

    EXPECT_EQ(A.size(), B.size());
    for (size_t idx = 0, end = A.size(); idx != end; ++idx) {
      EXPECT_NEAR(AH.raw(idx), BH.raw(idx), 0.001);
    }
  }

  // Cast back B from float16_t to float.
  B.convertToType(ElemKind::FloatTy);
  ASSERT_EQ(B.getElementType(), ElemKind::FloatTy);
  EXPECT_TRUE(B.isEqual(A, 0.001));
}

TEST(Tensor, reset) {
  Tensor A(ElemKind::FloatTy, {2, 3});
  Tensor QA(ElemKind::Int8QTy, {3, 4}, 2.2, 7);
  auto H = A.getHandle();
  auto QH = QA.getHandle<int8_t>();

  H = {1.5f, 17.3f, -20.3f, 10.0f, 1.2f, -2.3f};
  QH = {5, 9, -2, 4, 3, -10, 21, -9, 0, -51, 73, 2};

  A.reset(ElemKind::FloatTy, {5, 2, 6});
  A.zero();
  QA.reset(ElemKind::Int8QTy, {4, 7, 3, 8}, 1.4, -13);
  QA.zero();

  H = A.getHandle();
  QH = QA.getHandle<int8_t>();

  EXPECT_EQ(H.dims().size(), 3);
  EXPECT_EQ(QH.dims().size(), 4);
  EXPECT_TRUE(H.dims().equals({5, 2, 6}));
  EXPECT_TRUE(QH.dims().equals({4, 7, 3, 8}));
  EXPECT_EQ(H.size(), 5 * 2 * 6);
  EXPECT_EQ(QH.size(), 4 * 7 * 3 * 8);
  EXPECT_NEAR(QA.getType().getScale(), 1.4, 0.0001);
  EXPECT_EQ(QA.getType().getOffset(), -13);

  for (size_t i = 0; i < 5 * 2 * 6; i++) {
    EXPECT_EQ(H.raw(i), 0.0);
  }
  for (size_t i = 0; i < 4 * 7 * 3 * 8; i++) {
    EXPECT_EQ(QH.raw(i), QA.getType().getOffset());
  }
}

TEST(Tensor, transpose) {
  Tensor X(ElemKind::FloatTy, {5, 2});
  auto H = X.getHandle<>();
  H = {
      0.2f, 0.4f, 0.6f, 0.8f, 1.0f, 0.6f, 0.8f, 1.0f, 2.0f, 3.0f,
  };

  Tensor Xhat;
  X.transpose(&Xhat, {1, 0});

  auto XhatH = Xhat.getHandle<>();

  for (dim_t i = 0; i < 5; i++) {
    EXPECT_EQ(H.at({i, 0}), XhatH.at({0, i}));
    EXPECT_EQ(H.at({i, 1}), XhatH.at({1, i}));
  }
}

TEST(Tensor, transpose2) {
  PseudoRNG PRNG;
  Tensor X(ElemKind::FloatTy, {10, 6, 3});
  auto H = X.getHandle<>();
  H.randomize(-2.0, 2.0, PRNG);

  Tensor Xhat;
  X.transpose(&Xhat, {1, 2, 0});

  auto XhatH = Xhat.getHandle<>();

  for (dim_t i = 0; i < 10; i++) {
    for (dim_t j = 0; j < 6; j++) {
      for (dim_t k = 0; k < 3; k++) {
        EXPECT_EQ(H.at({i, j, k}), XhatH.at({j, k, i}));
      }
    }
  }
}

TEST(Tensor, nonOwnedTensor) {
  Tensor T1 = {1.2f, 12.1f, 51.0f, 1515.2f};

  auto H1 = T1.getHandle<>();
  H1.dump();
  EXPECT_EQ(int(H1.at({0})), 1);

  {
    // Create a view on T1 which makes it look like 2x2
    Tensor T2 = T1.getUnowned({2, 2});
    EXPECT_EQ(T2.getUnsafePtr(), T1.getUnsafePtr());
    auto H2 = T2.getHandle<>();
    // Check that T2 has the same values as T1.
    EXPECT_EQ(int(H2.at({0, 0})), 1);
    EXPECT_EQ(int(H2.at({0, 1})), 12);
    EXPECT_EQ(int(H2.at({1, 0})), 51);
    EXPECT_EQ(int(H2.at({1, 1})), 1515);
    // Modify a value through T2.
    H2.at({1, 1}) = 30.3;
    EXPECT_EQ(int(H2.at({1, 1})), 30);
    // Modify a value through T1 and check that this update is visible
    // through T2.
    H1.at({1}) = 40.4;
    EXPECT_EQ(int(H2.at({0, 1})), 40);
    H2.dump();
  }

  // Check that modifications through T2 changed T1 as well, i.e. T2 was
  // acting like a view on T1.
  EXPECT_EQ(int(H1.at({3})), 30);

  // Check that T1 is still alive
  H1.dump();
}
/// Check that we properly take ownership of the underlying memory when we
/// reset the tensor shape. This test used to fail under the leak sanitizer.
TEST(Tensor, nonOwnedTensorFollowedByReset) {
  float raw_data = 0.;
  Type F32Ty(ElemKind::FloatTy, {1});

  // Create an unowned tensor.
  Tensor T1(&raw_data, &F32Ty);

  auto H1 = T1.getHandle<>();
  EXPECT_EQ(int(H1.at({0})), 0);

  Type F32x2Ty(ElemKind::FloatTy, {2});

  // Resizing the tensor triggers a fresh memory allocation. Because the
  // previous data came from outside, the tensor was unowned; we used to
  // forget to reset that state here, so the newly allocated buffer was never
  // freed.
  T1.reset(F32x2Ty);
  T1.zero();
  H1 = T1.getHandle<>();
  EXPECT_EQ(int(H1.at({0})), 0);
  EXPECT_EQ(int(H1.at({1})), 0);

  // When T1 gets deleted, the memory allocated through reset should
  // be released.
}

/// Verify that accessing/modifying a tensor with offsets correctly modifies
/// the underlying base Tensor's data. Transforms a 2D tensor:
/// 0.0 0.0 0.0      0.0 0.0 1.0
/// 0.0 0.0 0.0  --> 1.0 2.0 1.0
/// 0.0 0.0 0.0      1.0 1.0 0.0
TEST(Tensor, modifyOffsetIntoTensor2D) {
  // Zero out the base tensor.
  Tensor orig(ElemKind::FloatTy, {3, 3});
  orig.zero();

  // View contiguous data from the original tensor from {0, 2} to {2, 1} as a
  // single dimensional tensor of length 6.
  Tensor subview = orig.getUnowned({6}, {0, 2});
  auto H_subview = subview.getHandle<>();
  // Clear this row of 6 to 1.0.
  H_subview.clear(1.0);
  // Set the 3rd element to 2.0.
  H_subview.at({2}) = 2.0;

  // Verify the underlying data was correctly modified, according to the
  // picture above.
  auto H_orig = orig.getHandle<>();
  EXPECT_EQ(H_orig.at({0, 0}), 0.0);
  EXPECT_EQ(H_orig.at({0, 1}), 0.0);
  EXPECT_EQ(H_orig.at({0, 2}), 1.0);
  EXPECT_EQ(H_orig.at({1, 0}), 1.0);
  EXPECT_EQ(H_orig.at({1, 1}), 2.0);
  EXPECT_EQ(H_orig.at({1, 2}), 1.0);
  EXPECT_EQ(H_orig.at({2, 0}), 1.0);
  EXPECT_EQ(H_orig.at({2, 1}), 1.0);
  EXPECT_EQ(H_orig.at({2, 2}), 0.0);
}

/// Three-dimensional test of modifying a subtensor; similar in idea to the
/// two-dimensional version, modifyOffsetIntoTensor2D.
TEST(Tensor, modifyOffsetIntoTensor3D) {
  // Zero out the base tensor.
  Tensor orig(ElemKind::FloatTy, {4, 3, 2});
  orig.zero();

  // Get a 2D view of the subtensor.
  Tensor subview = orig.getUnowned({2, 6}, {1, 0, 0});
  auto H_subview = subview.getHandle<>();
  // Clear subview to 1.0.
  H_subview.clear(1.0);

  // Verify the underlying data was correctly modified.
  auto H_orig = orig.getHandle<>();
  for (dim_t i = 0; i < 4; i++) {
    for (dim_t j = 0; j < 3; j++) {
      for (dim_t k = 0; k < 2; k++) {
        if (i == 1 || i == 2) {
          EXPECT_EQ(H_orig.at({i, j, k}), 1.0);
        } else {
          EXPECT_EQ(H_orig.at({i, j, k}), 0.0);
        }
      }
    }
  }
}

/// Verify that checking equality using a sub-tensor with offsets works
/// correctly.
TEST(Tensor, equalsOffsetIntoTensor) {
  // 0.0 1.0
  // 2.0 3.0
  // 4.0 5.0
  // 6.0 7.0
  Tensor orig(ElemKind::FloatTy, {4, 2});
  auto H_orig = orig.getHandle<>();
  H_orig = {0, 1, 2, 3, 4, 5, 6, 7};

  // View the data from rows 2 and 3 (each of length 2) as a single
  // dimensional tensor of size 4.
  Tensor subview = orig.getUnowned({4}, {2, 0});
  auto H_subview = subview.getHandle<>();

  // Create another tensor with same expected dimensions/data as the subview.
  Tensor recreatedSubview(ElemKind::FloatTy, {4});
  auto H_recreatedSubview = recreatedSubview.getHandle<>();
  H_recreatedSubview = {4, 5, 6, 7};

  for (dim_t i = 0; i < 4; i++) {
    EXPECT_EQ(H_subview.at({i}), H_recreatedSubview.at({i}));
  }
}

TEST(Tensor, externallyManagedPayload) {
  // Allocate and initialize payload "externally", without using the Tensor
  // API. For example the data may come from a different library, be read
  // from a file, etc.
  std::vector<float> payload{1.2f, 12.1f, 51.0f, 1515.2f};

  {
    // Work with an existing payload buffer by means of the Tensor APIs.
    Type ty(ElemKind::FloatTy, {2, 2});
    Tensor T1(payload.data(), &ty);

    auto H1 = T1.getHandle<>();
    H1.dump();
    EXPECT_EQ(int(H1.at({0, 0})), 1);

    H1.at({1, 1}) = 30.3;
  }

  // Check that modifications through T1 and H1 changed the payload as well,
  // i.e. T1/H1 were acting like a view on the payload.
  EXPECT_EQ(int(payload[3]), 30);
}

TEST(Tensor, integerTensors) {
  Tensor X;
  // Integer tensors must have scale and offset.
  Type I32Ty(ElemKind::Int32QTy, {1, 3}, 0.1, 4);
  Type I8Ty(ElemKind::Int8QTy, {3, 3}, 0.5, 2);

  Type I8Ty2(ElemKind::Int8QTy, {3, 3}, 4, 4);
  Type I8Ty3(ElemKind::Int8QTy, {3, 3}, 4, 4);

  // Float tensors must not have scale and offsets.
  Type FlTy(ElemKind::FloatTy, {1, 3});

  // Check that basic operations work.
  Tensor I(I8Ty);
  auto H = I.getHandle<int8_t>();
  H.at({0, 2}) = 3;

  EXPECT_EQ(H.at({0, 2}), 3);
  EXPECT_EQ(0.5, I.getType().getScale());
  EXPECT_EQ(2, I.getType().getOffset());

  // These types have a different scale and offset.
  EXPECT_FALSE(I8Ty.isEqual(I8Ty2));

  // These types have the same scale and offset.
  EXPECT_TRUE(I8Ty2.isEqual(I8Ty3));
}

TEST(Tensor, insertWithCountAndAxis) {
  Tensor X(ElemKind::FloatTy, {3, 2});
  Tensor Y(ElemKind::FloatTy, {3, 6});

  auto xH = X.getHandle<>();
  auto yH = Y.getHandle<>();

  for (size_t i = 0, e = xH.size(); i < e; i++) {
    xH.raw(i) = float(i);
  }

  // Insert three of these slices on axis 1.
  yH.insertTensors(xH, {0, 0}, /* count */ 3, /* axis */ 1);

  for (dim_t i = 0; i < 3; i++) {
    for (dim_t j = 0; j < 6; j++) {
      EXPECT_EQ(xH.at({i, j % 2}), yH.at({i, j}));
    }
  }
}

/// Verify that quantized tensors start out zeroed to their type's offset and
/// are reset back to that offset.
TEST(Tensor, zeroQuantizedTensor) {
  const int32_t offsetQ8 = 0;
  Tensor Q8T(ElemKind::Int8QTy, {3, 4, 5, 6}, 127, offsetQ8);
  Q8T.zero();

  const int32_t offsetUQ8 = 3;
  Tensor UQ8T(ElemKind::UInt8QTy, {3, 4, 5, 6}, 2, offsetUQ8);
  UQ8T.zero();

  const int32_t offsetQ16 = 223;
  Tensor Q16T(ElemKind::Int16QTy, {3, 4, 5}, 1234.7, offsetQ16);
  Q16T.zero();

  const int32_t offsetQ32 = 53452;
  Tensor Q32T(ElemKind::Int32QTy, {3, 4}, 500.4, offsetQ32);
  Q32T.zero();

  auto Q8H = Q8T.getHandle<int8_t>();
  EXPECT_TRUE(Q8H.isZero());
  for (auto elem : Q8H) {
    EXPECT_EQ(elem, offsetQ8);
  }

  auto UQ8H = UQ8T.getHandle<uint8_t>();
  EXPECT_TRUE(UQ8H.isZero());
  for (auto elem : UQ8H) {
    EXPECT_EQ(elem, offsetUQ8);
  }

  auto Q16H = Q16T.getHandle<int16_t>();
  EXPECT_TRUE(Q16H.isZero());
  for (auto elem : Q16H) {
    EXPECT_EQ(elem, offsetQ16);
  }

  auto Q32H = Q32T.getHandle<int32_t>();
  EXPECT_TRUE(Q32H.isZero());
  for (auto elem : Q32H) {
    EXPECT_EQ(elem, offsetQ32);
  }

  Q32H = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  EXPECT_FALSE(Q32H.isZero());

  for (auto elem : Q32H) {
    EXPECT_NE(elem, offsetQ32);
  }

  Q32T.zero();
  EXPECT_TRUE(Q32H.isZero());
  for (auto elem : Q32H) {
    EXPECT_EQ(elem, offsetQ32);
  }
}
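
// Background for the checks above: for quantized element types the stored
// integer q represents (approximately) the real value scale * (q - offset),
// so the quantized encoding of 0.0 is the offset itself; that is why zero()
// fills quantized tensors with their offset rather than with the bit pattern
// 0. A minimal arithmetic sketch of that relationship (plain math, not a Glow
// API call):
//
//   float real = scale * (float(q) - float(offset)); // q == offset  =>  0.0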

// Verify that if the tensor is set to the offset manually then isZero() is
// true
TEST(Tensor, manuallySetToOffset) {
  const int8_t offsetQ8 = 6;
  Tensor Q8T(ElemKind::Int8QTy, {3, 2}, 10.1, offsetQ8);
  Q8T.zero();

  auto Q8H = Q8T.getHandle<int8_t>();
  EXPECT_TRUE(Q8H.isZero());

  Q8H = {1, 2, 3, 4, 5, 6};
  EXPECT_FALSE(Q8H.isZero());

  Q8H = {offsetQ8, offsetQ8, offsetQ8, offsetQ8, offsetQ8, offsetQ8};
  EXPECT_TRUE(Q8H.isZero());

  Q8H.raw(1) = offsetQ8 - 2;
  EXPECT_FALSE(Q8H.isZero());

  Q8H.raw(1) = offsetQ8;
  EXPECT_TRUE(Q8H.isZero());
}

TEST(ZeroDimensionalTensor, handleAt) {
  Tensor T(ElemKind::FloatTy, {});
  auto H = T.getHandle<>();
  H.at({}) = 7.1;
  EXPECT_FLOAT_EQ(H.at({}), 7.1);
  EXPECT_FLOAT_EQ(((float *)T.getUnsafePtr())[0], 7.1);
}

TEST(ZeroDimensionalTensor, handleAssign) {
  Tensor T(ElemKind::FloatTy, {});
  auto H = T.getHandle<>();
  H = {1.14f};
  EXPECT_FLOAT_EQ(H.at({}), 1.14);
  EXPECT_FLOAT_EQ(((float *)T.getUnsafePtr())[0], 1.14);
}

TEST(ZeroDimensionalTensor, compareAndDumpTwo) {
  Tensor T1(ElemKind::FloatTy, {});
  T1.zero();
  Tensor T2(ElemKind::FloatTy, {});
  T2.zero();

  EXPECT_TRUE(T1.isEqual(T2));

  auto H = T1.getHandle<>();
  H.dump();

  EXPECT_FLOAT_EQ(H.raw(0), 0.0);
  H.raw(0) = 4.2;
  EXPECT_FLOAT_EQ(H.raw(0), 4.2);

  EXPECT_FALSE(T1.isEqual(T2));
  H.dump();
}

TEST(ZeroDimensionalTensor, compareToNonZeroDimensional) {
  Tensor T1(ElemKind::FloatTy, {});
  Tensor T2(ElemKind::FloatTy, {1});
  T1.zero();
  T2.zero();

  EXPECT_FALSE(T1.isEqual(T2));
}

TEST(ZeroDimensionalTensor, transpose) {
  Tensor T(ElemKind::Int64ITy, {});
  T.getHandle<int64_t>() = {15};

  Tensor TT;
  T.transpose(&TT, {});

  EXPECT_TRUE(T.isEqual(TT));
}

TEST(ZeroDimensionalTensor, iterate) {
  Tensor T(ElemKind::Int64ITy, {});
  T.getHandle<int64_t>() = {15};

  auto TH = T.getHandle<int64_t>();
  std::vector<int64_t> elems;
  for (auto e : TH) {
    elems.push_back(e);
  }

  EXPECT_EQ(elems.size(), 1);
  EXPECT_EQ(elems[0], 15);
}

TEST(Type, compare) {
  Type T1(ElemKind::FloatTy, {});
  Type T2(ElemKind::FloatTy, {});
  Type T3(ElemKind::FloatTy, {1});
  Type T4(ElemKind::Int64ITy, {});

  EXPECT_TRUE(T1.isEqual(T2));
  EXPECT_FALSE(T1.isEqual(T3));
  EXPECT_FALSE(T1.isEqual(T4));
}

TEST(Type, isEqual) {
  {
    Type T1(ElemKind::FloatTy, {1, 2, 3});
    Type T2(ElemKind::Int64ITy, {1, 2, 3});
    EXPECT_FALSE(T1.isEqual(T2));
    EXPECT_FALSE(T2.isEqual(T1));
  }
  {
    Type T1(ElemKind::FloatTy, {1, 2, 3});
    Type T2(ElemKind::FloatTy, {1, 2});
    EXPECT_FALSE(T1.isEqual(T2));
    EXPECT_FALSE(T2.isEqual(T1));
  }
  {
    Type T1(ElemKind::FloatTy, {1, 2, 3});
    Type T2(ElemKind::FloatTy, {1, 2, 4});
    EXPECT_FALSE(T1.isEqual(T2));
    EXPECT_FALSE(T2.isEqual(T1));
  }
  {
    Type T1(ElemKind::FloatTy, {1, 2, 3});
    Type T2(ElemKind::FloatTy, {1, 2, 4});
    EXPECT_TRUE(T1.isEqual(T2, /* allowDifferentShape */ true));
    EXPECT_TRUE(T2.isEqual(T1, /* allowDifferentShape */ true));
  }
  {
    Type T1(ElemKind::FloatTy, {1, 2, 3});
    Type T2(ElemKind::FloatTy, {4, 2, 3});
    EXPECT_TRUE(T1.isEqual(T2, /* allowDifferentShape */ true));
    EXPECT_TRUE(T2.isEqual(T1, /* allowDifferentShape */ true));
  }
  {
    Type T1(ElemKind::Int8QTy, {1, 2, 3}, 0, 0);
    Type T2(ElemKind::Int8QTy, {1, 2, 3}, 1, 0);
    EXPECT_FALSE(T1.isEqual(T2));
    EXPECT_FALSE(T2.isEqual(T1));
  }
  {
    Type T1(ElemKind::Int8QTy, {1, 2, 3}, 1, 4);
    Type T2(ElemKind::Int8QTy, {1, 2, 3}, 1, 4);
    EXPECT_TRUE(T1.isEqual(T2));
    EXPECT_TRUE(T2.isEqual(T1));
  }
  {
    Type T1(ElemKind::FloatTy, {1, 2, 3});
    Type T2(ElemKind::FloatTy, {1, 2, 3});
    EXPECT_TRUE(T1.isEqual(T2));
    EXPECT_TRUE(T2.isEqual(T1));
  }
}

TEST(Tensor, insertSlice) {
  Tensor big(ElemKind::FloatTy, {3, 4});
  Tensor small({1.0f, 2.0f, 3.0f, 4.0f});
  big.zero();
  big.getHandle<>().insertSlice(small, 1);
  Tensor expected(ElemKind::FloatTy, {3, 4});
  expected.getHandle<>() = {0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 2.0f,
                            3.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f};
  EXPECT_TRUE(big.isEqual(expected));
}

/// Check that after converting to UInt8FusedQTy, the data, scale and offset
/// are the same as the original ones.
template <class Ty>
static void testConvertToUInt8FusedQTy(ElemKind fusedKind, dim_t row,
                                        dim_t col) {
  EXPECT_LT(row, 100);
  EXPECT_LT(col, 100);
  Tensor T(fusedKind, {row, col}, 1.0, 0);
  auto dataCol = col - 2 * sizeof(Ty);
  auto TH = T.getHandle<uint8_t>();
  for (dim_t i = 0; i < row; i++) {
    TH.setFusedScaleOffsetInRow<Ty>(i, i, i);
    for (dim_t j = 0; j < dataCol; j++) {
      TH.at({i, j}) = i + j;
    }
  }

  Tensor newT = T.getCopyConvertedToType(ElemKind::UInt8FusedQTy);
  auto newTH = newT.getHandle<uint8_t>();
  bool is4Bit = fusedKind == ElemKind::UInt4FusedFP16QTy ||
                fusedKind == ElemKind::UInt4FusedQTy;

  // Check the converted dims.
  auto expectedCol = dataCol * (is4Bit ? 2 : 1) + 2 * sizeof(float);
  EXPECT_EQ(newTH.dims().size(), 2);
  EXPECT_EQ(newTH.dims()[0], TH.dims()[0]);
  EXPECT_EQ(newTH.dims()[1], expectedCol);

  // Check that the converted FP32 scale/offset were correctly cast from the
  // original scale/offset type.
  for (dim_t i = 0; i < row; i++) {
    float scale, offset;
    std::tie(scale, offset) = newTH.getFusedScaleOffsetFromRow<float>(i);
    EXPECT_EQ(scale, (float)i);
    EXPECT_EQ(offset, (float)i);
  }

  // Check that the converted data matches the original data.
  for (dim_t i = 0; i < row; i++) {
    for (dim_t j = 0; j < dataCol; j++) {
      if (is4Bit) {
        EXPECT_EQ(newTH.at({i, j * 2}), (i + j) & 0x0F);
        EXPECT_EQ(newTH.at({i, j * 2 + 1}), ((i + j) >> 4) & 0x0F);
      } else {
        EXPECT_EQ(newTH.at({i, j}), i + j);
      }
    }
  }
}
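
// Note on the 4-bit cases above: for the UInt4Fused* kinds each payload byte
// packs two 4-bit elements (low nibble first), which is why converting to
// UInt8FusedQTy doubles the number of data columns and why the expectations
// unpack the original byte with `& 0x0F` and `>> 4`. This reading is inferred
// from the checks in this helper rather than stated by the Tensor API itself.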

/// Check that after initializing a fused tensor to zero that the scale and
/// offset are not changed and that the values for each row are set to that
/// row's offset.
template <typename ScaleOffsetT>
static void testInitZeroFused(ElemKind fusedKind, float allowedError) {
  constexpr dim_t numTotalColumns = 2 + 2 * sizeof(ScaleOffsetT);
  Tensor T(fusedKind, {10, numTotalColumns}, 0.0, 0);
  auto TH = T.getHandle<uint8_t>();
  auto *TData = reinterpret_cast<uint8_t *>(T.getUnsafePtr());
  TH.clear(127);
  auto rowLength = TH.getElementPtr({1, 0});
  auto width = TH.dims()[1];

  // Now set the scale/offset of each row. Set the scale to 0.1 so that we are
  // multiplying by 10 when calculating zero. Offset is dependent on each row.
  const ScaleOffsetT scaleForAllRows = 0.1;
  for (size_t i = 0; i < 10; i++) {
    const ScaleOffsetT offset = -(i + 0.7);
    uint8_t *scaleOffsetPtr =
        &TData[i * rowLength] + width - 2 * sizeof(ScaleOffsetT);
    memcpy(scaleOffsetPtr, &scaleForAllRows, sizeof(ScaleOffsetT));
    memcpy(scaleOffsetPtr + sizeof(ScaleOffsetT), &offset,
           sizeof(ScaleOffsetT));
  }

  // Now reset so that each row's actual data is set to zero based on the
  // scale/offset stored in that row.
  PseudoRNG PRNG;
  T.init(Tensor::InitKind::Zero, 1, PRNG);

  EXPECT_TRUE(TH.isZero(allowedError));

  // Now check that we correctly set the data, and that the scale/offsets are
  // the same as expected (untouched by initializing to zero).
  for (dim_t i = 0; i < 10; i++) {
    uint8_t *scaleOffsetPtr =
        &TData[i * rowLength] + width - 2 * sizeof(ScaleOffsetT);
    ScaleOffsetT scale, offset;
    memcpy(&scale, scaleOffsetPtr, sizeof(ScaleOffsetT));
    memcpy(&offset, scaleOffsetPtr + sizeof(ScaleOffsetT),
           sizeof(ScaleOffsetT));

    EXPECT_NEAR(quantization::dequantizeWithFloatOffset<uint8_t>(
                    TH.at({i, 0}), static_cast<float>(scale),
                    static_cast<float>(offset)),
                0, allowedError);
    EXPECT_NEAR(quantization::dequantizeWithFloatOffset<uint8_t>(
                    TH.at({i, 1}), static_cast<float>(scale),
                    static_cast<float>(offset)),
                0, allowedError);
  }
}

/// Test zeroing a Fused tensor with Float scale/offsets.
TEST(Tensor, initZeroFused_Float) {
  testInitZeroFused<float>(ElemKind::UInt8FusedQTy, 1E-5);
}

/// Test zeroing a Fused tensor with Float16 scale/offsets.
TEST(Tensor, initZeroFused_Float16) {
  testInitZeroFused<float16_t>(ElemKind::UInt8FusedFP16QTy, 1E-2);
}

/// Check that when initializing a fused tensor with Broadcast, the scale and
/// offset are not changed and the broadcast value is set correctly.
static void testBroadcastFused(ElemKind fusedKind) {
  const dim_t numTotalColumns =
      2 + 2 * ((fusedKind == ElemKind::UInt8FusedQTy) ? sizeof(float)
                                                      : sizeof(float16_t));
  Tensor T(fusedKind, {10, numTotalColumns}, 0.0, 0);
  auto TH = T.getHandle<uint8_t>();
  for (dim_t i = 0; i < 10; i++) {
    for (dim_t j = 0; j < numTotalColumns; j++) {
      TH.at({i, j}) = i * 10 + j;
    }
  }
  PseudoRNG PRNG;
  T.init(Tensor::InitKind::Broadcast, 5, PRNG);
  for (dim_t i = 0; i < 10; i++) {
    for (dim_t j = 0; j < numTotalColumns; j++) {
      // Check that the scales/offsets are unchanged, and that the broadcast
      // value is everywhere else.
      if (j < 2) {
        EXPECT_EQ(TH.at({i, j}), 5);
      } else {
        EXPECT_EQ(TH.at({i, j}), i * 10 + j);
      }
    }
  }
}

/// Test broadcasting a Fused tensor with Float scale/offsets.
TEST(Tensor, initBroadcastFused_Float) {
  testBroadcastFused(ElemKind::UInt8FusedQTy);
}

/// Test broadcasting a Fused tensor with Float16 scale/offsets.
TEST(Tensor, initBroadcastFused_Float16) {
  testBroadcastFused(ElemKind::UInt8FusedFP16QTy);
}

/// Check that when randomizing a fused quantized tensor, the scale and offset
/// are not changed.
static void testRandomizeFused(ElemKind fusedKind) {
  const dim_t numTotalColumns =
      2 + 2 * ((fusedKind == ElemKind::UInt8FusedQTy) ? sizeof(float)
                                                      : sizeof(float16_t));
  Tensor T(fusedKind, {10, numTotalColumns}, 1.0, 0);
  auto TH = T.getHandle<uint8_t>();
  for (dim_t i = 0; i < 10; i++) {
    for (dim_t j = 0; j < numTotalColumns; j++) {
      TH.at({i, j}) = i * 10 + j;
    }
  }
  PseudoRNG PRNG;
  TH.randomize(0, 255, PRNG);
  for (dim_t i = 0; i < 10; i++) {
    for (dim_t j = 2; j < numTotalColumns; j++) {
      // Check that the scales/offsets are unchanged.
      EXPECT_EQ(TH.at({i, j}), i * 10 + j);
    }
  }
}

/// Test randomizing a Fused tensor with Float scale/offsets.
TEST(Tensor, randomizeFused_Float) {
  testRandomizeFused(ElemKind::UInt8FusedQTy);
}

/// Test randomizing a Fused tensor with Float16 scale/offsets.
TEST(Tensor, randomizeFused_Float16) {
  testRandomizeFused(ElemKind::UInt8FusedFP16QTy);
}


/// Check that getting and setting fused scales and offsets works correctly.
template <typename ScaleOffsetT>
static void testGetSetFusedScaleOffset(ElemKind fusedKind) {
  Tensor T(fusedKind, {10, 10}, 1.0, 0);
  auto TH = T.getHandle<uint8_t>();
  for (size_t i = 0; i < 10; i++) {
    TH.setFusedScaleOffsetInRow<ScaleOffsetT>(i, i, i);
  }
  for (size_t i = 0; i < 10; i++) {
    ScaleOffsetT scale, offset;
    std::tie(scale, offset) = TH.getFusedScaleOffsetFromRow<ScaleOffsetT>(i);
    EXPECT_EQ(scale, (ScaleOffsetT)i);
    EXPECT_EQ(offset, (ScaleOffsetT)i);
  }
}

/// Test getting and setting fused scales and offsets from UInt8FusedQTy.
TEST(Tensor, GetFusedScaleOffset_UInt8FusedQTy) {
  testGetSetFusedScaleOffset<float>(ElemKind::UInt8FusedQTy);
}

/// Test getting and setting fused scales and offsets from UInt8FusedFP16QTy.
TEST(Tensor, GetFusedScaleOffset_UInt8FusedFP16QTy) {
  testGetSetFusedScaleOffset<float16_t>(ElemKind::UInt8FusedFP16QTy);
}

/// Test getting and setting fused scales and offsets from UInt4FusedFP16QTy.
TEST(Tensor, GetFusedScaleOffset_UInt4FusedFP16QTy) {
  testGetSetFusedScaleOffset<float16_t>(ElemKind::UInt4FusedFP16QTy);
}

/// Test getting and setting fused scales and offsets from UInt4FusedQTy.
TEST(Tensor, GetFusedScaleOffset_UInt4FusedQTy) {
  testGetSetFusedScaleOffset<float>(ElemKind::UInt4FusedQTy);
}
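
// Rough picture of the row-wise layout that the fused-quantization tests in
// this file rely on (inferred from the pointer arithmetic in
// testInitZeroFused, not a documented guarantee): each row stores its
// quantized payload bytes followed by a trailing scale/offset pair,
//
//   [ q0, q1, ..., q(N-1) | scale | offset ]
//
// where the pair is a float for UInt8FusedQTy/UInt4FusedQTy and a float16_t
// for the *FP16QTy variants; setFusedScaleOffsetInRow() and
// getFusedScaleOffsetFromRow() read and write that trailing pair.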

/// Check if dump functions work for Tensor
TEST(Tensor, dump) {
  Tensor T = {1.2f, 12.1f, 51.0f, 1515.2f};
  std::string mes = T.toString();
  std::string storageT1;
  llvm::raw_string_ostream osT1(storageT1);
  T.dump(osT1);
  std::string expectMes = R"(shape: ( 4 )
elemkind: float
max: 1515.19995 min: 1.20000 avg: 394.87499
[1.20000, 12.10000, 51.00000, 1515.19995, ]
)";
  EXPECT_EQ(mes, expectMes);
  EXPECT_EQ(mes, osT1.str());
  std::string storageT2;
  llvm::raw_string_ostream osT2(storageT2);
  osT2 << T;
  EXPECT_EQ(mes, osT2.str());
  T.dump(2);
  std::string expectMes2 = R"(shape: ( 4 )
elemkind: float
max: 1515.19995 min: 1.20000 avg: 394.87499
[1.20000, 12.10000, ...]
)";
  std::string storageT3;
  llvm::raw_string_ostream osT3(storageT3);
  // Only dump 2 elements.
  T.dump(osT3, 2);
  std::string mes2 = T.toString(2);
  EXPECT_EQ(mes2, expectMes2);
  EXPECT_EQ(mes2, osT3.str());

  // Get an unowned padded (partial) tensor sharing storage with T.
  auto paddedType = Type::newShape(T.getType(), {256});
  Tensor partialT(T.getUnsafePtr(), &paddedType, T.getSizeInBytes());
  std::string expectPartial = R"(shape: ( 256 ) ; partial num elements: 4
elemkind: float
max: 1515.19995 min: 1.20000 avg: 394.87499
[1.20000, 12.10000, 51.00000, ...]
)";
  std::string partialString = partialT.toString(3);
  EXPECT_EQ(partialString, expectPartial);
}

/// Check Type serialization functions.
TEST(Tensor, typeSerialization) {
  auto testType = [](Type ty) {
    EXPECT_EQ(ty, Type::fromString(ty.toString()));
  };
  testType(Type(ElemKind::FloatTy, {1}));
  testType(Type(ElemKind::Float16Ty, {1, 2}));
  testType(Type(ElemKind::Float64Ty, {1}));
  testType(Type(ElemKind::Int8QTy, {1, 2, 3}, 1.1, 1));
  testType(Type(ElemKind::UInt8QTy, {1, 2, 3}, 1.2, 2));
  testType(Type(ElemKind::Int16QTy, {1, 2, 3}, 1.3, 3));
  testType(Type(ElemKind::Int32QTy, {1, 2, 3}, 1.4, 4));
  testType(Type(ElemKind::UInt8ITy, {1, 2, 3}));
  testType(Type(ElemKind::Int32ITy, {1, 2, 3}));
  testType(Type(ElemKind::Int64ITy, {1, 2, 3}));
  testType(Type(ElemKind::UInt8FusedQTy, {1, 2, 3}, 1.5, 5));
  testType(Type(ElemKind::UInt8FusedFP16QTy, {1, 2, 3}, 1.6, 6));
  testType(Type(ElemKind::UInt4FusedFP16QTy, {1, 2, 3}, 1.7, 7));
  testType(Type(ElemKind::UInt4FusedQTy, {1, 2, 3}, 1.7, 7));
  testType(Type(ElemKind::BoolTy, {1, 2, 3}));
}

/// Test unpadded size.
TEST(Tensor, unpaddedSize) {
  Tensor partial(ElemKind::FloatTy, {11});
  PseudoRNG PRNG;
  partial.init(Tensor::InitKind::Broadcast, 5, PRNG);
  auto bytes = partial.getSizeInBytes();

  auto H = partial.getHandle<float>();
  for (const auto &e : H) {
    EXPECT_EQ(e, 5);
  }

  // Get an unowned padded tensor sharing storage with partial.
  auto paddedType = Type::newShape(partial.getType(), {256});
  auto paddedBytes = paddedType.getSizeInBytes();
  Tensor T(partial.getUnsafePtr(), &paddedType, bytes);
  EXPECT_EQ(T.getUnpaddedSizeInBytes(), bytes);
  EXPECT_EQ(T.getSizeInBytes(), paddedBytes);
  EXPECT_EQ(T.getRealNumElements(), 11);
  auto partialH = partial.getHandle<float>();
  int numElemCount = 0;
  for (const auto &e : partialH) {
    EXPECT_EQ(e, 5);
    numElemCount += 1;
  }
  EXPECT_EQ(numElemCount, 11);

  // Test that moving the padded tensor preserves properties.
  auto moved = std::move(T);
  EXPECT_EQ(moved.getUnpaddedSizeInBytes(), bytes);
  EXPECT_EQ(moved.getSizeInBytes(), paddedBytes);

  // Test getting an unowned tensor from a padded tensor.
  auto copy = moved.getUnowned();
  EXPECT_EQ(copy.getUnpaddedSizeInBytes(), bytes);
  EXPECT_EQ(copy.getSizeInBytes(), paddedBytes);

  // Test that a clone of a partial is still partial.
  auto clone = moved.clone();
  EXPECT_EQ(clone.getUnpaddedSizeInBytes(), bytes);
  EXPECT_EQ(clone.getSizeInBytes(), paddedBytes);

  // Test that assigning a Tensor to a partial is still partial.
  Tensor assigned;
  assigned.assign(&moved);
  EXPECT_EQ(assigned.getUnpaddedSizeInBytes(), bytes);
  EXPECT_EQ(assigned.getSizeInBytes(), paddedBytes);

  // Check that when we reset a partial Tensor with the same Type, but without
  // requesting that the reset stay partial, we do not get the same pointer
  // back: the storage should have been reallocated at the full padded size.
  char *oldPtr = assigned.getUnsafePtr();
  assigned.reset(paddedType);
  EXPECT_NE(assigned.getUnsafePtr(), oldPtr);
}
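
// Working definitions used by the partial/padded checks above (as exercised
// here, not a formal API statement): a "partial" tensor wraps a buffer whose
// type describes more elements than are actually backed by data, so
// getSizeInBytes() reports the full padded type while
// getUnpaddedSizeInBytes() and getRealNumElements() report only the portion
// that is really present, and moves, clones, getUnowned() and assign() all
// preserve that partial-ness.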

TEST(CustomAlignedTensor, sizes) {
  Type T(ElemKind::FloatTy, {2, 2, 1}, {12, 8, 1});
  Tensor aligned(T);

  // EXPECT_EQ(aligned.size(), 4);
  // EXPECT_EQ(aligned.actualSize(), 12);
}

TEST(CustomAlignedTensor, iteration) {
  Type T(ElemKind::FloatTy, {2, 2, 1}, {12, 8, 1});
  Tensor aligned(T);

  auto H = aligned.getHandle<float>();

  std::vector<float> content = {13.5f, -3.3f, 4.2f, 33.0f};
  H.at({0, 0, 0}) = content[0];
  H.at({0, 1, 0}) = content[1];
  H.at({1, 0, 0}) = content[2];
  H.at({1, 1, 0}) = content[3];

  std::vector<float> elems;
  for (auto e : H) {
    elems.push_back(e);
  }

  EXPECT_TRUE(elems == content);
}

TEST(CustomAlignedTensor, raw) {
  Type T(ElemKind::FloatTy, {2, 2, 1}, {12, 8, 1});
  Tensor aligned(T);
  aligned.zero();

  auto H = aligned.getHandle<float>();

  std::vector<float> content{13.5f, -3.3f, 4.2f, 33.0f};
  H.at({0, 0, 0}) = content[0];
  H.at({0, 1, 0}) = content[1];
  H.at({1, 0, 0}) = content[2];
  H.at({1, 1, 0}) = content[3];

  std::vector<float> elems;
  for (size_t i = 0; i < 12; i++) {
    elems.push_back(H.raw(i));
  }

  std::vector<float> alignedContent = {
      13.5, 0, -3.3, 0, 0, 0, 4.2, 0, 33, 0, 0, 0,
  };

  EXPECT_TRUE(elems == alignedContent);
}
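
// How the {12, 8, 1} alignments play out in the raw layout above (byte
// alignments per dimension, as this test's expected data implies): with float
// elements, a dim-1 slice of 1 element (4 bytes) is padded up to 8 bytes
// (2 floats), and a dim-0 slice of 2 * 2 floats (16 bytes) is padded up to
// 24 bytes (6 floats), so the {2, 2, 1} tensor occupies 12 raw float slots
// while only 4 of them hold real elements.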

TEST(CustomAlignedTensor, getUnowned) {
  Type T(ElemKind::FloatTy, {2, 2, 1}, {12, 8, 1});
  Tensor aligned(T);

  auto H = aligned.getHandle<float>();
  // Fill everything including pads with 1.0
  for (size_t i = 0; i < 12; i++) {
    H.raw(i) = 1.0;
  }

  std::vector<float> content{13.5f, -3.3f, 4.2f, 33.0f};
  H.at({0, 0, 0}) = content[0];
  H.at({0, 1, 0}) = content[1];
  H.at({1, 0, 0}) = content[2];
  H.at({1, 1, 0}) = content[3];

  Tensor UO = aligned.getUnowned({1, 2, 2}, {1, 1, 0});
  EXPECT_EQ(UO.size(), 4);
  EXPECT_EQ(UO.actualSize(), 4);
  EXPECT_EQ(UO.getHandle<float>().at({0, 0, 0}), 33);
  EXPECT_EQ(UO.getHandle<float>().at({0, 0, 1}), 1);
  EXPECT_EQ(UO.getHandle<float>().at({0, 1, 0}), 1);
  EXPECT_EQ(UO.getHandle<float>().at({0, 1, 1}), 1);
  EXPECT_EQ(UO.getHandle<float>().raw(0), 33);
  EXPECT_EQ(UO.getHandle<float>().raw(1), 1);
  EXPECT_EQ(UO.getHandle<float>().raw(2), 1);
  EXPECT_EQ(UO.getHandle<float>().raw(3), 1);
}

TEST(CustomAlignedTensor, getDimForPtr) {
  Type T(ElemKind::FloatTy, {2, 2, 1}, {12, 8, 1});
  Tensor aligned(T);

  auto H = aligned.getHandle<float>();

  EXPECT_EQ(H.getDimForPtr(0, 0), 0);
  EXPECT_EQ(H.getDimForPtr(1, 0), 0);
  EXPECT_EQ(H.getDimForPtr(2, 0), 0);

  EXPECT_EQ(H.getDimForPtr(0, 1), 0);
  EXPECT_EQ(H.getDimForPtr(1, 1), 1);
  EXPECT_EQ(H.getDimForPtr(2, 1), 0);

  EXPECT_EQ(H.getDimForPtr(0, 2), 1);
  EXPECT_EQ(H.getDimForPtr(1, 2), 0);
  EXPECT_EQ(H.getDimForPtr(2, 2), 0);

  EXPECT_EQ(H.getDimForPtr(0, 3), 1);
  EXPECT_EQ(H.getDimForPtr(1, 3), 1);
  EXPECT_EQ(H.getDimForPtr(2, 3), 0);
}

// Check that we iterate over tensors correctly: unit test for a bug wherein
// we used size() instead of actualSize() when treating the data as a raw
// pointer.
TEST(Tensor, sameAlignment) {
  Type Ty1(ElemKind::Float16Ty, {2, 1}, {4, 1});
  Type Ty2(ElemKind::Float16Ty, {2, 1}, {4, 1});
  Tensor T1(Ty1);
  Tensor T2(Ty2);
  auto T1H = T1.getHandle<float16_t>();
  auto T2H = T2.getHandle<float16_t>();
  T1H.clear(0);
  T2H.clear(1);
  T1H.at({0, 0}) = T2H.at({0, 0}) = 1;
  T1H.at({1, 0}) = T2H.at({1, 0}) = 2;

  EXPECT_TRUE(T1.isEqual(T2));
  T2H.at({1, 0}) = 1;
  EXPECT_FALSE(T1.isEqual(T2));
}

// Check that our tensor iteration is aware of padding: unit-test that checks
// we iterate correctly when accessing elements in tensors that have different
// alignment requirements.
TEST(Tensor, differentAlignment) {
  Type Ty1(ElemKind::Float16Ty, {2, 1}, {4, 1});
  Type Ty2(ElemKind::Float16Ty, {2, 1}, {2, 1});
  Tensor T1(Ty1);
  Tensor T2(Ty2);
  auto T1H = T1.getHandle<float16_t>();
  auto T2H = T2.getHandle<float16_t>();
  T1H.at({0, 0}) = T2H.at({0, 0}) = 1;
  T1H.at({1, 0}) = T2H.at({1, 0}) = 2;

  EXPECT_TRUE(T1.isEqual(T2));
  T2H.at({1, 0}) = 1;
  EXPECT_FALSE(T1.isEqual(T2));
}

// Check that writing/reading of tensor data to/from text files (including
// type information) works properly.
TEST(Tensor, accessToTextFile) {
  Tensor tensorRef = {0.75f, 0.23f, 0.76f, 0.99f, 1.00f,
                      -0.78f, 0.23f, -0.97f, -0.37f, 0.00f};
  llvm::SmallString<64> path;
  auto tempFileRes = llvm::sys::fs::createTemporaryFile("tensor", ".txt", path);
  if (tempFileRes.value() != 0) {
    FAIL() << "Failed to create temp file to write into.";
  }
  TensorSerializationOptions opts;
  opts.withType = true;
  dumpTensorToTextFile(tensorRef, path, opts);
  Tensor tensorTest;
  loadTensorFromTextFile(tensorTest, path, opts);
  llvm::sys::fs::remove(path);

  auto handleRef = tensorRef.getHandle<>();
  auto handleTest = tensorTest.getHandle<>();

  EXPECT_EQ(handleRef.size(), handleTest.size());
  EXPECT_EQ(handleRef.actualSize(), handleTest.actualSize());
  for (size_t rcnt = 0; rcnt < tensorTest.actualSize(); rcnt++) {
    EXPECT_FLOAT_EQ(handleTest.raw(rcnt), handleRef.raw(rcnt));
  }
}

// Check that writing/reading of tensor data to/from binary files (including
// type information) works properly.
TEST(Tensor, accessToBinaryFile) {
  Tensor tensorRef = {0.75f, 0.23f, 0.76f, 0.99f, 1.00f,
                      -0.78f, 0.23f, -0.97f, -0.37f, 0.00f};
  llvm::SmallString<64> path;
  auto tempFileRes = llvm::sys::fs::createTemporaryFile("tensor", ".bin", path);
  if (tempFileRes.value() != 0) {
    FAIL() << "Failed to create temp file to write into.";
  }
  TensorSerializationOptions opts;
  opts.withType = true;
  dumpTensorToBinaryFile(tensorRef, path, opts);
  Tensor tensorTest;
  loadTensorFromBinaryFile(tensorTest, path, opts);
  llvm::sys::fs::remove(path);

  auto handleRef = tensorRef.getHandle<>();
  auto handleTest = tensorTest.getHandle<>();

  EXPECT_EQ(handleRef.size(), handleTest.size());
  EXPECT_EQ(handleRef.actualSize(), handleTest.actualSize());
  for (size_t rcnt = 0; rcnt < tensorTest.actualSize(); rcnt++) {
    EXPECT_FLOAT_EQ(handleTest.raw(rcnt), handleRef.raw(rcnt));
  }
}

// Check that writing/reading of raw tensor data (without type information)
// to/from text files works properly.
TEST(Tensor, accessToRawTextFile) {
  Tensor tensorRef = {0.75f, 0.23f, 0.76f, 0.99f, 1.00f,
                      -0.78f, 0.23f, -0.97f, -0.37f, 0.00f};
  llvm::SmallString<64> path;
  auto tempFileRes = llvm::sys::fs::createTemporaryFile("tensor", ".txt", path);
  if (tempFileRes.value() != 0) {
    FAIL() << "Failed to create temp file to write into.";
  }
  TensorSerializationOptions opts;
  opts.withType = false;
  dumpTensorToTextFile(tensorRef, path, opts);
  Tensor tensorTest(ElemKind::FloatTy, {10});
  loadTensorFromTextFile(tensorTest, path, opts);
  llvm::sys::fs::remove(path);

  auto handleRef = tensorRef.getHandle<>();
  auto handleTest = tensorTest.getHandle<>();

  EXPECT_EQ(handleRef.size(), handleTest.size());
  EXPECT_EQ(handleRef.actualSize(), handleTest.actualSize());
  for (size_t rcnt = 0; rcnt < tensorTest.actualSize(); rcnt++) {
    EXPECT_FLOAT_EQ(handleTest.raw(rcnt), handleRef.raw(rcnt));
  }
}

#ifdef WITH_PNG

/// Test dumping input tensors to a file and loading them back.
static void tensorInputWriterLoader(ImageLayout outImageLayout,
                                    ImageLayout inImageLayout) {
  Tensor tensorRef(Type{ElemKind::FloatTy, {1, 2, 4, 3}});
  tensorRef.getHandle<>() = {0.75f, 0.23f, 0.76f, 0.99f, 1.00f, -0.78f,
                             0.23f, -0.97f, -0.37f, 0.00f, 0.25f, 0.13f,
                             0.66f, 0.69f, 2.00f, -0.18f, 0.43f, -0.92f,
                             -0.33f, 0.01f, 0.21f, 0.11f, 0.13f, 0.87f};
  llvm::SmallString<64> path;
  auto tempFileRes = llvm::sys::fs::createTemporaryFile("tensor", ".txt", path);
  if (tempFileRes.value() != 0) {
    FAIL() << "Failed to create temp file to write into.";
  }
  dumpInputTensorToFileWithType({path.str().str()}, tensorRef, outImageLayout);
  //
  Tensor tensorTest;
  loadInputImageFromFileWithType({path.str().str()}, &tensorTest,
                                 inImageLayout);

  if (outImageLayout == ImageLayout::NHWC) {
    Tensor transposed;
    tensorRef.transpose(&transposed, NHWC2NCHW);
    tensorRef = std::move(transposed);
  }

  if (inImageLayout == ImageLayout::NHWC) {
    Tensor transposed;
    tensorTest.transpose(&transposed, NHWC2NCHW);
    tensorTest = std::move(transposed);
  }

  auto handleRef = tensorRef.getHandle<>();
  auto handleTest = tensorTest.getHandle<>();
  EXPECT_EQ(handleRef.size(), handleTest.size());
  EXPECT_EQ(tensorRef.dims(), tensorTest.dims());
  for (size_t rcnt = 0, e = tensorTest.actualSize(); rcnt < e; rcnt++) {
    EXPECT_FLOAT_EQ(handleTest.raw(rcnt), handleRef.raw(rcnt));
  }
}

TEST(Tensor, tensorInputWriterLoaderNCHW) {
  tensorInputWriterLoader(ImageLayout::NCHW, ImageLayout::NCHW);
}

TEST(Tensor, tensorInputWriterLoaderNCHW_NHWC) {
  tensorInputWriterLoader(ImageLayout::NCHW, ImageLayout::NHWC);
}

TEST(Tensor, tensorInputWriterLoaderNHWC_NCHW) {
  tensorInputWriterLoader(ImageLayout::NHWC, ImageLayout::NCHW);
}

TEST(Tensor, tensorInputWriterLoaderNHWC) {
  tensorInputWriterLoader(ImageLayout::NHWC, ImageLayout::NHWC);
}

// Test custom input tensor loader
TEST(Tensor, tensorCustomInputLoader) {
  bool entered = false;
  auto loader = [&entered](Tensor &T, llvm::StringRef filename,
                           ImageLayout imageLayout) {
    EXPECT_EQ(imageLayout, ImageLayout::NHWC);
    EXPECT_EQ(filename, "input.tensor");
    T.reset(ElemKind::FloatTy, {1, 2, 3, 4});
    entered = true;
  };
  Tensor testT(Type{ElemKind::Int32ITy, {4, 4, 4, 4}});
  registerInputTensorFileLoader(loader);
  loadInputImageFromFileWithType({"input.tensor"}, &testT, ImageLayout::NHWC);
  EXPECT_EQ(entered, true);
  EXPECT_EQ(testT.dims(), llvm::ArrayRef<dim_t>({1, 2, 3, 4}));
}

#endif // WITH_PNG

// Check that writing/reading of raw tensor data (without type information)
// to/from binary files works properly.
TEST(Tensor, accessToRawBinaryFile) {
  Tensor tensorRef = {0.75f, 0.23f, 0.76f, 0.99f, 1.00f,
                      -0.78f, 0.23f, -0.97f, -0.37f, 0.00f};
  llvm::SmallString<64> path;
  auto tempFileRes = llvm::sys::fs::createTemporaryFile("tensor", ".bin", path);
  if (tempFileRes.value() != 0) {
    FAIL() << "Failed to create temp file to write into.";
  }
  TensorSerializationOptions opts;
  opts.withType = false;
  dumpTensorToBinaryFile(tensorRef, path, opts);
  Tensor tensorTest(ElemKind::FloatTy, {10});
  loadTensorFromBinaryFile(tensorTest, path, opts);
  llvm::sys::fs::remove(path);

  auto handleRef = tensorRef.getHandle<>();
  auto handleTest = tensorTest.getHandle<>();

  EXPECT_EQ(handleRef.size(), handleTest.size());
  EXPECT_EQ(handleRef.actualSize(), handleTest.actualSize());
  for (size_t rcnt = 0; rcnt < tensorTest.actualSize(); rcnt++) {
    EXPECT_FLOAT_EQ(handleTest.raw(rcnt), handleRef.raw(rcnt));
  }
}

/// Test converting a UInt4FusedFP16QTy tensor to a UInt8FusedQTy tensor.
TEST(Tensor, typeConvert_UInt4FusedFP16QTy_To_UInt8FusedQTY) {
  testConvertToUInt8FusedQTy<float16_t>(ElemKind::UInt4FusedFP16QTy, 10, 10);
}

/// Test converting a UInt8FusedFP16QTy tensor to a UInt8FusedQTy tensor.
TEST(Tensor, typeConvert_UInt8FusedFP16QTy_To_UInt8FusedQTy) {
  testConvertToUInt8FusedQTy<float16_t>(ElemKind::UInt8FusedFP16QTy, 10, 10);
}

/// Test converting a UInt4FusedQTy tensor to a UInt8FusedQTy tensor.
TEST(Tensor, typeConvert_UInt4FusedQTy_To_UInt8FusedQTy) {
  testConvertToUInt8FusedQTy<float>(ElemKind::UInt4FusedQTy, 10, 10);
}
