1/**
2 * Copyright (c) Glow Contributors. See CONTRIBUTORS file.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#include "ImporterTestUtils.h"
17#include "glow/ExecutionEngine/ExecutionEngine.h"
18#include "glow/Graph/Graph.h"
19#include "glow/Importer/Caffe2ModelLoader.h"
20#include "gtest/gtest.h"
21
22#ifndef GLOW_DATA_PATH
23#define GLOW_DATA_PATH
24#endif
25
/// Fixture for all Caffe2 importer tests in this file. Constant folding at
/// load time is disabled for the duration of each test (and restored after),
/// because many tests below inspect the loaded Constants directly and would
/// otherwise see them folded away.
class Caffe2ImporterTest : public ::testing::Test {
protected:
  // By default constant folding at load time is enabled in general, but we do
  // many tests here loading Constants, so keep it false during these tests by
  // default.
  void SetUp() override { glow::setConstantFoldLoaderOpsFlag(false); }
  // Restore the global flag so tests outside this fixture see the default.
  void TearDown() override { glow::setConstantFoldLoaderOpsFlag(true); }
};
34
35using namespace glow;
36/// Test loading of Elementwise Unary Ops floating point.
37static void testEltwiseUnaryOpFloat(std::string fileName,
38 llvm::ArrayRef<dim_t> inputShape,
39 std::string input_name, float delta,
40 const std::function<float(float)> &op) {
41 ExecutionEngine EE{};
42 auto &mod = EE.getModule();
43 Function *F = mod.createFunction("main");
44 std::string NetDescFilename =
45 std::string(GLOW_DATA_PATH "tests/models/caffe2Models/") + fileName;
46 std::string NetWeightFilename(
47 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
48
49 PlaceholderBindings bindings;
50 Placeholder *graphOutputVar;
51 Type input_type(ElemKind::FloatTy, inputShape);
52 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
53 {input_name.c_str()}, {&input_type}, *F);
54 graphOutputVar = EXIT_ON_ERR(caffe2LD.getSingleOutput());
55 auto PH = mod.getPlaceholderByNameSlow(input_name);
56 auto *inTensor = bindings.allocate(PH);
57 inTensor->getHandle().randomize(-10.0, 10.0, mod.getPRNG());
58 EE.compile(CompilationMode::Infer);
59 bindings.allocate(mod.getPlaceholders());
60 EE.run(bindings);
61 auto result = bindings.get(graphOutputVar)->getHandle();
62 auto inHandle = inTensor->getHandle();
63 ASSERT_TRUE(result.dims() == inputShape);
64 for (size_t i = 0; i < result.getType().size(); i++) {
65 EXPECT_NEAR(result.raw(i), op(inHandle.raw(i)), delta);
66 }
67}
68
69TEST_F(Caffe2ImporterTest, importExp) {
70 testEltwiseUnaryOpFloat("exp_op_net.pbtxt", {1, 2, 4, 3}, "data", 0.002,
71 [](float a) { return std::exp(a); });
72}
73
74/// Test loading PReLU op from a Caffe2 model.
75/// The input is N*C*H*W (1*2*3*3), the slope is 2.
76TEST_F(Caffe2ImporterTest, importPReLU) {
77 ExecutionEngine EE{};
78 auto &mod = EE.getModule();
79 Function *F = mod.createFunction("main");
80
81 std::string NetDescFilename(GLOW_DATA_PATH
82 "tests/models/caffe2Models/prelu.pbtxt");
83 std::string NetWeightFilename(
84 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
85
86 Placeholder *output;
87 PlaceholderBindings bindings;
88
89 // Destroy the loader after the graph is loaded since the following execution
90 // will not depend on anything from the loader.
91 {
92 Tensor data(ElemKind::FloatTy, {1, 2, 3, 3});
93 data.getHandle() = {-2.0, -0.5, 0, 1, 2, 3, 4, 5, 6,
94 -1.5, -2.5, 7, 8, 9, 10, 11, 12, 13};
95 Tensor slope(ElemKind::FloatTy, {2});
96 slope.getHandle() = {0.1, 0.2};
97 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
98 {"prelu_test_input", "slope"},
99 {&data.getType(), &slope.getType()}, *F);
100 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
101
102 bindings.allocate(mod.getPlaceholders());
103 updateInputPlaceholdersByName(bindings, &mod, {"prelu_test_input", "slope"},
104 {&data, &slope});
105 }
106
107 auto res = bindings.get(output);
108 EE.compile(CompilationMode::Infer);
109
110 EE.run(bindings);
111 auto result = res->getHandle();
112 std::vector<dim_t> expectedDims = {1, 2, 3, 3};
113 std::vector<float> expectedValues = {-0.2, -0.05, 0, 1, 2, 3, 4, 5, 6,
114 -0.3, -0.5, 7, 8, 9, 10, 11, 12, 13};
115 EXPECT_TRUE(result.dims().vec() == expectedDims);
116 for (size_t i = 0; i < expectedValues.size(); i++)
117 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
118}
119
120/// Test loading conv op from a Caffe2 model.
121/// The input is N*C*H*W (1*1*3*3), the kernel is 2,
122/// stride is 1, pad is 1, group is 1, dilation is 2.
123TEST_F(Caffe2ImporterTest, importConv) {
124 ExecutionEngine EE{};
125 auto &mod = EE.getModule();
126 Function *F = mod.createFunction("main");
127
128 std::string NetDescFilename(GLOW_DATA_PATH
129 "tests/models/caffe2Models/predict_net.pbtxt");
130 std::string NetWeightFilename(GLOW_DATA_PATH
131 "tests/models/caffe2Models/init_net.pbtxt");
132
133 Placeholder *output;
134 PlaceholderBindings bindings;
135
136 // Destroy the loader after the graph is loaded since the following execution
137 // will not depend on anything from the loader.
138 {
139 Tensor data;
140 getNCHWData(&data, 1, 1, 3, 3);
141 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
142 {"gpu_0/data_0"}, {&data.getType()}, *F);
143 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
144
145 bindings.allocate(mod.getPlaceholders());
146 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
147 }
148
149 auto res = bindings.get(output);
150 EE.compile(CompilationMode::Infer);
151
152 EE.run(bindings);
153 auto result = res->getHandle();
154 std::vector<dim_t> expectedDims = {1, 1, 3, 3};
155 std::vector<float> expectedValues = {6, 10, 6, 10, 18, 10, 6, 10, 6};
156 EXPECT_TRUE(result.dims().vec() == expectedDims);
157 for (size_t i = 0; i < 3 * 3; i++)
158 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
159}
160
161/// Test loading ConvRelu op from a Caffe2 model.
162/// The input is N*C*H*W (1*1*3*3), the kernel is 2,
163/// stride is 1, pad is 1, group is 1.
164TEST_F(Caffe2ImporterTest, importConvRelu) {
165 ExecutionEngine EE{};
166 auto &mod = EE.getModule();
167 Function *F = mod.createFunction("main");
168
169 std::string NetDescFilename(
170 GLOW_DATA_PATH "tests/models/caffe2Models/convrelu_pred_net.pbtxt");
171 std::string NetWeightFilename(
172 GLOW_DATA_PATH "tests/models/caffe2Models/convrelu_init_net.pbtxt");
173
174 Placeholder *output;
175 PlaceholderBindings bindings;
176
177 // Destroy the loader after the graph is loaded since the following execution
178 // will not depend on anything from the loader.
179 {
180 Tensor data;
181 getNCHWData(&data, 1, 1, 3, 3);
182 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
183 {"gpu_0/data_0"}, {&data.getType()}, *F);
184 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
185
186 bindings.allocate(mod.getPlaceholders());
187 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
188 }
189
190 // High level check on the content of the graph. We should have
191 // {transpose, transpose} => conv => relu => transpose => save
192 EXPECT_EQ(F->getNodes().size(), 6);
193 auto *saveNode = getSaveNodeFromDest(output);
194
195 auto *transNode1 =
196 llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
197 ASSERT_TRUE(transNode1);
198 auto *reluNode = llvm::dyn_cast<ReluNode>(transNode1->getInput().getNode());
199 ASSERT_TRUE(reluNode);
200 auto *convNode =
201 llvm::dyn_cast<ConvolutionNode>(reluNode->getInput().getNode());
202 ASSERT_TRUE(convNode);
203 auto *transNode2 =
204 llvm::dyn_cast<TransposeNode>(convNode->getInput().getNode());
205 ASSERT_TRUE(transNode2);
206 auto *transNode3 =
207 llvm::dyn_cast<TransposeNode>(convNode->getFilter().getNode());
208 ASSERT_TRUE(transNode3);
209
210 auto res = bindings.get(output);
211 EE.compile(CompilationMode::Infer);
212
213 EE.run(bindings);
214 auto result = res->getHandle();
215 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
216 std::vector<float> expectedValues = {2, 3, 5, 4, 5, 10, 14, 9,
217 11, 22, 26, 15, 8, 15, 17, 10};
218 EXPECT_TRUE(result.dims().vec() == expectedDims);
219 for (size_t i = 0; i < 4 * 4; i++)
220 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
221}
222
223/// Test loading conv op from a Caffe2 model.
224/// The input is N*H*W*C (1*3*3*1), the kernel is 2,
225/// stride is 1, pad is 1, group is 1.
226TEST_F(Caffe2ImporterTest, convNHWC) {
227 ExecutionEngine EE{};
228 auto &mod = EE.getModule();
229 Function *F = mod.createFunction("main");
230
231 std::string NetDescFilename(
232 GLOW_DATA_PATH "tests/models/caffe2Models/conv_nhwc_predict_net.pbtxt");
233 std::string NetWeightFilename(
234 GLOW_DATA_PATH "tests/models/caffe2Models/conv_nhwc_init_net.pbtxt");
235
236 Placeholder *output;
237 PlaceholderBindings bindings;
238
239 Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});
240
241 // Destroy the loader after the graph is loaded since the following execution
242 // will not depend on anything from the loader.
243 {
244 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
245 {&inputs.getType()}, *F);
246 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
247 }
248
249 // High level check on the content of the graph. We have 1 conv, 1 transpose,
250 // and1 save.
251 EXPECT_EQ(F->getNodes().size(), 3);
252 auto *saveNode = getSaveNodeFromDest(output);
253 auto *convNode =
254 llvm::dyn_cast<ConvolutionNode>(saveNode->getInput().getNode());
255 ASSERT_TRUE(convNode);
256 auto *transposeNode = llvm::dyn_cast<TransposeNode>(convNode->getFilter());
257 ASSERT_TRUE(transposeNode);
258
259 // We have 2 placeholders: 1 input and 1 output.
260 EXPECT_EQ(mod.getPlaceholders().size(), 2);
261 // We have 2 constants: Weights and bias.
262 EXPECT_EQ(mod.getConstants().size(), 2);
263}
264
265/// Test loading ChannelwiseQuantizedConvolutionNode op from a Caffe2 model.
266/// The input is N*H*W*C (1*1*1*4), the kernel is 1, stride is 1, pad is 1,
267/// group is 2.
268TEST_F(Caffe2ImporterTest, convGroupQuantized) {
269 ExecutionEngine EE{};
270 auto &mod = EE.getModule();
271 Function *F = mod.createFunction("main");
272
273 std::string NetDescFilename(
274 GLOW_DATA_PATH
275 "tests/models/caffe2Models/conv_group_quantized_pred_net.pbtxt");
276 std::string NetWeightFilename(
277 GLOW_DATA_PATH
278 "tests/models/caffe2Models/conv_group_quantized_init_net.pbtxt");
279
280 Placeholder *output;
281 PlaceholderBindings bindings;
282
283 Tensor input(ElemKind::Int8QTy, {1, 1, 1, 4}, 1.0, 0);
284
285 // Destroy the loader after the graph is loaded since the following execution
286 // will not depend on anything from the loader.
287 {
288 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
289 {&input.getType()}, *F);
290 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
291 }
292
293 // High level check on the content of the graph. We have 1
294 // ChannelwiseQuantizedConvolutionNode and 1 save.
295 EXPECT_EQ(F->getNodes().size(), 2);
296 auto *saveNode = getSaveNodeFromDest(output);
297 auto *groupwiseConv = llvm::dyn_cast<ChannelwiseQuantizedConvolutionNode>(
298 saveNode->getInput().getNode());
299 ASSERT_TRUE(groupwiseConv);
300
301 // Check params.
302 std::vector<unsigned> expectedKernelsStridesAndDilation = {1, 1};
303 std::vector<unsigned> expectedPads = {1, 1, 1, 1};
304 EXPECT_EQ(groupwiseConv->getKernels(),
305 llvm::makeArrayRef(expectedKernelsStridesAndDilation));
306 EXPECT_EQ(groupwiseConv->getStrides(),
307 llvm::makeArrayRef(expectedKernelsStridesAndDilation));
308 EXPECT_EQ(groupwiseConv->getPads(), llvm::makeArrayRef(expectedPads));
309 EXPECT_EQ(groupwiseConv->getGroup(), 2);
310 EXPECT_EQ(groupwiseConv->getDilation(),
311 llvm::makeArrayRef(expectedKernelsStridesAndDilation));
312
313 // Check constant inputs.
314 Constant *filterConstant =
315 llvm::dyn_cast<Constant>(groupwiseConv->getFilter().getNode());
316 Constant *biasConstant =
317 llvm::dyn_cast<Constant>(groupwiseConv->getBias().getNode());
318 Constant *filterScalesConstant =
319 llvm::dyn_cast<Constant>(groupwiseConv->getFilterScales().getNode());
320 Constant *filterOffsetsConstant =
321 llvm::dyn_cast<Constant>(groupwiseConv->getFilterOffsets().getNode());
322 Constant *biasScalesConstant =
323 llvm::dyn_cast<Constant>(groupwiseConv->getBiasScales().getNode());
324 Constant *biasOffsetsConstant =
325 llvm::dyn_cast<Constant>(groupwiseConv->getBiasOffsets().getNode());
326
327 ASSERT_TRUE(filterConstant);
328 ASSERT_TRUE(biasConstant);
329 ASSERT_TRUE(filterScalesConstant);
330 ASSERT_TRUE(filterOffsetsConstant);
331 ASSERT_TRUE(biasScalesConstant);
332 ASSERT_TRUE(biasOffsetsConstant);
333
334 const auto filterH = filterConstant->getPayload().getHandle<int8_t>();
335 const auto biasH = biasConstant->getPayload().getHandle<float>();
336 const auto filterScalesH =
337 filterScalesConstant->getPayload().getHandle<float>();
338 const auto filterOffsetsH =
339 filterOffsetsConstant->getPayload().getHandle<int32_t>();
340 const auto biasScalesH = biasScalesConstant->getPayload().getHandle<float>();
341 const auto biasOffsetsH =
342 biasOffsetsConstant->getPayload().getHandle<int32_t>();
343
344 for (size_t i = 0; i < filterH.size(); ++i) {
345 EXPECT_EQ(filterH.raw(i), i % 2);
346 }
347
348 for (size_t i = 0; i < biasH.size(); ++i) {
349 EXPECT_EQ(biasH.raw(i), 7.0);
350 }
351
352 for (size_t i = 0; i < filterScalesH.size(); ++i) {
353 EXPECT_EQ(filterScalesH.raw(i), 6.0f);
354 }
355
356 for (size_t i = 0; i < filterOffsetsH.size(); ++i) {
357 EXPECT_EQ(filterOffsetsH.raw(i), 5);
358 }
359
360 for (size_t i = 0; i < biasScalesH.size(); ++i) {
361 float matmulScale = filterScalesH.raw(i) * input.getType().getScale();
362 EXPECT_EQ(biasScalesH.raw(i), matmulScale);
363 }
364
365 for (size_t i = 0; i < biasOffsetsH.size(); ++i) {
366 EXPECT_EQ(biasOffsetsH.raw(i), 0);
367 }
368
369 // We have 2 placeholders: 1 input and 1 output.
370 EXPECT_EQ(mod.getPlaceholders().size(), 2);
371 // We have 6 constants: Bias, Filter, FilterScales, FilterOffsets, BiasScales
372 // and BiasOffsets.
373 EXPECT_EQ(mod.getConstants().size(), 6);
374}
375
376/// Helper method to run the ConvTranspose operator test cases.
377/// \p filename contains the model .onnxtxt.
378/// \p expectedDims: output Tensor dimensions.
379/// \p expectedValues : output Tensor values expected.
380/// The input is N*C*H*W (1*1*2*2), the kernels is {3, 3},
381/// strides is {1, 1}, group is 1. Pads can vary.
382static void convTransposeTestHelper(std::string &netname, std::string &initname,
383 llvm::ArrayRef<dim_t> expectedDims,
384 llvm::ArrayRef<float> expectedValues) {
385 ExecutionEngine EE{};
386 auto &mod = EE.getModule();
387 Function *F = mod.createFunction("main");
388
389 std::string NetDescFilename =
390 std::string(GLOW_DATA_PATH "tests/models/caffe2Models/") + netname;
391
392 std::string NetWeightFilename =
393 std::string(GLOW_DATA_PATH "tests/models/caffe2Models/") + initname;
394
395 Placeholder *output;
396 PlaceholderBindings bindings;
397
398 // Destroy the loader after the graph is loaded since the following execution
399 // will not depend on anything from the loader.
400 {
401 Tensor data;
402 getNCHWData(&data, 1, 1, 2, 2);
403 data.getHandle() = {2., 3., 4., 5.};
404
405 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
406 {"gpu_0/data_0"}, {&data.getType()}, *F);
407 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
408
409 bindings.allocate(mod.getPlaceholders());
410 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
411 }
412
413 auto res = bindings.get(output);
414 EE.compile(CompilationMode::Infer);
415
416 EE.run(bindings);
417 auto result = res->getHandle();
418
419 EXPECT_TRUE(result.dims() == expectedDims);
420 for (dim_t i = 0, e = expectedValues.size(); i < e; i++) {
421 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
422 }
423}
424
425/// Test loading ConvTranspose op from a ONNX model.
426/// The input is N*C*H*W (1*1*2*2), the kernels is {3, 3},
427/// strides is {1, 1}, pads is {0, 0, 0, 0}, group is 1.
428TEST(caffe2, importConvTranspose) {
429 std::string netname("convtranspose.pbtxt");
430 std::string initname("convtranspose_init.pbtxt");
431 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
432 std::vector<float> expectedValues = {5, 13, 18, 13, 19, 50, 64, 42,
433 37, 92, 106, 66, 33, 77, 86, 51};
434 convTransposeTestHelper(netname, initname, expectedDims, expectedValues);
435}
436
437/// Test loading ConvTranspose op from a ONNX model.
438/// The input is N*C*H*W (1*1*2*2), the kernels is {3, 3},
439/// strides is {1, 1}, pads is {1, 1, 1, 1}, group is 1.
440TEST(onnx, importConvTransposePads) {
441 std::string netname("convtranspose_pads.pbtxt");
442 std::string initname("convtranspose_init.pbtxt");
443 std::vector<dim_t> expectedDims = {1, 1, 2, 2};
444 std::vector<float> expectedValues = {50, 64, 92, 106};
445 convTransposeTestHelper(netname, initname, expectedDims, expectedValues);
446}
447
448/// Test loading conv op from a Caffe2 model.
449/// The input is N*H*W*C (1*3*3*1), the kernel is 2,
450/// stride is 1, pad is 1, group is 1.
451TEST(caffe2, convTransposeNHWC) {
452 ExecutionEngine EE{};
453 auto &mod = EE.getModule();
454 Function *F = mod.createFunction("main");
455
456 std::string NetDescFilename(
457 GLOW_DATA_PATH "tests/models/caffe2Models/convtranspose_nhwc.pbtxt");
458 std::string NetWeightFilename(
459 GLOW_DATA_PATH "tests/models/caffe2Models/convtranspose_nhwc_init.pbtxt");
460
461 Placeholder *output;
462 PlaceholderBindings bindings;
463
464 Tensor inputs(ElemKind::FloatTy, {1, 2, 2, 1});
465 inputs.getHandle() = {2., 3., 4., 5.};
466
467 // Destroy the loader after the graph is loaded since the following execution
468 // will not depend on anything from the loader.
469 {
470 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
471 {&inputs.getType()}, *F);
472 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
473 }
474
475 // High level check on the content of the graph. We have 1 conv, 1 Transpose,
476 // and 1 save.
477 EXPECT_EQ(F->getNodes().size(), 3);
478 auto *saveNode = getSaveNodeFromDest(output);
479 auto *convTransposeNode =
480 llvm::dyn_cast<ConvTransposeNode>(saveNode->getInput().getNode());
481 ASSERT_TRUE(convTransposeNode);
482 auto *transposeNode =
483 llvm::dyn_cast<TransposeNode>(convTransposeNode->getFilter());
484 ASSERT_TRUE(transposeNode);
485
486 // We have 2 placeholders: 1 input and 1 output.
487 EXPECT_EQ(mod.getPlaceholders().size(), 2);
488 // We have 2 constants: Weights and bias.
489 EXPECT_EQ(mod.getConstants().size(), 2);
490}
491
492/// Test loading MaxPool with NHWC order input.
493TEST_F(Caffe2ImporterTest, maxPoolNHWC) {
494 ExecutionEngine EE{};
495 auto &mod = EE.getModule();
496 Function *F = mod.createFunction("main");
497
498 std::string NetDescFilename(
499 GLOW_DATA_PATH
500 "tests/models/caffe2Models/maxpool_nhwc_predict_net.pbtxt");
501 std::string NetWeightFilename(
502 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
503
504 Placeholder *output;
505 PlaceholderBindings bindings;
506
507 Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});
508
509 // Destroy the loader after the graph is loaded since the following execution
510 // will not depend on anything from the loader.
511 {
512 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
513 {&inputs.getType()}, *F);
514 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
515 }
516
517 // High level check on the content of the graph. We have 1 maxpool and 1 save.
518 EXPECT_EQ(F->getNodes().size(), 2);
519 auto *saveNode = getSaveNodeFromDest(output);
520 auto *maxPoolNode =
521 llvm::dyn_cast<MaxPoolNode>(saveNode->getInput().getNode());
522 ASSERT_TRUE(maxPoolNode);
523
524 // We have 2 placeholders: 1 input and 1 output.
525 EXPECT_EQ(mod.getPlaceholders().size(), 2);
526 // We have 0 constants.
527 EXPECT_EQ(mod.getConstants().size(), 0);
528}
529
530/// Test that loading MaxPool with legacy padding terminates early.
531TEST_F(Caffe2ImporterTest, maxPoolLegacyPadding) {
532 ExecutionEngine EE{};
533 auto &mod = EE.getModule();
534 Function *F = mod.createFunction("main");
535
536 std::string NetDescFilename(
537 GLOW_DATA_PATH
538 "tests/models/caffe2Models/maxpool_legacy_padding_predict_net.pbtxt");
539 std::string NetWeightFilename(
540 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
541
542 Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});
543
544 Error err(Error::success());
545 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
546 {&inputs.getType()}, *F, &err);
547
548 // Test that the error is the expected one.
549 auto msg = ERR_TO_STRING(std::move(err));
550 ASSERT_NE(msg.find("MaxPool nodes with legacy caffe padding are "
551 "deprecated and not supported."),
552 std::string::npos);
553}
554
555/// Test loading MaxPool with default NCHW order input.
556TEST_F(Caffe2ImporterTest, maxPool) {
557 ExecutionEngine EE{};
558 auto &mod = EE.getModule();
559 Function *F = mod.createFunction("main");
560
561 std::string NetDescFilename(
562 GLOW_DATA_PATH "tests/models/caffe2Models/maxpool_predict_net.pbtxt");
563 std::string NetWeightFilename(
564 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
565
566 Placeholder *output;
567 PlaceholderBindings bindings;
568
569 Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});
570
571 // Destroy the loader after the graph is loaded since the following execution
572 // will not depend on anything from the loader.
573 {
574 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
575 {&inputs.getType()}, *F);
576 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
577 }
578
579 // High level check on the content of the graph. We have 1 maxpool, 1 save
580 // and 2 transpose.
581 EXPECT_EQ(F->getNodes().size(), 4);
582 auto *saveNode = getSaveNodeFromDest(output);
583 auto *transNode1 =
584 llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
585 ASSERT_TRUE(transNode1);
586 auto *maxPoolNode =
587 llvm::dyn_cast<MaxPoolNode>(transNode1->getInput().getNode());
588 ASSERT_TRUE(maxPoolNode);
589 auto *transNode2 =
590 llvm::dyn_cast<TransposeNode>(maxPoolNode->getInput().getNode());
591 ASSERT_TRUE(transNode2);
592
593 // We have 2 placeholders: 1 input and 1 output.
594 EXPECT_EQ(mod.getPlaceholders().size(), 2);
595 // We have 0 constants.
596 EXPECT_EQ(mod.getConstants().size(), 0);
597}
598
/// Test loading AvgPool with NHWC order input.
TEST_F(Caffe2ImporterTest, avgPoolNHWC) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/avgpool_nhwc_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
                               {&inputs.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // High level check on the content of the graph. We have 1 avgpool and 1
  // save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *saveNode = getSaveNodeFromDest(output);
  auto *avgPoolNode =
      llvm::dyn_cast<AvgPoolNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(avgPoolNode);
  // For this NHWC model the loader must not count padded cells in the average.
  ASSERT_FALSE(avgPoolNode->getCountIncludePads());

  // We have 2 placeholders: 1 input and 1 output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
  // We have 0 constants.
  EXPECT_EQ(mod.getConstants().size(), 0);
}
637
/// Test loading AveragePool with default NCHW order input.
TEST_F(Caffe2ImporterTest, avgPool) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/avgpool_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
                               {&inputs.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // High level check on the content of the graph. We have 1 avgpool, 1 save
  // and 2 transpose.
  EXPECT_EQ(F->getNodes().size(), 4);
  auto *saveNode = getSaveNodeFromDest(output);
  auto *transNode1 =
      llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(transNode1);
  auto *avgPoolNode =
      llvm::dyn_cast<AvgPoolNode>(transNode1->getInput().getNode());
  ASSERT_TRUE(avgPoolNode);
  // For this NCHW model the loader counts padded cells in the average.
  ASSERT_TRUE(avgPoolNode->getCountIncludePads());
  auto *transNode2 =
      llvm::dyn_cast<TransposeNode>(avgPoolNode->getInput().getNode());
  ASSERT_TRUE(transNode2);

  // We have 2 placeholders: 1 input and 1 output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
  // We have 0 constants.
  EXPECT_EQ(mod.getConstants().size(), 0);
}
682
/// Test loading a concat node with add_axis.
/// Concat nodes with add_axis have a different semantic
/// than the plain glow concat.
/// concat A(dim0, dim1), B(dim0, dim1), ... 1, add_axis = 1
/// res = A, B...
/// C2 shape: dim0, #input, dim1, i.e., three dimensions.
/// Glow shape: dim0, #input x dim1, i.e., two dimensions.
///
/// To fill the gap between the two, glow issues a reshape
/// right after its concat.
TEST_F(Caffe2ImporterTest, concatAddAxis) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/concat_add_axis_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;

  Placeholder *output;
  Tensor inputs_0(ElemKind::FloatTy, {10, 7});
  Tensor inputs_1(ElemKind::FloatTy, {10, 7});
  Tensor inputs_2(ElemKind::FloatTy, {10, 7});
  inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  inputs_1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  inputs_2.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename,
        {"inputs_0", "inputs_1", "inputs_2"},
        {&inputs_0.getType(), &inputs_1.getType(), &inputs_2.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());

    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod,
                                  {"inputs_0", "inputs_1", "inputs_2"},
                                  {&inputs_0, &inputs_1, &inputs_2});
  }

  // Check that the shape of the output matches what Caffe2 expects: the
  // add_axis dimension (#inputs) is inserted at position 1.
  std::vector<dim_t> expectedDims = {10, 3, 7};
  EXPECT_TRUE(output->dims().vec() == expectedDims);

  auto res = bindings.get(output);
  EE.compile(CompilationMode::Infer);

  EE.run(bindings);
  // High level check on the content of the graph.
  // We have 1 reshape, 1 concat, and 1 save.
  EXPECT_EQ(F->getNodes().size(), 3);
  // We have three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);

  // Check that the graph has the expected shape,
  // starting from the output.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *reshape = llvm::dyn_cast<ReshapeNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(reshape);
  auto *concat = llvm::dyn_cast<ConcatNode>(reshape->getInput());
  ASSERT_TRUE(concat);
  // We will check that the inputs are correct within
  // the next loop.

  auto result = res->getHandle();

  // Check that the output matches the concatenation of
  // all the inputs.
  Tensor *inputs[] = {&inputs_0, &inputs_1, &inputs_2};
  for (dim_t i = 0; i < 3; ++i) {
    const auto inputsHandle = inputs[i]->getHandle();
    ASSERT_TRUE(llvm::isa<Placeholder>(concat->getInputs()[i]));

    for (dim_t row = 0; row < 10; ++row) {
      for (dim_t column = 0; column < 7; ++column) {
        EXPECT_FLOAT_EQ(result.at({row, i, column}),
                        inputsHandle.at({row, column}));
      }
    }
  }
}
769
770TEST_F(Caffe2ImporterTest, concatAddAxisAtEdge) {
771 ExecutionEngine EE{};
772 auto &mod = EE.getModule();
773 Function *F = mod.createFunction("main");
774
775 std::string NetDescFilename(
776 GLOW_DATA_PATH "tests/models/caffe2Models/concat_add_axis_at_edge.pbtxt");
777 std::string NetWeightFilename(
778 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
779
780 PlaceholderBindings bindings;
781
782 Placeholder *output;
783 const std::array<dim_t, 4> inputShape{7, 11, 13, 5};
784 Tensor inputs_0(ElemKind::FloatTy, inputShape);
785 Tensor inputs_1(ElemKind::FloatTy, inputShape);
786 Tensor inputs_2(ElemKind::FloatTy, inputShape);
787 inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
788 inputs_1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
789 inputs_2.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
790 // Destroy the loader after the graph is loaded since the following execution
791 // will not depend on anything from the loader.
792 {
793 Caffe2ModelLoader caffe2LD(
794 NetDescFilename, NetWeightFilename,
795 {"inputs_0", "inputs_1", "inputs_2"},
796 {&inputs_0.getType(), &inputs_1.getType(), &inputs_2.getType()}, *F);
797 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
798
799 bindings.allocate(mod.getPlaceholders());
800 updateInputPlaceholdersByName(bindings, &mod,
801 {"inputs_0", "inputs_1", "inputs_2"},
802 {&inputs_0, &inputs_1, &inputs_2});
803 }
804
805 const std::vector<Tensor *> inputs{&inputs_0, &inputs_1, &inputs_2};
806
807 // Check that the shape of the output matches what Caffe2 expects.
808 std::vector<dim_t> expectedDims{inputShape.begin(), inputShape.end()};
809 expectedDims.push_back(inputs.size());
810 EXPECT_EQ(expectedDims, output->dims().vec());
811
812 auto res = bindings.get(output);
813 EE.compile(CompilationMode::Infer);
814 EE.run(bindings);
815
816 auto result = res->getHandle();
817
818 // Check that the output matches the concatenation of all inputs.
819 LOG(INFO) << "inputs_0=" << inputs_0.toString();
820 LOG(INFO) << "inputs_1=" << inputs_1.toString();
821 LOG(INFO) << "inputs_2=" << inputs_2.toString();
822 LOG(INFO) << "result=" << result.clone().toString();
823
824 for (dim_t i = 0; i < inputs.size(); ++i) {
825 const auto inputsHandle = inputs[i]->getHandle();
826
827 for (dim_t dim1 = 0; dim1 < inputShape[0]; ++dim1) {
828 for (dim_t dim2 = 0; dim2 < inputShape[1]; ++dim2) {
829 for (dim_t dim3 = 0; dim3 < inputShape[2]; ++dim3) {
830 for (dim_t dim4 = 0; dim4 < inputShape[3]; ++dim4) {
831 EXPECT_FLOAT_EQ(result.at({dim1, dim2, dim3, dim4, i}),
832 inputsHandle.at({dim1, dim2, dim3, dim4}));
833 }
834 }
835 }
836 }
837 }
838}
839
840/// Test loading a regular concat node.
841TEST_F(Caffe2ImporterTest, concat) {
842 ExecutionEngine EE{};
843 auto &mod = EE.getModule();
844 Function *F = mod.createFunction("main");
845
846 std::string NetDescFilename(
847 GLOW_DATA_PATH "tests/models/caffe2Models/concat_predict_net.pbtxt");
848 std::string NetWeightFilename(
849 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
850
851 PlaceholderBindings bindings;
852 Placeholder *output;
853 Tensor inputs_0(ElemKind::FloatTy, {10, 7});
854 Tensor inputs_1(ElemKind::FloatTy, {10, 12});
855 Tensor inputs_2(ElemKind::FloatTy, {10, 5});
856 inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
857 inputs_1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
858 inputs_2.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
859 // Destroy the loader after the graph is loaded since the following execution
860 // will not depend on anything from the loader.
861 {
862 Caffe2ModelLoader caffe2LD(
863 NetDescFilename, NetWeightFilename,
864 {"inputs_0", "inputs_1", "inputs_2"},
865 {&inputs_0.getType(), &inputs_1.getType(), &inputs_2.getType()}, *F);
866 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
867
868 bindings.allocate(mod.getPlaceholders());
869 updateInputPlaceholdersByName(bindings, &mod,
870 {"inputs_0", "inputs_1", "inputs_2"},
871 {&inputs_0, &inputs_1, &inputs_2});
872 }
873
874 // Check that the shape of the output matches what Caffe2 expects.
875 std::vector<dim_t> expectedDims = {10, 24};
876 EXPECT_TRUE(output->dims().vec() == expectedDims);
877
878 bindings.allocate(mod.getPlaceholders());
879 auto res = bindings.get(output);
880 EE.compile(CompilationMode::Infer);
881
882 EE.run(bindings);
883 // High level check on the content of the graph.
884 // We have 1 concat, and 1 save.
885 EXPECT_EQ(F->getNodes().size(), 2);
886 // With have three inputs and one outputs.
887 EXPECT_EQ(mod.getPlaceholders().size(), 4);
888
889 auto result = res->getHandle();
890
891 // Check that the graph has the expected shape,
892 // starting from the output.
893 auto *saveNode = getSaveNodeFromDest(output);
894 auto *concat = llvm::dyn_cast<ConcatNode>(saveNode->getInput());
895 ASSERT_TRUE(concat);
896 // We will check that the inputs are correct within
897 // the next loop.
898
899 // Check that the output matches the concatenation of
900 // all the inputs.
901 Tensor *inputs[] = {&inputs_0, &inputs_1, &inputs_2};
902 dim_t columnsChecked = 0;
903 for (size_t i = 0; i < 3; ++i) {
904 const auto inputsHandle = inputs[i]->getHandle();
905 ASSERT_TRUE(llvm::isa<Placeholder>(concat->getInputs()[i]));
906
907 dim_t currentColumnWidth = inputs[i]->dims()[1];
908 for (dim_t row = 0; row < 10; ++row) {
909 for (dim_t column = 0; column < currentColumnWidth; ++column) {
910 EXPECT_FLOAT_EQ(result.at({row, columnsChecked + column}),
911 inputsHandle.at({row, column}));
912 }
913 }
914 columnsChecked += currentColumnWidth;
915 }
916}
917
918/// Test loading a batched matmul with transpose on RHS.
919TEST_F(Caffe2ImporterTest, batchedMatmulRHS) {
920 ExecutionEngine EE{};
921 auto &mod = EE.getModule();
922 Function *F = mod.createFunction("main");
923 std::string NetDescFilename(
924 GLOW_DATA_PATH
925 "tests/models/caffe2Models/matmul_trans_RHS_predict_net.pbtxt");
926 std::string NetWeightFilename(
927 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
928
929 Placeholder *output;
930 Tensor inputs_0(ElemKind::FloatTy, {3, 10, 7});
931 Tensor inputs_1(ElemKind::FloatTy, {10, 7});
932 inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
933 inputs_1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
934 // Destroy the loader after the graph is loaded since the following execution
935 // will not depend on anything from the loader.
936 {
937 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
938 {"inputs_0", "inputs_1"},
939 {&inputs_0.getType(), &inputs_1.getType()}, *F);
940 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
941 }
942
943 // Check that the shape of the output matches what Caffe2 expects.
944 std::vector<dim_t> expectedDims = {3, 10, 10};
945 EXPECT_TRUE(output->dims().vec() == expectedDims);
946 // High level check on the content of the graph.
947 // We have 1 transpose, 1 matmul, 1 save, and 2 reshapes.
948 EXPECT_EQ(F->getNodes().size(), 5);
949 // With have 2 inputs and one outputs.
950 EXPECT_EQ(mod.getPlaceholders().size(), 3);
951 // Check that the graph has the expected shape,
952 // starting from the output.
953 auto *saveNode = getSaveNodeFromDest(output);
954 auto *BMMN = llvm::dyn_cast<BatchMatMulNode>(saveNode->getInput().getNode());
955 ASSERT_TRUE(BMMN);
956 const dim_t batchMatmulDims[] = {3, 10, 10};
957 EXPECT_EQ(BMMN->getResult().dims(), llvm::makeArrayRef(batchMatmulDims));
958 EXPECT_TRUE(llvm::isa<Placeholder>(BMMN->getLHS()));
959 auto *tileRHS = llvm::dyn_cast<TileNode>(BMMN->getRHS());
960 ASSERT_TRUE(tileRHS);
961 auto *reshapeRHS = llvm::dyn_cast<ReshapeNode>(tileRHS->getInput());
962 ASSERT_TRUE(reshapeRHS);
963 auto *transposeRHS = llvm::dyn_cast<TransposeNode>(reshapeRHS->getInput());
964 ASSERT_TRUE(transposeRHS);
965 EXPECT_TRUE(llvm::isa<Placeholder>(transposeRHS->getInput()));
966 // Check that the last two dimensions are swapped.
967 const unsigned_t shuffle[] = {1, 0};
968 EXPECT_EQ(transposeRHS->getShuffle(), llvm::makeArrayRef(shuffle));
969 // We don't actually check that the output is correct, because this
970 // should be covered in the OperatorTest for MatMul already.
971}
972
973/// Test loading a parallel batched matmul.
974TEST_F(Caffe2ImporterTest, parallelBatchedMatmulRHS) {
975 ExecutionEngine EE{};
976 auto &mod = EE.getModule();
977 Function *F = mod.createFunction("main");
978 std::string NetDescFilename(
979 GLOW_DATA_PATH
980 "tests/models/caffe2Models/parallel_matmul_predict_net.pbtxt");
981 std::string NetWeightFilename(
982 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
983
984 Placeholder *output;
985 Tensor inputs_0(ElemKind::FloatTy, {3, 10, 7});
986 Tensor inputs_1(ElemKind::FloatTy, {3, 7, 10});
987 inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
988 inputs_1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
989 // Destroy the loader after the graph is loaded since the following execution
990 // will not depend on anything from the loader.
991 {
992 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
993 {"inputs_0", "inputs_1"},
994 {&inputs_0.getType(), &inputs_1.getType()}, *F);
995 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
996 }
997
998 // High level check on the content of the graph.
999 // We have a BatchMatMul and a Save.
1000 EXPECT_EQ(F->getNodes().size(), 2);
1001 // With have 2 inputs and one outputs.
1002 EXPECT_EQ(mod.getPlaceholders().size(), 3);
1003 // Check that the graph has the expected shape,
1004 // starting from the output.
1005 // Parallel Batched matmul is lowered to a sequence of slices, reshapes and
1006 // regular matmuls.
1007 auto *saveNode = getSaveNodeFromDest(output);
1008 auto *BMMN = llvm::dyn_cast<BatchMatMulNode>(saveNode->getInput());
1009 ASSERT_TRUE(BMMN);
1010
1011 const dim_t lhsDims[] = {3, 10, 7};
1012 EXPECT_EQ(BMMN->getLHS().dims(), llvm::makeArrayRef(lhsDims));
1013 const dim_t rhsDims[] = {3, 7, 10};
1014 EXPECT_EQ(BMMN->getRHS().dims(), llvm::makeArrayRef(rhsDims));
1015 const dim_t resultDims[] = {3, 10, 10};
1016 EXPECT_EQ(BMMN->getResult().dims(), llvm::makeArrayRef(resultDims));
1017
1018 // We don't actually check that the output is correct, because this
1019 // should be covered in the OperatorTest for MatMul already.
1020}
1021
1022TEST_F(Caffe2ImporterTest, batchMatMulManyDims) {
1023 const std::string NetDescFilename(
1024 GLOW_DATA_PATH "tests/models/caffe2Models/batched_matmul.pbtxt");
1025 const std::string NetWeightFilename(
1026 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
1027
1028 // Set of test cases: (lhs shape, rhs shape, expected result shape)
1029 const std::vector<std::vector<std::vector<dim_t>>> shape_cases{
1030 {{2, 2, 3, 4}, {2, 2, 4, 5}, {2, 2, 3, 5}},
1031 {{2, 2, 3, 4}, {2, 1, 4, 5}, {2, 2, 3, 5}},
1032 {{2, 2, 3, 4}, {2, 2, 2, 4, 5}, {2, 2, 2, 3, 5}},
1033 {{2, 2, 2, 3, 4}, {3, 1, 2, 2, 4, 5}, {3, 2, 2, 2, 3, 5}},
1034 {{2, 4, 5}, {3, 2, 5, 6}, {3, 2, 4, 6}},
1035 };
1036
1037 for (const auto &shapes : shape_cases) {
1038 ExecutionEngine EE{};
1039 auto &mod = EE.getModule();
1040 Function *F = mod.createFunction("main");
1041
1042 PlaceholderBindings bindings;
1043 Placeholder *output;
1044
1045 Tensor lhs{ElemKind::FloatTy, shapes[0]};
1046 const auto lhsSize = std::accumulate(lhs.dims().begin(), lhs.dims().end(),
1047 1, std::multiplies<>());
1048 std::vector<float> lhsData(lhsSize);
1049 std::iota(lhsData.begin(), lhsData.end(), 0);
1050 lhs.getHandle() = lhsData;
1051
1052 Tensor rhs{ElemKind::FloatTy, shapes[1]};
1053 const auto rhsSize = std::accumulate(rhs.dims().begin(), rhs.dims().end(),
1054 1, std::multiplies<>());
1055 std::vector<float> rhsData(rhsSize);
1056 std::iota(rhsData.begin(), rhsData.end(), 10);
1057 rhs.getHandle() = rhsData;
1058
1059 // Destroy the loader after the graph is loaded since the following
1060 // execution will not depend on anything from the loader.
1061 {
1062 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
1063 {"lhs", "rhs"},
1064 {&lhs.getType(), &rhs.getType()}, *F);
1065 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1066
1067 bindings.allocate(mod.getPlaceholders());
1068 updateInputPlaceholdersByName(bindings, &mod, {"lhs", "rhs"},
1069 {&lhs, &rhs});
1070 }
1071
1072 EXPECT_EQ(shapes[2], output->dims().vec());
1073
1074 auto res = bindings.get(output);
1075 EE.compile(CompilationMode::Infer);
1076 EE.run(bindings);
1077
1078 auto result = res->getHandle();
1079
1080 const auto resultSize = std::accumulate(
1081 output->dims().begin(), output->dims().end(), 1, std::multiplies<>());
1082 result.dump(llvm::errs(), resultSize);
1083 }
1084}
1085
1086/// Test loading a FC node : I * transpose(W) + B.
1087TEST_F(Caffe2ImporterTest, FC) {
1088 ExecutionEngine EE{};
1089 auto &mod = EE.getModule();
1090 Function *F = mod.createFunction("main");
1091
1092 std::string NetDescFilename(GLOW_DATA_PATH
1093 "tests/models/caffe2Models/fc_predict_net.pbtxt");
1094 std::string NetWeightFilename(GLOW_DATA_PATH
1095 "tests/models/caffe2Models/fc_init_net.pbtxt");
1096
1097 Placeholder *output;
1098 PlaceholderBindings bindings;
1099 // Destroy the loader after the graph is loaded since the following execution
1100 // will not depend on anything from the loader.
1101 {
1102 Tensor inputs(ElemKind::FloatTy, {2, 3});
1103 inputs.getHandle() = {1, 2, 3, 4, 5, 6};
1104
1105 // Weights and bias are read from NetWeightFilename. And the values are:
1106 // weights : {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
1107 // bias : {0.1f, 0.2f, 0.3f, 0.4f};
1108 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
1109 {&inputs.getType()}, *F);
1110 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1111 bindings.allocate(mod.getPlaceholders());
1112 updateInputPlaceholdersByName(bindings, &mod, {"inputs"}, {&inputs});
1113 }
1114
1115 // High level check on the content of the graph. We have 1 FC node,
1116 // 1 transpose, and 1 save.
1117 EXPECT_EQ(F->getNodes().size(), 3);
1118 auto *saveNode = getSaveNodeFromDest(output);
1119 auto *fcNode =
1120 llvm::dyn_cast<FullyConnectedNode>(saveNode->getInput().getNode());
1121 ASSERT_TRUE(fcNode);
1122 auto *transposeNode = llvm::dyn_cast<TransposeNode>(fcNode->getWeights());
1123 ASSERT_TRUE(transposeNode);
1124
1125 // Check the numerical values of the weights and biases.
1126 {
1127 const Constant *constant =
1128 llvm::dyn_cast<Constant>(transposeNode->getInput());
1129 ASSERT_TRUE(constant);
1130 const Tensor &weights = constant->getPayload();
1131 const std::vector<dim_t> expectedDimensions = {4, 3};
1132 const std::vector<float> expectedValues = {1.0f, 2.0f, 3.0f, 4.0f,
1133 5.0f, 6.0f, 7.0f, 8.0f,
1134 9.0f, 10.0f, 11.0f, 12.0f};
1135 EXPECT_EQ(expectedDimensions, weights.dims().vec());
1136 ASSERT_EQ(expectedValues.size(), weights.size());
1137 const auto elements = weights.getHandle();
1138 for (size_t i = 0; i < expectedValues.size(); ++i) {
1139 EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
1140 << "Where i = " << i;
1141 }
1142 }
1143 {
1144 const Constant *constant = mod.getConstantByName("bias");
1145 ASSERT_TRUE(constant);
1146 const Tensor &bias = constant->getPayload();
1147 const std::vector<dim_t> expectedDimensions = {4};
1148 const std::vector<float> expectedValues = {0.1f, 0.2f, 0.3f, 0.4f};
1149 EXPECT_EQ(expectedDimensions, bias.dims().vec());
1150 ASSERT_EQ(expectedValues.size(), bias.size());
1151 const auto elements = bias.getHandle();
1152 for (size_t i = 0; i < expectedValues.size(); ++i) {
1153 EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
1154 << "Where i = " << i;
1155 }
1156 }
1157
1158 // We don't actually check that the output is correct, because this is
1159 // already covered in the Operator.FC/* tests.
1160}
1161
1162/// Test loading a FC node : I * transpose(W) + B, where I is need to be
1163/// flatten.
1164TEST_F(Caffe2ImporterTest, FCWithFlatten) {
1165 ExecutionEngine EE{};
1166 auto &mod = EE.getModule();
1167 Function *F = mod.createFunction("main");
1168
1169 std::string NetDescFilename(
1170 GLOW_DATA_PATH "tests/models/caffe2Models/fc_4d_predict_net.pbtxt");
1171 std::string NetWeightFilename(
1172 GLOW_DATA_PATH "tests/models/caffe2Models/fc_4d_init_net.pbtxt");
1173
1174 Placeholder *output;
1175 PlaceholderBindings bindings;
1176
1177 {
1178 Tensor inputs(ElemKind::FloatTy, {1, 1, 1, 2048});
1179
1180 // Weights and bias are read from NetWeightFilename
1181 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
1182 {&inputs.getType()}, *F);
1183 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1184 bindings.allocate(mod.getPlaceholders());
1185 updateInputPlaceholdersByName(bindings, &mod, {"inputs"}, {&inputs});
1186 }
1187
1188 // High level check on the content of the graph. We have two Splats for
1189 // weights and bias, a reshape, Transpose for FC weights, an FC, another
1190 // reshape, and a save.
1191 EXPECT_EQ(F->getNodes().size(), 7);
1192
1193 auto finalShape = output->getType()->dims();
1194 std::vector<dim_t> expectedOutput{1, 1, 1, 9190};
1195 EXPECT_EQ(finalShape, llvm::makeArrayRef(expectedOutput));
1196
1197 auto *saveNode = getSaveNodeFromDest(output);
1198 auto *reshapeAfterNode =
1199 llvm::dyn_cast<ReshapeNode>(saveNode->getInput().getNode());
1200 ASSERT_TRUE(reshapeAfterNode);
1201 auto *fcNode = llvm::dyn_cast<FullyConnectedNode>(
1202 reshapeAfterNode->getInput().getNode());
1203 ASSERT_TRUE(fcNode);
1204 auto *reshape = llvm::dyn_cast<ReshapeNode>(fcNode->getInput());
1205 ASSERT_TRUE(reshape);
1206 auto *transpose = llvm::dyn_cast<TransposeNode>(fcNode->getWeights());
1207 ASSERT_TRUE(transpose);
1208
1209 // We don't actually check that the output is correct, because this is
1210 // already covered in the Operator.FCWithFlatten/* tests.
1211}
1212
1213/// Test loading a FCTransposed node: I * W + B
1214TEST_F(Caffe2ImporterTest, FCTransposed) {
1215 ExecutionEngine EE{};
1216 auto &mod = EE.getModule();
1217 Function *F = mod.createFunction("main");
1218
1219 std::string NetDescFilename(
1220 GLOW_DATA_PATH
1221 "tests/models/caffe2Models/fcTransposed_predict_net.pbtxt");
1222 std::string NetWeightFilename(
1223 GLOW_DATA_PATH "tests/models/caffe2Models/fcTransposed_init_net.pbtxt");
1224
1225 Placeholder *output;
1226 PlaceholderBindings bindings;
1227
1228 // Destroy the loader after the graph is loaded since the following execution
1229 // will not depend on anything from the loader.
1230 {
1231 Tensor inputs(ElemKind::FloatTy, {2, 3});
1232 inputs.getHandle() = {1, 2, 3, 4, 5, 6};
1233
1234 // Weights and bias are read from NetWeightFilename. And the values are:
1235 // weights : {1, 4, 7, 10, 2, 5, 8, 11, 3, 6, 9, 12};
1236 // bias : {0.1f, 0.2f, 0.3f, 0.4f};
1237 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
1238 {&inputs.getType()}, *F);
1239 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1240 bindings.allocate(mod.getPlaceholders());
1241 updateInputPlaceholdersByName(bindings, &mod, {"inputs"}, {&inputs});
1242 }
1243
1244 // High level check on the content of the graph. We have 1 FC and 1 save,
1245 EXPECT_EQ(F->getNodes().size(), 2);
1246 auto *saveNode = getSaveNodeFromDest(output);
1247 auto *fcNode =
1248 llvm::dyn_cast<FullyConnectedNode>(saveNode->getInput().getNode());
1249 ASSERT_TRUE(fcNode);
1250
1251 // Check the numerical values of the weights and biases.
1252 {
1253 const Constant *constant = mod.getConstantByName("weights");
1254 ASSERT_TRUE(constant);
1255 const Tensor &weights = constant->getPayload();
1256 const std::vector<dim_t> expectedDimensions = {3, 4};
1257 const std::vector<float> expectedValues = {1.0f, 4.0f, 7.0f, 10.0f, //
1258 2.0f, 5.0f, 8.0f, 11.0f, //
1259 3.0f, 6.0f, 9.0f, 12.0f};
1260 EXPECT_EQ(expectedDimensions, weights.dims().vec());
1261 ASSERT_EQ(expectedValues.size(), weights.size());
1262 const auto elements = weights.getHandle();
1263 for (size_t i = 0; i < expectedValues.size(); ++i) {
1264 EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
1265 << "Where i = " << i;
1266 }
1267 }
1268 {
1269 const Constant *constant = mod.getConstantByName("bias");
1270 ASSERT_TRUE(constant);
1271 const Tensor &bias = constant->getPayload();
1272 const std::vector<dim_t> expectedDimensions = {4};
1273 const std::vector<float> expectedValues = {0.1f, 0.2f, 0.3f, 0.4f};
1274 EXPECT_EQ(expectedDimensions, bias.dims().vec());
1275 ASSERT_EQ(expectedValues.size(), bias.size());
1276 const auto elements = bias.getHandle();
1277 for (size_t i = 0; i < expectedValues.size(); ++i) {
1278 EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
1279 << "Where i = " << i;
1280 }
1281 }
1282
1283 // We don't actually check that the output is correct, because this is
1284 // already covered in the Operator.FCWithFlatten/* tests.
1285}
1286
1287/// Test loading a FCTransposed node: I * W + B, where I is need to be flatten.
1288TEST_F(Caffe2ImporterTest, FCTransposedWithFlatten) {
1289 ExecutionEngine EE{};
1290 auto &mod = EE.getModule();
1291 Function *F = mod.createFunction("main");
1292
1293 std::string NetDescFilename(
1294 GLOW_DATA_PATH
1295 "tests/models/caffe2Models/fcTransposed_4d_predict_net.pbtxt");
1296 std::string NetWeightFilename(
1297 GLOW_DATA_PATH
1298 "tests/models/caffe2Models/fcTransposed_4d_init_net.pbtxt");
1299
1300 Placeholder *output;
1301 PlaceholderBindings bindings;
1302
1303 {
1304 Tensor inputs(ElemKind::FloatTy, {1, 1, 1, 2048});
1305
1306 // Weights and bias are read from NetWeightFilename.
1307 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
1308 {&inputs.getType()}, *F);
1309 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1310 bindings.allocate(mod.getPlaceholders());
1311 updateInputPlaceholdersByName(bindings, &mod, {"inputs"}, {&inputs});
1312 }
1313
1314 // High level check on the content of the graph. We have two Splats for
1315 // weights and bias, a reshape, an FC, another reshape, and a save.
1316 EXPECT_EQ(F->getNodes().size(), 6);
1317
1318 auto finalShape = output->getType()->dims();
1319 std::vector<dim_t> expectedOutput{1, 1, 1, 9190};
1320 EXPECT_EQ(finalShape, llvm::makeArrayRef(expectedOutput));
1321
1322 auto *saveNode = getSaveNodeFromDest(output);
1323 auto *reshapeAfterNode =
1324 llvm::dyn_cast<ReshapeNode>(saveNode->getInput().getNode());
1325 ASSERT_TRUE(reshapeAfterNode);
1326 auto *fcNode = llvm::dyn_cast<FullyConnectedNode>(
1327 reshapeAfterNode->getInput().getNode());
1328 ASSERT_TRUE(fcNode);
1329 auto *reshape = llvm::dyn_cast<ReshapeNode>(fcNode->getInput());
1330 ASSERT_TRUE(reshape);
1331
1332 // We don't actually check that the output is correct, because this is
1333 // already covered in the Operator.FCWithFlatten/* tests.
1334}
1335
1336TEST_F(Caffe2ImporterTest, FC3DSecondAxis) {
1337 const std::string NetDescFilename(
1338 GLOW_DATA_PATH
1339 "tests/models/caffe2Models/fc_3d_second_axis_predict.pbtxt");
1340 const std::string NetWeightFilename(
1341 GLOW_DATA_PATH "tests/models/caffe2Models/fc_3d_second_axis_init.pbtxt");
1342
1343 ExecutionEngine EE{};
1344 auto &mod = EE.getModule();
1345 Function *F = mod.createFunction("main");
1346
1347 PlaceholderBindings bindings;
1348 Placeholder *output;
1349
1350 const std::vector<dim_t> inputShape{2, 3, 4};
1351
1352 // Create and populate input tensor
1353 Tensor input{ElemKind::FloatTy, inputShape};
1354 std::vector<float> inputData;
1355 inputData.resize(input.size());
1356 std::iota(inputData.begin(), inputData.end(), 0);
1357 input.getHandle() = inputData;
1358
1359 // Destroy the loader after the graph is loaded since the following
1360 // execution will not depend on anything from the loader.
1361 {
1362 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
1363 {&input.getType()}, *F);
1364 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1365
1366 bindings.allocate(mod.getPlaceholders());
1367 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
1368 }
1369
1370 auto res = bindings.get(output);
1371 EE.compile(CompilationMode::Infer);
1372 EE.run(bindings);
1373
1374 std::vector<dim_t> expectedShape{inputShape.begin(), inputShape.end() - 1};
1375 expectedShape.push_back(5); // bias size as defined in .pbtxt file
1376 EXPECT_EQ(expectedShape, res->dims().vec());
1377
1378 auto result = res->getHandle();
1379
1380 // Data is based on Caffe2 results
1381 std::vector<std::vector<std::vector<float>>> expectedData{
1382 {{14, 39, 64, 89, 114},
1383 {38, 127, 216, 305, 394},
1384 {62, 215, 368, 521, 674}},
1385 {{86, 303, 520, 737, 954},
1386 {110, 391, 672, 953, 1234},
1387 {134, 479, 824, 1169, 1514}}};
1388
1389 for (dim_t dim1 = 0; dim1 < expectedShape[0]; ++dim1) {
1390 for (dim_t dim2 = 0; dim2 < expectedShape[0]; ++dim2) {
1391 for (dim_t dim3 = 0; dim3 < expectedShape[0]; ++dim3) {
1392 EXPECT_FLOAT_EQ(expectedData[dim1][dim2][dim3],
1393 result.at({dim1, dim2, dim3}));
1394 }
1395 }
1396 }
1397}
1398
1399TEST_F(Caffe2ImporterTest, FC4DFirstAxis) {
1400 ExecutionEngine EE{};
1401 auto &mod = EE.getModule();
1402 Function *F = mod.createFunction("main");
1403
1404 std::string NetDescFilename(
1405 GLOW_DATA_PATH
1406 "tests/models/caffe2Models/fc_4d_first_axis_predict_net.pbtxt");
1407 std::string NetWeightFilename(
1408 GLOW_DATA_PATH
1409 "tests/models/caffe2Models/fc_4d_first_axis_init_net.pbtxt");
1410
1411 Placeholder *output;
1412 PlaceholderBindings bindings;
1413
1414 {
1415 Tensor inputs(ElemKind::FloatTy, {1, 5, 1, 1});
1416
1417 // Weights and bias are read from NetWeightFilename
1418 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
1419 {&inputs.getType()}, *F);
1420 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1421 bindings.allocate(mod.getPlaceholders());
1422 updateInputPlaceholdersByName(bindings, &mod, {"inputs"}, {&inputs});
1423 }
1424
1425 auto finalShape = output->getType()->dims();
1426 std::vector<dim_t> expectedOutput{1, 3};
1427 EXPECT_EQ(finalShape, llvm::makeArrayRef(expectedOutput));
1428}
1429
1430TEST_F(Caffe2ImporterTest, FbFCPacked3DSecondAxis) {
1431 const std::string NetDescFilename(
1432 GLOW_DATA_PATH
1433 "tests/models/caffe2Models/fbfcpacked_3d_second_axis_predict.pbtxt");
1434 const std::string NetWeightFilename(
1435 GLOW_DATA_PATH
1436 "tests/models/caffe2Models/fbfcpacked_3d_second_axis_init.pbtxt");
1437
1438 ExecutionEngine EE{};
1439 auto &mod = EE.getModule();
1440 Function *F = mod.createFunction("main");
1441
1442 PlaceholderBindings bindings;
1443 Placeholder *output;
1444
1445 const std::vector<dim_t> inputShape{2, 3, 4};
1446
1447 // Create and populate input tensor
1448 Tensor input{ElemKind::FloatTy, inputShape};
1449 std::vector<float> inputData;
1450 inputData.resize(input.size());
1451 std::iota(inputData.begin(), inputData.end(), 0);
1452 input.getHandle() = inputData;
1453
1454 // Destroy the loader after the graph is loaded since the following
1455 // execution will not depend on anything from the loader.
1456 {
1457 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
1458 {&input.getType()}, *F);
1459 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1460
1461 bindings.allocate(mod.getPlaceholders());
1462 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
1463 }
1464
1465 auto res = bindings.get(output);
1466 EE.compile(CompilationMode::Infer);
1467 EE.run(bindings);
1468
1469 std::vector<dim_t> expectedShape{inputShape.begin(), inputShape.end() - 1};
1470 expectedShape.push_back(5); // bias size as defined in .pbtxt file
1471 EXPECT_EQ(expectedShape, res->dims().vec());
1472
1473 auto result = res->getHandle();
1474
1475 // Data is based on Caffe2 results
1476 std::vector<std::vector<std::vector<float>>> expectedData{
1477 {{14, 39, 64, 89, 114},
1478 {38, 127, 216, 305, 394},
1479 {62, 215, 368, 521, 674}},
1480 {{86, 303, 520, 737, 954},
1481 {110, 391, 672, 953, 1234},
1482 {134, 479, 824, 1169, 1514}}};
1483
1484 for (dim_t dim1 = 0; dim1 < expectedShape[0]; ++dim1) {
1485 for (dim_t dim2 = 0; dim2 < expectedShape[0]; ++dim2) {
1486 for (dim_t dim3 = 0; dim3 < expectedShape[0]; ++dim3) {
1487 EXPECT_FLOAT_EQ(expectedData[dim1][dim2][dim3],
1488 result.at({dim1, dim2, dim3}));
1489 }
1490 }
1491 }
1492}
1493
1494TEST_F(Caffe2ImporterTest, FbFCPacked4DFirstAxis) {
1495 ExecutionEngine EE{};
1496 auto &mod = EE.getModule();
1497 Function *F = mod.createFunction("main");
1498
1499 std::string NetDescFilename(
1500 GLOW_DATA_PATH
1501 "tests/models/caffe2Models/fbfcpacked_4d_first_axis_predict_net.pbtxt");
1502 std::string NetWeightFilename(
1503 GLOW_DATA_PATH
1504 "tests/models/caffe2Models/fbfcpacked_4d_first_axis_init_net.pbtxt");
1505
1506 Placeholder *output;
1507 PlaceholderBindings bindings;
1508
1509 {
1510 Tensor input(ElemKind::FloatTy, {1, 5, 1, 1});
1511
1512 // Weights and bias are read from NetWeightFilename
1513 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
1514 {&input.getType()}, *F);
1515 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1516 bindings.allocate(mod.getPlaceholders());
1517 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
1518 }
1519
1520 auto finalShape = output->getType()->dims();
1521 std::vector<dim_t> expectedOutput{1, 3};
1522 EXPECT_EQ(finalShape, llvm::makeArrayRef(expectedOutput));
1523}
1524
1525TEST_F(Caffe2ImporterTest, Int8FC3DSecondAxis) {
1526 const std::string NetDescFilename(
1527 GLOW_DATA_PATH
1528 "tests/models/caffe2Models/int8fc_3d_second_axis_predict.pbtxt");
1529 const std::string NetWeightFilename(
1530 GLOW_DATA_PATH
1531 "tests/models/caffe2Models/int8fc_3d_second_axis_init.pbtxt");
1532
1533 ExecutionEngine EE{};
1534 auto &mod = EE.getModule();
1535 Function *F = mod.createFunction("main");
1536
1537 PlaceholderBindings bindings;
1538 Placeholder *output;
1539
1540 const std::vector<dim_t> inputShape{2, 2, 3};
1541
1542 // Create and populate input tensor
1543 Tensor input{ElemKind::Int8QTy, inputShape, 1, 0};
1544 std::vector<int8_t> inputData;
1545 inputData.resize(input.size());
1546 std::iota(inputData.begin(), inputData.end(), 0);
1547 input.getHandle<int8_t>() = inputData;
1548
1549 // Destroy the loader after the graph is loaded since the following
1550 // execution will not depend on anything from the loader.
1551 {
1552 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
1553 {&input.getType()}, *F);
1554 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1555
1556 bindings.allocate(mod.getPlaceholders());
1557 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
1558 }
1559
1560 auto res = bindings.get(output);
1561 EE.compile(CompilationMode::Infer);
1562 EE.run(bindings);
1563
1564 std::vector<dim_t> expectedShape{inputShape.begin(), inputShape.end() - 1};
1565 expectedShape.push_back(2); // bias size as defined in .pbtxt file
1566 EXPECT_EQ(expectedShape, res->dims().vec());
1567
1568 auto result = res->getHandle<float>();
1569
1570 // Data is based on Caffe2 results
1571 std::vector<std::vector<std::vector<float>>> expectedData{
1572 {{4.956894, 14.890197}, {14.003877, 51.125492}},
1573 {{22.921618, 86.84314}, {31.9686, 123.07844}}};
1574
1575 for (dim_t dim1 = 0; dim1 < expectedShape[0]; ++dim1) {
1576 for (dim_t dim2 = 0; dim2 < expectedShape[0]; ++dim2) {
1577 for (dim_t dim3 = 0; dim3 < expectedShape[0]; ++dim3) {
1578 EXPECT_NEAR(expectedData[dim1][dim2][dim3],
1579 result.at({dim1, dim2, dim3}), 0.2);
1580 }
1581 }
1582 }
1583}
1584
1585TEST_F(Caffe2ImporterTest, Int8FC4DFirstAxis) {
1586 ExecutionEngine EE{};
1587 auto &mod = EE.getModule();
1588 Function *F = mod.createFunction("main");
1589
1590 std::string NetDescFilename(
1591 GLOW_DATA_PATH
1592 "tests/models/caffe2Models/int8fc_4d_first_axis_predict_net.pbtxt");
1593 std::string NetWeightFilename(
1594 GLOW_DATA_PATH
1595 "tests/models/caffe2Models/int8fc_4d_first_axis_init_net.pbtxt");
1596
1597 Placeholder *output;
1598 PlaceholderBindings bindings;
1599
1600 {
1601 Tensor input(ElemKind::FloatTy, {1, 5, 1, 1});
1602
1603 // Weights and bias are read from NetWeightFilename
1604 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
1605 {&input.getType()}, *F);
1606 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1607 bindings.allocate(mod.getPlaceholders());
1608 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
1609 }
1610
1611 auto finalShape = output->getType()->dims();
1612 std::vector<dim_t> expectedOutput{1, 3};
1613 EXPECT_EQ(finalShape, llvm::makeArrayRef(expectedOutput));
1614}
1615
1616TEST_F(Caffe2ImporterTest, Int8FCDynamicQuantized) {
1617 const std::string NetDescFilename(
1618 GLOW_DATA_PATH
1619 "tests/models/caffe2Models/dynamic_quantized_fc_predict_net.pbtxt");
1620 const std::string NetWeightFilename(
1621 GLOW_DATA_PATH
1622 "tests/models/caffe2Models/dynamic_quantized_fc_init.pbtxt");
1623
1624 ExecutionEngine EE{};
1625 auto &mod = EE.getModule();
1626 Function *F = mod.createFunction("main");
1627
1628 PlaceholderBindings bindings;
1629 Placeholder *output;
1630
1631 const std::vector<dim_t> inputShape{2, 3};
1632
1633 // Create and populate input tensor
1634 Tensor input{ElemKind::Float16Ty, inputShape};
1635 std::vector<float16_t> inputData;
1636 inputData.resize(input.size());
1637 std::iota(inputData.begin(), inputData.end(), 1);
1638 input.getHandle<float16_t>() = inputData;
1639
1640 // Destroy the loader after the graph is loaded since the following
1641 // execution will not depend on anything from the loader.
1642 {
1643 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
1644 {&input.getType()}, *F);
1645 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1646
1647 bindings.allocate(mod.getPlaceholders());
1648 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
1649 }
1650
1651 auto res = bindings.get(output);
1652 EE.compile(CompilationMode::Infer);
1653 EE.run(bindings);
1654
1655 std::vector<dim_t> expectedShape{2, 4};
1656 EXPECT_EQ(expectedShape, res->dims().vec());
1657
1658 auto result = res->getHandle<float16_t>();
1659
1660 std::vector<std::vector<float>> expectedData{{8, 27, 46, 65},
1661 {17, 63, 109, 155}};
1662
1663 for (dim_t dim1 = 0; dim1 < expectedShape[0]; ++dim1) {
1664 for (dim_t dim2 = 0; dim2 < expectedShape[1]; ++dim2) {
1665 EXPECT_NEAR(expectedData[dim1][dim2], result.at({dim1, dim2}), 0.3);
1666 }
1667 }
1668}
1669
1670/// Test loading bucketize op from a Caffe2 model.
1671/// Test with arg boundaries = [0.1, 2.5]
1672TEST_F(Caffe2ImporterTest, importBucketize) {
1673 ExecutionEngine EE{};
1674 auto &mod = EE.getModule();
1675 Function *F = mod.createFunction("main");
1676
1677 std::string NetDescFilename(
1678 GLOW_DATA_PATH "tests/models/caffe2Models/bucketize_op_net.pbtxt");
1679 std::string NetWeightFilename(
1680 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
1681
1682 const std::vector<dim_t> inputShape{3, 2};
1683 PlaceholderBindings bindings;
1684 Placeholder *outputPH;
1685 Tensor inputs_0(ElemKind::FloatTy, inputShape);
1686 inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
1687 // Destroy the loader after the graph is loaded since the following execution
1688 // will not depend on anything from the loader.
1689 {
1690 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input_0"},
1691 {&inputs_0.getType()}, *F);
1692 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1693 bindings.allocate(mod.getPlaceholders());
1694 updateInputPlaceholdersByName(bindings, &mod, {"input_0"}, {&inputs_0});
1695 }
1696
1697 EXPECT_EQ(F->getNodes().size(), 2);
1698 auto *saveNode = getSaveNodeFromDest(outputPH);
1699 auto *bucketizeNode =
1700 llvm::dyn_cast<BucketizeNode>(saveNode->getInput().getNode());
1701 ASSERT_TRUE(bucketizeNode);
1702 auto boundriesVec = bucketizeNode->getBoundaries();
1703 ASSERT_EQ(boundriesVec.size(), 2);
1704 EXPECT_NEAR(boundriesVec[0], 0.1, 0.00001);
1705 EXPECT_NEAR(boundriesVec[1], 2.5, 0.00001);
1706 // We have one input and one output.
1707 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1708
1709 auto output = bindings.get(outputPH);
1710 EXPECT_EQ(inputShape, output->dims().vec());
1711
1712 EE.compile(CompilationMode::Infer);
1713 EE.run(bindings);
1714
1715 auto outputH = output->getHandle<int32_t>();
1716
1717 // From the bucketize_op_net.pbtxt file
1718 std::array<float, 2> boundaries{0.1, 2.5};
1719 for (dim_t d1 = 0; d1 < inputShape[0]; ++d1) {
1720 for (dim_t d2 = 0; d2 < inputShape[1]; ++d2) {
1721 auto inputVal = inputs_0.getHandle().at({d1, d2});
1722 auto exp =
1723 std::lower_bound(boundaries.begin(), boundaries.end(), inputVal) -
1724 boundaries.begin();
1725 EXPECT_EQ(exp, outputH.at({d1, d2}));
1726 }
1727 }
1728}
1729
1730/// Test loading ResizeNearest op from a Caffe2 model.
1731/// Test with NHWC order, 2.0 height scale and 1.5 width scale
1732TEST_F(Caffe2ImporterTest, importResizeNearest) {
1733 ExecutionEngine EE{};
1734 auto &mod = EE.getModule();
1735 Function *F = mod.createFunction("main");
1736
1737 std::string NetDescFilename(
1738 GLOW_DATA_PATH "tests/models/caffe2Models/resize_nearest_op_net.pbtxt");
1739 std::string NetWeightFilename(
1740 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
1741
1742 Placeholder *output;
1743 PlaceholderBindings bindings;
1744
1745 {
1746 Tensor input(ElemKind::FloatTy, {1, 2, 2, 1});
1747
1748 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input_0"},
1749 {&input.getType()}, *F);
1750 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1751 bindings.allocate(mod.getPlaceholders());
1752 updateInputPlaceholdersByName(bindings, &mod, {"input_0"}, {&input});
1753 }
1754
1755 EXPECT_EQ(F->getNodes().size(), 2);
1756 auto *saveNode = getSaveNodeFromDest(output);
1757 auto *resizeNearestNode =
1758 llvm::dyn_cast<ResizeNearestNode>(saveNode->getInput().getNode());
1759 ASSERT_TRUE(resizeNearestNode);
1760 // We have one input and one output.
1761 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1762 auto scale = resizeNearestNode->getScale();
1763 EXPECT_EQ(scale[0], 1);
1764 auto heightScale = scale[1];
1765 auto widthScale = scale[2];
1766 EXPECT_EQ(scale[3], 1);
1767 EXPECT_NEAR(heightScale, 2.0, 0.00001);
1768 EXPECT_NEAR(widthScale, 1.5, 0.00001);
1769}
1770
1771/// Test loading clip op from a Caffe2 model.
1772/// Test with arg min = 20.0 max = 60.0
1773TEST_F(Caffe2ImporterTest, importClip) {
1774 ExecutionEngine EE{};
1775 auto &mod = EE.getModule();
1776 Function *F = mod.createFunction("main");
1777
1778 std::string NetDescFilename(GLOW_DATA_PATH
1779 "tests/models/caffe2Models/clip_op_net.pbtxt");
1780 std::string NetWeightFilename(
1781 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
1782
1783 PlaceholderBindings bindings;
1784 Placeholder *output;
1785 Tensor inputs_0(ElemKind::FloatTy, {5, 5});
1786 // Destroy the loader after the graph is loaded since the following execution
1787 // will not depend on anything from the loader.
1788 {
1789 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
1790 {&inputs_0.getType()}, *F);
1791 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1792 bindings.allocate(mod.getPlaceholders());
1793 updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"}, {&inputs_0});
1794 }
1795
1796 EXPECT_EQ(F->getNodes().size(), 2);
1797 auto *saveNode = getSaveNodeFromDest(output);
1798 auto *clipNode = llvm::dyn_cast<ClipNode>(saveNode->getInput().getNode());
1799 ASSERT_TRUE(clipNode);
1800 EXPECT_EQ(clipNode->getMax(), 60.0);
1801 EXPECT_EQ(clipNode->getMin(), 20.0);
1802 auto *inputNode = llvm::dyn_cast<Placeholder>(clipNode->getInput());
1803 ASSERT_EQ(inputNode, mod.getPlaceholderByNameSlow("inputs_0"));
1804 // We have one input and one output.
1805 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1806}
1807
1808/// Test loading clip op from a Caffe2 model with default arg values:
1809/// min = std::numeric_limits<float>::lowest()
1810/// max = std::numeric_limits<float>::max()
1811TEST_F(Caffe2ImporterTest, importClipDefault) {
1812 ExecutionEngine EE{};
1813 auto &mod = EE.getModule();
1814 Function *F = mod.createFunction("main");
1815
1816 std::string NetDescFilename(
1817 GLOW_DATA_PATH "tests/models/caffe2Models/clip_op_default_net.pbtxt");
1818 std::string NetWeightFilename(
1819 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
1820
1821 PlaceholderBindings bindings;
1822 Placeholder *output;
1823 Tensor inputs_0(ElemKind::FloatTy, {5, 5});
1824
1825 // Destroy the loader after the graph is loaded since the following execution
1826 // will not depend on anything from the loader.
1827 {
1828 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
1829 {&inputs_0.getType()}, *F);
1830 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1831 bindings.allocate(mod.getPlaceholders());
1832 updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"}, {&inputs_0});
1833 }
1834 EXPECT_EQ(F->getNodes().size(), 2);
1835 auto *saveNode = getSaveNodeFromDest(output);
1836 auto *clipNode = llvm::dyn_cast<ClipNode>(saveNode->getInput().getNode());
1837 EXPECT_EQ(clipNode->getMax(), std::numeric_limits<float>::max());
1838 EXPECT_EQ(clipNode->getMin(), std::numeric_limits<float>::lowest());
1839 auto *inputNode = llvm::dyn_cast<Placeholder>(clipNode->getInput().getNode());
1840 ASSERT_EQ(inputNode, mod.getPlaceholderByNameSlow("inputs_0"));
1841 // We have one input and one output.
1842 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1843}
1844
1845/// Test loading a ReplaceNaN operator.
1846TEST_F(Caffe2ImporterTest, replaceNaN) {
1847 ExecutionEngine EE{};
1848 auto &mod = EE.getModule();
1849 Function *F = mod.createFunction("main");
1850
1851 std::string NetDescFilename(
1852 GLOW_DATA_PATH "tests/models/caffe2Models/replace_nan_predict_net.pbtxt");
1853 std::string NetWeightFilename(
1854 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
1855
1856 PlaceholderBindings bindings;
1857 Placeholder *output;
1858 Tensor input(ElemKind::FloatTy, {10, 10});
1859
1860 // Destroy the loader after the graph is loaded since the following execution
1861 // will not depend on anything from the loader.
1862 {
1863 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
1864 {&input.getType()}, *F);
1865 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1866 bindings.allocate(mod.getPlaceholders());
1867 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
1868 }
1869
1870 // Check that the shape of the output matches the input.
1871 std::vector<dim_t> expectedDims = {10, 10};
1872 EXPECT_TRUE(output->dims().vec() == expectedDims);
1873
1874 // High level checks on the content of the graph.
1875 // We have 1 ReplaceNaN and 1 Output.
1876 EXPECT_EQ(F->getNodes().size(), 2);
1877 auto *saveNode = getSaveNodeFromDest(output);
1878 auto *replaceNaNNode =
1879 llvm::dyn_cast<ReplaceNaNNode>(saveNode->getInput().getNode());
1880 EXPECT_EQ(replaceNaNNode->getValue(), 1.0f);
1881 auto *inputNode =
1882 llvm::dyn_cast<Placeholder>(replaceNaNNode->getInput().getNode());
1883 ASSERT_EQ(inputNode, mod.getPlaceholderByNameSlow("input"));
1884
1885 // We have one input and one output.
1886 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1887}
1888
1889/// Test loading a DotProduct operator with 1D inputs.
1890TEST_F(Caffe2ImporterTest, dotProduct1D) {
1891 ExecutionEngine EE{};
1892 auto &mod = EE.getModule();
1893 Function *F = mod.createFunction("main");
1894
1895 std::string NetDescFilename(
1896 GLOW_DATA_PATH "tests/models/caffe2Models/dot_product_predict_net.pbtxt");
1897 std::string NetWeightFilename(
1898 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
1899
1900 Placeholder *output;
1901
1902 // Input tensors.
1903 constexpr dim_t kDataSize = 10;
1904 auto type = mod.uniqueType(ElemKind::FloatTy, {kDataSize});
1905
1906 // Destroy the loader after the graph is loaded to ensure the function F
1907 // does not depend on anything stored in it.
1908 {
1909 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"X", "Y"},
1910 {type, type}, *F);
1911 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1912 }
1913
1914 // Check that the shape of the output matches that of the expected output.
1915 EXPECT_TRUE(output->dims().equals({kDataSize}));
1916
1917 // High level checks on the content of the graph.
1918 // We have 1 Mul and 1 Output.
1919 EXPECT_EQ(F->getNodes().size(), 2);
1920
1921 // Check that the graph has the expected shape (Mul -> Save),
1922 // starting from the output.
1923 auto *saveNode = getSaveNodeFromDest(output);
1924 auto *MN = llvm::dyn_cast<MulNode>(saveNode->getInput());
1925 ASSERT_TRUE(MN);
1926
1927 // We have two inputs and one output.
1928 EXPECT_EQ(mod.getPlaceholders().size(), 3);
1929}
1930
1931// Test loading a DotProduct operator with 2D inputs.
1932TEST_F(Caffe2ImporterTest, dotProduct2D) {
1933 ExecutionEngine EE{};
1934 auto &mod = EE.getModule();
1935 Function *F = mod.createFunction("main");
1936
1937 std::string NetDescFilename(
1938 GLOW_DATA_PATH "tests/models/caffe2Models/dot_product_predict_net.pbtxt");
1939 std::string NetWeightFilename(
1940 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
1941
1942 Placeholder *output;
1943
1944 // Input tensors.
1945 constexpr dim_t kRows = 10;
1946 constexpr dim_t kCols = 20;
1947 auto type = mod.uniqueType(ElemKind::FloatTy, {kRows, kCols});
1948
1949 // Destroy the loader after the graph is loaded to ensure the function F
1950 // does not depend on anything stored in it.
1951 {
1952 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"X", "Y"},
1953 {type, type}, *F);
1954 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
1955 }
1956
1957 // Check that the shape of the output matches that of the expected output.
1958 EXPECT_TRUE(output->dims().equals({kRows}));
1959
1960 // High level checks on the content of the graph.
1961 // We have 1 Mul, 1 BatchedReduceAdd and 1 Output.
1962 EXPECT_EQ(F->getNodes().size(), 3);
1963
1964 // Check that the graph has the expected shape
1965 // (Mul -> BatchedReduceAdd -> Save), starting from the output.
1966 auto *saveNode = getSaveNodeFromDest(output);
1967 auto *BRA = llvm::dyn_cast<BatchedReduceAddNode>(saveNode->getInput());
1968 ASSERT_TRUE(BRA);
1969 ASSERT_EQ(BRA->getNumInputs(), 1);
1970
1971 auto *MN = llvm::dyn_cast<MulNode>(BRA->getBatch());
1972 ASSERT_TRUE(MN);
1973
1974 // We have two inputs and one output.
1975 EXPECT_EQ(mod.getPlaceholders().size(), 3);
1976}
1977
1978// Test loading a BatchBoxCox operator.
1979TEST_F(Caffe2ImporterTest, batchBoxCox) {
1980 ExecutionEngine EE{};
1981 auto &mod = EE.getModule();
1982 Function *F = mod.createFunction("main");
1983
1984 std::string NetDescFilename(
1985 GLOW_DATA_PATH
1986 "tests/models/caffe2Models/batch_box_cox_predict_net.pbtxt");
1987 std::string NetWeightFilename(
1988 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
1989
1990 PlaceholderBindings bindings;
1991 Placeholder *output;
1992
1993 // Input tensors.
1994 const dim_t kRows = 10;
1995 const dim_t kCols = 5;
1996 Tensor data(ElemKind::FloatTy, {kRows, kCols});
1997 Tensor lambda1(ElemKind::FloatTy, {kCols});
1998 Tensor lambda2(ElemKind::FloatTy, {kCols});
1999 Tensor O(ElemKind::FloatTy, {kRows, kCols});
2000 // Destroy the loader after the graph is loaded since the following execution
2001 // will not depend on anything from the loader.
2002 {
2003 Caffe2ModelLoader caffe2LD(
2004 NetDescFilename, NetWeightFilename, {"data", "lambda1", "lambda2"},
2005 {&data.getType(), &lambda1.getType(), &lambda2.getType()}, *F);
2006 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
2007 bindings.allocate(mod.getPlaceholders());
2008 updateInputPlaceholdersByName(bindings, &mod,
2009 {"data", "lambda1", "lambda2"},
2010 {&data, &lambda1, &lambda2});
2011 }
2012
2013 EXPECT_EQ(F->getNodes().size(), 2);
2014
2015 // Output.
2016 auto *saveNode = getSaveNodeFromDest(output);
2017 ASSERT_TRUE(saveNode);
2018
2019 // Select.
2020 auto *BBCN = llvm::dyn_cast<BatchBoxCoxNode>(saveNode->getInput());
2021 ASSERT_TRUE(BBCN);
2022
2023 // There are three inputs and one output.
2024 EXPECT_EQ(mod.getPlaceholders().size(), 4);
2025}
2026
2027// Test loading a EQ operator with 1D inputs.
2028TEST_F(Caffe2ImporterTest, EQ1D) {
2029 ExecutionEngine EE{};
2030 auto &mod = EE.getModule();
2031 Function *F = mod.createFunction("main");
2032
2033 std::string NetDescFilename(GLOW_DATA_PATH
2034 "tests/models/caffe2Models/eq_op_net.pbtxt");
2035 std::string NetWeightFilename(
2036 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
2037
2038 Placeholder *output;
2039 PlaceholderBindings bindings;
2040
2041 // Input tensors.
2042 const dim_t kDataSize = 10;
2043 Tensor X(ElemKind::FloatTy, {kDataSize});
2044 Tensor Y(ElemKind::FloatTy, {kDataSize});
2045
2046 // Destroy the loader after the graph is loaded
2047 {
2048 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"X", "Y"},
2049 {&X.getType(), &Y.getType()}, *F);
2050 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
2051 }
2052
2053 // High level checks on the content of the graph.
2054 // We have 1 EQ and 1 Output.
2055 EXPECT_EQ(F->getNodes().size(), 2);
2056
2057 // Check that the graph has the expected shape (EQ -> Save),
2058 // starting from the output.
2059 auto *saveNode = getSaveNodeFromDest(output);
2060 auto *EQN = llvm::dyn_cast<CmpEQNode>(saveNode->getInput());
2061 ASSERT_TRUE(EQN);
2062
2063 // Graph has two inputs and one output.
2064 EXPECT_EQ(mod.getPlaceholders().size(), 3);
2065}
2066
2067// Test loading a LengthsToRanges operator.
2068TEST_F(Caffe2ImporterTest, LengthsToRanges) {
2069 ExecutionEngine EE{};
2070 auto &mod = EE.getModule();
2071 Function *F = mod.createFunction("main");
2072
2073 std::string NetDescFilename(
2074 GLOW_DATA_PATH "tests/models/caffe2Models/lengths_to_ranges.pbtxt");
2075 std::string NetWeightFilename(
2076 GLOW_DATA_PATH
2077 "tests/models/caffe2Models/lengths_to_ranges_init_net.pbtxt");
2078
2079 Placeholder *output;
2080
2081 // Destroy the loader after the graph is loaded
2082 {
2083 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {}, {}, *F);
2084 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
2085 }
2086
2087 // High level checks on the content of the graph.
2088 // We have 1 LengthsToRanges and 1 Save.
2089 EXPECT_EQ(F->getNodes().size(), 2);
2090
2091 // Check that the graph has the expected shape (LengthsToRanges -> Save),
2092 // starting from the output.
2093 auto *saveNode = getSaveNodeFromDest(output);
2094 auto *N = llvm::dyn_cast<LengthsToRangesNode>(saveNode->getInput());
2095 ASSERT_TRUE(N);
2096
2097 // Graph has one output.
2098 EXPECT_EQ(mod.getPlaceholders().size(), 1);
2099}
2100
2101// Test loading Logit operator from a Caffe2 model.
2102TEST_F(Caffe2ImporterTest, Logit) {
2103 ExecutionEngine EE{};
2104 auto &mod = EE.getModule();
2105 Function *F = mod.createFunction("main");
2106
2107 std::string NetDescFilename(GLOW_DATA_PATH
2108 "tests/models/caffe2Models/logit_op_net.pbtxt");
2109 std::string NetWeightFilename(
2110 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
2111
2112 Placeholder *output;
2113
2114 // Input tensors.
2115 const dim_t kDataSize = 10;
2116 Tensor X(ElemKind::FloatTy, {kDataSize});
2117
2118 // Destroy the loader after the graph is loaded
2119 {
2120 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
2121 {&X.getType()}, *F);
2122 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
2123 }
2124
2125 // Check that the shape of the output matches what Caffe2 expects.
2126 std::vector<dim_t> expectedDims = {kDataSize};
2127 EXPECT_EQ(output->dims().vec(), expectedDims);
2128
2129 // High level checks on the content of the graph.
2130 // We have 1 Logit, 1 Save.
2131 EXPECT_EQ(F->getNodes().size(), 2);
2132
2133 // Graph has one input and one output.
2134 EXPECT_EQ(mod.getPlaceholders().size(), 2);
2135}
2136
2137// Test loading Logit operator from a Caffe2 model.
2138TEST_F(Caffe2ImporterTest, Swish) {
2139 ExecutionEngine EE{};
2140 auto &mod = EE.getModule();
2141 Function *F = mod.createFunction("main");
2142
2143 std::string NetDescFilename(GLOW_DATA_PATH
2144 "tests/models/caffe2Models/swish_op_net.pbtxt");
2145 std::string NetWeightFilename(
2146 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
2147
2148 PlaceholderBindings bindings;
2149 Placeholder *output;
2150
2151 // Input tensors.
2152 Tensor X(ElemKind::FloatTy, {10});
2153
2154 // Destroy the loader after the graph is loaded
2155 {
2156 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
2157 {&X.getType()}, *F);
2158 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
2159 bindings.allocate(mod.getPlaceholders());
2160 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&X});
2161 }
2162
2163 // Check that the type of the output matches the input.
2164 EXPECT_TRUE(output->getType()->isEqual(X.getType()));
2165
2166 // High level checks on the content of the graph.
2167 EXPECT_EQ(F->getNodes().size(), 2); // Save and Swish
2168 auto *saveNode = getSaveNodeFromDest(output);
2169 auto *swish = llvm::dyn_cast<SwishNode>(saveNode->getInput());
2170 ASSERT_TRUE(swish);
2171
2172 // Graph has one input and one output.
2173 EXPECT_EQ(mod.getPlaceholders().size(), 2);
2174
2175 EE.compile(CompilationMode::Infer);
2176 EE.run(bindings);
2177}
2178
/// Load sparse_to_dense.pbtxt with inputs of the given \p indicesShape,
/// \p valuesShape (assumed rank 3 -- TODO confirm with callers) and
/// \p dataToInferDimShape, run it, and verify both the graph structure
/// (\p expectedNumberOfNodes nodes ending in ScatterData -> Save) and the
/// numeric output against a scatter-accumulate reference computed here.
void testSparseToDense(llvm::ArrayRef<dim_t> indicesShape,
                       llvm::ArrayRef<dim_t> valuesShape,
                       llvm::ArrayRef<dim_t> dataToInferDimShape,
                       int expectedNumberOfNodes) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/sparse_to_dense.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *outputPH;
  PlaceholderBindings bindings;

  // Create inputs. dataToInferDim only contributes its first dimension to
  // the output shape; its contents are never read, so it is not bound below.
  Tensor indices(ElemKind::Int64ITy, indicesShape);
  Tensor values(ElemKind::FloatTy, valuesShape);
  Tensor dataToInferDim(ElemKind::FloatTy, dataToInferDimShape);

  // Indices must stay within the inferred first output dimension.
  indices.getHandle<int64_t>().randomize(0, dataToInferDimShape[0] - 1,
                                         mod.getPRNG());
  values.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename,
        {"indices", "values", "dataToInferDim"},
        {&indices.getType(), &values.getType(), &dataToInferDim.getType()}, *F);
    outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"indices", "values"},
                                  {&indices, &values});
  }

  // Check that the shape of the output matches that of the expected output.
  const std::vector<dim_t> expectedOutputShape{dataToInferDimShape[0],
                                               valuesShape[1], valuesShape[2]};
  EXPECT_EQ(expectedOutputShape, outputPH->dims().vec());

  // High level checks on the content of the graph.
  // We should have 1 Splat, 1 optional Reshape, 1 ScatterData and 1 Output
  // node, i.e. 4 nodes in total.
  EXPECT_EQ(F->getNodes().size(), expectedNumberOfNodes);

  // Check that the graph has the expected shape (SparseToDense -> Save),
  // starting from the output.
  auto *saveNode = getSaveNodeFromDest(outputPH);
  auto *STDN = llvm::dyn_cast<ScatterDataNode>(saveNode->getInput());
  ASSERT_TRUE(STDN);

  // Graph has three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);

  auto output = bindings.get(outputPH);

  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  auto outputH = output->getHandle();

  // Build the reference result: start from zeroes, then accumulate each
  // values slice into the row selected by its index (duplicate indices sum).
  std::vector<std::vector<std::vector<float>>> expected(
      expectedOutputShape[0],
      std::vector<std::vector<float>>(
          expectedOutputShape[1],
          std::vector<float>(expectedOutputShape[2], 0)));
  for (dim_t d1 = 0; d1 < valuesShape[0]; ++d1) {
    dim_t dest_d1 = indices.getHandle<int64_t>().at(d1);
    for (dim_t d2 = 0; d2 < valuesShape[1]; ++d2) {
      for (dim_t d3 = 0; d3 < valuesShape[2]; ++d3) {
        expected[dest_d1][d2][d3] += values.getHandle().at({d1, d2, d3});
      }
    }
  }

  // Compare every element against the reference with a float tolerance.
  for (dim_t d1 = 0; d1 < expectedOutputShape[0]; ++d1) {
    for (dim_t d2 = 0; d2 < expectedOutputShape[1]; ++d2) {
      for (dim_t d3 = 0; d3 < expectedOutputShape[2]; ++d3) {
        EXPECT_NEAR(expected[d1][d2][d3], outputH.at({d1, d2, d3}), 1e-3);
      }
    }
  }
}
2265
2266// Test loading a SparseToDense operator.
2267TEST_F(Caffe2ImporterTest, sparseToDense_indices1D) {
2268 constexpr dim_t kNumIndices = 40;
2269 constexpr dim_t kMaxIndex = 20;
2270 constexpr dim_t kRows = 10;
2271 constexpr dim_t kCols = 5;
2272
2273 testSparseToDense({kNumIndices}, {kNumIndices, kRows, kCols}, {kMaxIndex}, 4);
2274}
2275
2276TEST_F(Caffe2ImporterTest, sparseToDense_indices2D) {
2277 constexpr dim_t kNumIndices = 40;
2278 constexpr dim_t kMaxIndex = 20;
2279 constexpr dim_t kRows = 10;
2280 constexpr dim_t kCols = 5;
2281
2282 testSparseToDense({kNumIndices, 1}, {kNumIndices, kRows, kCols}, {kMaxIndex},
2283 3);
2284}
2285
// Test loading a SparseToDenseMask operator: the mask must be read from the
// model and, with no Lengths input given, no batch dimension is added.
TEST_F(Caffe2ImporterTest, SparseToDenseMask) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/sparse_to_dense_mask_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  Tensor indices(ElemKind::Int64ITy, {4});
  Tensor values(ElemKind::FloatTy, {4, 10, 20, 30});
  Tensor defaultValue(ElemKind::FloatTy, {10, 20, 30});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    // Load the net with indices, values and defaultValue as external inputs;
    // the optional Lengths input is deliberately not provided.
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename,
        {"indices", "values", "defaultValue"},
        {&indices.getType(), &values.getType(), &defaultValue.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(output);

  // Graph has 2 nodes: Save and SparseToDenseMask
  EXPECT_EQ(F->getNodes().size(), 2);

  // One constant was created for implicit Lengths input
  EXPECT_EQ(mod.getConstants().size(), 1);

  // Net has 3 input placeholders plus the saved output placeholder.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);

  auto *saveNode = getSaveNodeFromDest(output);
  auto *N = llvm::dyn_cast<SparseToDenseMaskNode>(saveNode->getInput());
  ASSERT_TRUE(N);

  // Check that no batch dimension was added because Lengths was not given.
  EXPECT_TRUE(N->getResult().dims().equals({6, 10, 20, 30}));
  // Check that mask was read correctly.
  EXPECT_TRUE(N->getMask().equals({42, 100, 300, 1, 0, 312}));
}
2337
2338// Test loading a FillExamplesWithIndicator
2339TEST_F(Caffe2ImporterTest, FillExamplesWithIndicator) {
2340 ExecutionEngine EE{};
2341 auto &mod = EE.getModule();
2342 Function *F = mod.createFunction("main");
2343
2344 std::string NetDescFilename(
2345 GLOW_DATA_PATH
2346 "tests/models/caffe2Models/fill_examples_with_indicator.pbtxt");
2347 std::string NetWeightFilename(
2348 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
2349
2350 Placeholder *outputPH;
2351 PlaceholderBindings bindings;
2352 // Create inputs.
2353 constexpr dim_t n = 20;
2354 constexpr dim_t d1 = 3;
2355 constexpr dim_t d2 = 4;
2356 Tensor indicator(ElemKind::Int32ITy, {n});
2357 auto indicatorH = indicator.getHandle<int32_t>();
2358 indicatorH.randomize(0, 1, mod.getPRNG());
2359
2360 dim_t m = 0;
2361 for (size_t i = 0, s = indicatorH.actualSize(); i < s; i++) {
2362 m += indicatorH.at(i);
2363 }
2364 Tensor data(ElemKind::FloatTy, {m, d1, d2});
2365 data.zero();
2366 data.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
2367 // Destroy the loader after the graph is loaded since the following execution
2368 // will not depend on anything from the loader.
2369 {
2370 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
2371 {"data", "indicator"},
2372 {&data.getType(), &indicator.getType()}, *F);
2373 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
2374 bindings.allocate(mod.getPlaceholders());
2375 updateInputPlaceholdersByName(bindings, &mod, {"data", "indicator"},
2376 {&data, &indicator});
2377 }
2378
2379 // Check that the shape of the output matches that of the expected output.
2380 const std::vector<dim_t> expectedOutputShape{n, d1, d2};
2381 EXPECT_EQ(expectedOutputShape, outputPH->dims().vec());
2382 // Graph has 9 nodes: 2 Reshapes, 2 Converts, Nonzero, Slice, Splat,
2383 // ScatterData, Output
2384 // NonZero is translated into multiple ops.
2385 EXPECT_EQ(F->getNodes().size(), 21);
2386
2387 // Graph has two inputs and one output.
2388 EXPECT_EQ(mod.getPlaceholders().size(), 3);
2389
2390 auto output = bindings.get(outputPH);
2391
2392 EE.compile(CompilationMode::Infer);
2393 EE.run(bindings);
2394
2395 auto outputH = output->getHandle();
2396
2397 // Initialize with zeroes
2398 std::vector<std::vector<std::vector<float>>> expected(
2399 n, std::vector<std::vector<float>>(d1, std::vector<float>(d2)));
2400 for (dim_t d = 0, v = 0; d < n; ++d) {
2401 if (indicatorH.at(d)) {
2402 for (dim_t e = 0; e < d1; ++e) {
2403 for (dim_t f = 0; f < d2; ++f) {
2404 expected[d][e][f] = data.getHandle().at({v, e, f});
2405 }
2406 }
2407 v++;
2408 }
2409 }
2410
2411 for (dim_t d = 0; d < n; ++d) {
2412 for (dim_t e = 0; e < d1; ++e) {
2413 for (dim_t f = 0; f < d2; ++f) {
2414 EXPECT_NEAR(expected[d][e][f], outputH.at({d, e, f}), 1e-3);
2415 }
2416 }
2417 }
2418}
2419
// Test loading a BatchSparseToDense operator w/ second dimension = 1.
// Each batch row has length 0 or 1; rows with length 1 take the next value,
// the rest keep the dense default (zero).
TEST_F(Caffe2ImporterTest, BatchSparseToDense_lastdim1) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/batch_sparse_to_dense_last_dim_1.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *outputPH;
  PlaceholderBindings bindings;
  // Create inputs: per-batch lengths are randomly 0 or 1.
  constexpr dim_t numBatches = 100;
  Tensor lengths(ElemKind::Int32ITy, {numBatches});
  auto lengthsH = lengths.getHandle<int32_t>();
  lengthsH.randomize(0, 1, mod.getPRNG());

  // Calculate number of nonzero indices.
  dim_t numIndices = 0;
  for (size_t i = 0, n = lengthsH.actualSize(); i < n; i++) {
    numIndices += lengthsH.at(i);
  }
  Tensor indices(ElemKind::Int64ITy, {numIndices});
  Tensor values(ElemKind::FloatTy, {numIndices});
  // All indices are 0 since the dense dimension per batch is 1.
  indices.zero();
  values.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename, {"lengths", "indices", "values"},
        {&lengths.getType(), &indices.getType(), &values.getType()}, *F);
    outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod,
                                  {"lengths", "indices", "values"},
                                  {&lengths, &indices, &values});
  }

  // Check that the shape of the output matches that of the expected output.
  const std::vector<dim_t> expectedOutputShape{numBatches, 1};
  EXPECT_EQ(expectedOutputShape, outputPH->dims().vec());
  // The op is translated into multiple lowered nodes.
  EXPECT_EQ(F->getNodes().size(), 20);

  // Graph has three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);

  auto output = bindings.get(outputPH);

  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  auto outputH = output->getHandle();

  // Build the reference: rows with length 1 consume the next value in order;
  // the remaining rows stay zero (vector value-initialization).
  std::vector<std::vector<float>> expected(numBatches, std::vector<float>(1));
  for (dim_t d = 0, v = 0; d < numBatches; ++d) {
    if (lengthsH.at(d) == 1) {
      expected[d][0] = values.getHandle().at({v});
      v++;
    }
  }

  for (dim_t d = 0; d < numBatches; ++d) {
    EXPECT_NEAR(expected[d][0], outputH.at({d, 0}), 1e-3);
  }
}
2490
2491/// Test loading NCHW2NHWC op.
2492TEST_F(Caffe2ImporterTest, testNCHW2NHWC) {
2493 ExecutionEngine EE{};
2494 auto &mod = EE.getModule();
2495 Function *F = mod.createFunction("main");
2496
2497 std::string NetDescFilename(
2498 GLOW_DATA_PATH "tests/models/caffe2Models/NCHW2NHWC_predict_net.pbtxt");
2499 std::string NetWeightFilename(
2500 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
2501
2502 Placeholder *output;
2503 PlaceholderBindings bindings;
2504
2505 Tensor inputs(ElemKind::FloatTy, {1, 2, 3, 4});
2506
2507 // Destroy the loader after the graph is loaded since the following execution
2508 // will not depend on anything from the loader.
2509 {
2510 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
2511 {&inputs.getType()}, *F);
2512 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
2513 bindings.allocate(mod.getPlaceholders());
2514 }
2515
2516 // Check output shape.
2517 auto res = bindings.get(output);
2518 std::vector<dim_t> expectedDims = {1, 3, 4, 2};
2519 EXPECT_TRUE(res->getHandle<float>().dims().vec() == expectedDims);
2520
2521 // High level check on the content of the graph. We have 1 transpose and 1
2522 // save.
2523 EXPECT_EQ(F->getNodes().size(), 2);
2524 auto *saveNode = getSaveNodeFromDest(output);
2525 auto *transNode =
2526 llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
2527 ASSERT_TRUE(transNode);
2528
2529 // We have 2 placeholders: 1 input and 1 output.
2530 EXPECT_EQ(mod.getPlaceholders().size(), 2);
2531 // We have 0 constants.
2532 EXPECT_EQ(mod.getConstants().size(), 0);
2533}
2534
/// Test loading a LengthsSum operator.
TEST_F(Caffe2ImporterTest, lengthsSum) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/lengths_sum.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // Create inputs.
  // NOTE(review): data is Int64 while lengths is Float, the opposite of the
  // usual LengthsSum convention (float data, integer lengths). Only shapes
  // and graph structure are checked below, so the element types are never
  // exercised -- confirm whether this is intentional.
  Tensor data(ElemKind::Int64ITy, {10, 2, 3});
  Tensor lengths(ElemKind::FloatTy, {5});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"data", "lengths"},
                               {&data.getType(), &lengths.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Check that the shape of the output matches that of the expected output:
  // the first dim of data (10) is replaced by the number of segments (5).
  std::vector<dim_t> expectedShape{5, 2, 3};
  EXPECT_TRUE(output->dims().vec() == expectedShape);

  // High level checks on the content of the graph.
  // We should have 1 LengthsSum and 1 Output node = 2 nodes in total.
  EXPECT_EQ(F->getNodes().size(), 2);

  // Check that the graph has the expected shape (LengthsSum -> Save),
  // starting from the output.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *LSN = llvm::dyn_cast<LengthsSumNode>(saveNode->getInput());
  ASSERT_TRUE(LSN);

  // Graph has two inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);
}
2579
2580/// Test loading a GatherRanges op.
2581TEST_F(Caffe2ImporterTest, gatherRanges) {
2582 ExecutionEngine EE;
2583 auto &mod = EE.getModule();
2584 auto *F = mod.createFunction("main");
2585
2586 std::string NetDescFilename(GLOW_DATA_PATH
2587 "tests/models/caffe2Models/gather_ranges.pbtxt");
2588 std::string NetWeightFilename(
2589 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
2590
2591 Placeholder *output;
2592 Tensor data(ElemKind::FloatTy, {6});
2593 Tensor ranges(ElemKind::Int32ITy, {2, 2, 2});
2594
2595 {
2596 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
2597 {"data", "ranges"},
2598 {&data.getType(), &ranges.getType()}, *F);
2599 output = EXIT_ON_ERR(caffe2LD.getOutputByName("output"));
2600 }
2601
2602 // Verify structure: PH/PH -> GatherRanges -> Save -> PH/PH.
2603 ASSERT_EQ(mod.getPlaceholders().size(), 4);
2604 ASSERT_EQ(F->getNodes().size(), 3);
2605 auto *save = getSaveNodeFromDest(output);
2606 auto *gatherRanges =
2607 llvm::dyn_cast<GatherRangesNode>(save->getInput().getNode());
2608 ASSERT_TRUE(gatherRanges);
2609 EXPECT_TRUE(gatherRanges->getOutput().dims().equals({5}));
2610 EXPECT_TRUE(gatherRanges->getLengths().dims().equals({2}));
2611}
2612
2613/// Test loading Gather ops with constant folding from an Caffe2 model.
2614TEST_F(Caffe2ImporterTest, gatherConstantFoldingAndReshape) {
2615 // This test verifies that Gather gets constant-folded, so that the argument
2616 // of the reshape becomes constant.
2617 ExecutionEngine EE;
2618 auto &mod = EE.getModule();
2619
2620 std::string netDescFilename(
2621 GLOW_DATA_PATH "tests/models/caffe2Models/gather_const_fold.pbtxt");
2622 std::string netWeightFilename(
2623 GLOW_DATA_PATH "tests/models/caffe2Models/gather_const_fold_init.pbtxt");
2624 PlaceholderBindings bindings;
2625 auto *F = mod.createFunction("main");
2626 Placeholder *output;
2627 Tensor data(ElemKind::FloatTy, {1, 2, 4, 3});
2628 // This test is testing constant folding during loading, so enable it
2629 // explicitly.
2630 setConstantFoldLoaderOpsFlag(true);
2631 {
2632 Caffe2ModelLoader caffe2LD(netDescFilename, netWeightFilename, {"data"},
2633 {&data.getType()}, *F);
2634 output = EXIT_ON_ERR(caffe2LD.getOutputByName("result"));
2635 bindings.allocate(mod.getPlaceholders());
2636 }
2637 EE.compile(CompilationMode::Infer);
2638 EE.run(bindings);
2639
2640 auto result = bindings.get(output)->getHandle();
2641 std::vector<dim_t> expectedDims = {1, 4, 3, 2};
2642 EXPECT_TRUE(result.dims().vec() == expectedDims);
2643}
2644/// Test loading a LengthsRangeFill op.
2645TEST_F(Caffe2ImporterTest, LengthsRangeFill) {
2646 ExecutionEngine EE;
2647 auto &mod = EE.getModule();
2648 auto *F = mod.createFunction("main");
2649
2650 std::string NetDescFilename(
2651 GLOW_DATA_PATH
2652 "tests/models/caffe2Models/lengths_range_fill_predict_net.pbtxt");
2653 std::string NetWeightFilename(
2654 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
2655
2656 Placeholder *output;
2657 Tensor lengths(ElemKind::Int32ITy, {3});
2658
2659 {
2660 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"lengths"},
2661 {&lengths.getType()}, *F);
2662 output = EXIT_ON_ERR(caffe2LD.getOutputByName("result"));
2663 }
2664
2665 // Verify structure: PH -> LengthsRangeFill -> Save -> PH.
2666 ASSERT_EQ(mod.getPlaceholders().size(), 2);
2667 ASSERT_EQ(F->getNodes().size(), 2);
2668 auto *save = getSaveNodeFromDest(output);
2669 auto *LRF = llvm::dyn_cast<LengthsRangeFillNode>(save->getInput().getNode());
2670 ASSERT_TRUE(LRF);
2671 EXPECT_TRUE(LRF->getLengths().dims().equals({3}));
2672 EXPECT_EQ(LRF->getResult().dims().size(), 1);
2673 // Proto specifies the max output size is 8.
2674 EXPECT_TRUE(LRF->getResult().dims().equals({8}));
2675}
2676
/// Verify that different fill types are loaded with the correct types.
TEST_F(Caffe2ImporterTest, tensorFillsTest) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fill_test_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fill_test_init_net.pbtxt");

  // One Constant per fill flavor under test; assigned inside the loader scope.
  Constant *tensorFillFloat, *tensorIntFill, *tensorInt64Fill,
      *tensorStringToUInt8Fill;

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    // Loaded protos must have at least one external output, so load the
    // "*_eq" outputs with a dummy type to satisfy the loader.
    // NOTE(review): the comment previously referenced "unused_output in
    // empty_predict_net.pbtxt", but this test loads
    // fill_test_predict_net.pbtxt -- the "*_eq" names below are presumably
    // declared there; confirm against the proto.
    Type unusedTy = Type(ElemKind::FloatTy, {4});
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename,
        {"tensor_fill_float_eq", "tensor_int_fill_eq", "tensor_int64_fill_eq",
         "tensor_string_to_uint8_fill_eq"},
        {&unusedTy, &unusedTy, &unusedTy, &unusedTy}, *F);
    tensorFillFloat = llvm::dyn_cast<Constant>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_fill_float")));
    tensorIntFill = llvm::dyn_cast<Constant>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_int_fill")));
    tensorInt64Fill = llvm::dyn_cast<Constant>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_int64_fill")));
    tensorStringToUInt8Fill = llvm::dyn_cast<Constant>(EXIT_ON_ERR(
        caffe2LD.getNodeValueByName("tensor_string_to_uint8_fill")));
  }

  // Each fill must have been loaded as a Constant (dyn_cast succeeded).
  ASSERT_TRUE(tensorFillFloat);
  ASSERT_TRUE(tensorIntFill);
  ASSERT_TRUE(tensorInt64Fill);
  ASSERT_TRUE(tensorStringToUInt8Fill);

  // All fills in fill_test_init_net.pbtxt use shape {2, 2}.
  const std::vector<dim_t> expectedDims = {2, 2};
  ASSERT_TRUE(tensorFillFloat->dims().equals(expectedDims));
  ASSERT_TRUE(tensorIntFill->dims().equals(expectedDims));
  ASSERT_TRUE(tensorInt64Fill->dims().equals(expectedDims));
  ASSERT_TRUE(tensorStringToUInt8Fill->dims().equals(expectedDims));

  // Getting a handle with the expected element type also checks the fill was
  // loaded with the correct ElemKind.
  auto tensorFillFloatH = tensorFillFloat->getPayload().getHandle<float>();
  auto tensorIntFillH = tensorIntFill->getPayload().getHandle<int32_t>();
  auto tensorInt64FillH = tensorInt64Fill->getPayload().getHandle<int64_t>();
  // We load GivenTensorByteStringToUInt8Fill as UInt8QTy with dummy
  // scale/offset for now, because it's only used for rowwise-quantized tensors.
  auto tensorStringToUInt8FillH =
      tensorStringToUInt8Fill->getPayload().getHandle<uint8_t>();

  // All fills in fill_test_init_net.pbtxt are set to 0 through 3.
  for (size_t i = 0; i < 4; i++) {
    EXPECT_FLOAT_EQ(tensorFillFloatH.raw(i), (float)i);
    EXPECT_EQ(tensorIntFillH.raw(i), (int32_t)i);
    EXPECT_EQ(tensorInt64FillH.raw(i), (int64_t)i);
    // The byte-string fill stores values offset by 128.
    EXPECT_EQ(tensorStringToUInt8FillH.raw(i), (uint8_t)(i + 128));
  }
}
2741
/// Test that the HalfToFloat operator is loaded as a ConvertTo node producing
/// FloatTy.
TEST_F(Caffe2ImporterTest, HalfToFloat) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  llvm::StringRef NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/halftofloat_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // The op converts a float16 input to float32.
  Tensor input(ElemKind::Float16Ty, {1, 2, 3, 4});

  // Destroy the loader after the graph is loaded since the following checks
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename.str(), NetWeightFilename, {"X"},
                               {&input.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(output);

  // Graph has 2 nodes: Save and ConvertTo
  EXPECT_EQ(F->getNodes().size(), 2);

  // Input to save node is ConvertToNode.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *N = llvm::dyn_cast<ConvertToNode>(saveNode->getInput());
  EXPECT_TRUE(N);
  EXPECT_EQ(N->getResult().getElementType(), ElemKind::FloatTy);
}
2779
/// Test that the Alias operator is loaded as a no-op: the save reads straight
/// from the input placeholder.
TEST_F(Caffe2ImporterTest, Alias) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  llvm::StringRef NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/alias_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  Tensor input(ElemKind::FloatTy, {1, 2, 3, 4});

  // Destroy the loader after the graph is loaded since the following checks
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename.str(), NetWeightFilename, {"X"},
                               {&input.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(output);

  // The only node is Save: Alias introduces no node of its own.
  EXPECT_EQ(F->getNodes().size(), 1);

  // The save's input is the "X" placeholder itself.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *N = llvm::dyn_cast<Placeholder>(saveNode->getInput());
  EXPECT_TRUE(N);
}
2815
/// Test that the Modulo operator is loaded as a ModuloNode.
TEST_F(Caffe2ImporterTest, Modulo) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/modulo_op_net.pbtxt");
  // NOTE(review): this uses fill_test_init_net.pbtxt rather than
  // empty_init_net.pbtxt as the init net; the fills it declares appear unused
  // by the modulo net -- presumably any valid init net suffices; confirm.
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fill_test_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  Tensor data(ElemKind::Int64ITy, {7});

  // Destroy the loader after the graph is loaded since the following checks
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"data"},
                               {&data.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(output);

  // Graph has 2 nodes: Save and Modulo.
  EXPECT_EQ(F->getNodes().size(), 2);

  // The net has one input; with the save output that is 2 placeholders.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);

  // Input to save node is ModuloNode.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *N = llvm::dyn_cast<ModuloNode>(saveNode->getInput());
  ASSERT_TRUE(N);
}
2855
/// Test loading an ElementwiseLinear operator.
TEST_F(Caffe2ImporterTest, elementwiseLinear) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/elementwise_linear_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  // w and b match dim 0 of X (the axis used by this net).
  Tensor X(ElemKind::FloatTy, {10, 5});
  Tensor w(ElemKind::FloatTy, {10}), b(ElemKind::FloatTy, {10});

  // Destroy the loader after the graph is loaded since the following checks
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"X", "w", "b"},
                               {&X.getType(), &w.getType(), &b.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Check that the shape of the output matches that of the input.
  std::vector<dim_t> expectedDims = {10, 5};
  EXPECT_TRUE(output->dims().vec() == expectedDims);

  // High level checks on the content of the graph.
  // It should look like this:
  //
  //            X           w            b
  //            |           |            |
  //            |           v            v
  //            |        Broadcast   Broadcast
  //            |           |            |
  //            |           |            |
  //            |          /            /
  //            v         v------------/
  //            Mul          /
  //             |          /----------------
  //             v         v
  //                Add
  //                 |
  //                 v
  //                Save

  EXPECT_EQ(F->getNodes().size(), 5);
  auto *save = getSaveNodeFromDest(output);
  auto *add = llvm::dyn_cast<AddNode>(save->getInput().getNode());
  ASSERT_TRUE(add);
  auto *mul = llvm::dyn_cast<MulNode>(add->getLHS().getNode());
  ASSERT_TRUE(mul);
  auto *bBN = llvm::dyn_cast<BroadcastNode>(add->getRHS().getNode());
  ASSERT_TRUE(bBN);
  // This net specifies axis = 0, so broadcasting starts from dim 0.
  EXPECT_EQ(bBN->getAxis(), 0);
  auto *XPH = llvm::dyn_cast<Placeholder>(mul->getRHS().getNode());
  EXPECT_EQ(XPH, mod.getPlaceholderByNameSlow("X"));
  auto *wBN = llvm::dyn_cast<BroadcastNode>(mul->getLHS().getNode());
  ASSERT_TRUE(wBN);
  EXPECT_EQ(wBN->getAxis(), 0);
  auto *wPH = llvm::dyn_cast<Placeholder>(wBN->getInput().getNode());
  EXPECT_EQ(wPH, mod.getPlaceholderByNameSlow("w"));
  auto *bPH = llvm::dyn_cast<Placeholder>(bBN->getInput().getNode());
  EXPECT_EQ(bPH, mod.getPlaceholderByNameSlow("b"));

  // We have three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);
}
2926
/// Test loading an ElementwiseLinear operator with no axis specified.
TEST_F(Caffe2ImporterTest, elementwiseLinearUnspecifiedAxis) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/elementwise_linear_default_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;

  // Since the loader will assume that axis = 1, the 0th dim of the shapes of w
  // and b must match the 1st dim of X.
  Tensor X(ElemKind::FloatTy, {5, 10});
  Tensor w(ElemKind::FloatTy, {10}), b(ElemKind::FloatTy, {10});

  // Destroy the loader after the graph is loaded since the following checks
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"X", "w", "b"},
                               {&X.getType(), &w.getType(), &b.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Check that the shape of the output matches that of the input.
  std::vector<dim_t> expectedDims = {5, 10};
  EXPECT_TRUE(output->dims().vec() == expectedDims);

  // High level checks on the content of the graph.
  // It should look like this:
  //
  //            X           w            b
  //            |           |            |
  //            |           v            v
  //            |        Broadcast   Broadcast
  //            |           |            |
  //            |           |            |
  //            |          /            /
  //            v         v------------/
  //            Mul          /
  //             |          /----------------
  //             v         v
  //                Add
  //                 |
  //                 v
  //                Save

  EXPECT_EQ(F->getNodes().size(), 5);
  auto *save = getSaveNodeFromDest(output);
  auto *add = llvm::dyn_cast<AddNode>(save->getInput().getNode());
  ASSERT_TRUE(add);
  auto *mul = llvm::dyn_cast<MulNode>(add->getLHS().getNode());
  ASSERT_TRUE(mul);
  auto *bBN = llvm::dyn_cast<BroadcastNode>(add->getRHS().getNode());
  ASSERT_TRUE(bBN);
  // No axis in the proto, so the loader's default of 1 must be used.
  EXPECT_EQ(bBN->getAxis(), 1);
  auto *XPH = llvm::dyn_cast<Placeholder>(mul->getRHS().getNode());
  EXPECT_EQ(XPH, mod.getPlaceholderByNameSlow("X"));
  auto *wBN = llvm::dyn_cast<BroadcastNode>(mul->getLHS().getNode());
  ASSERT_TRUE(wBN);
  EXPECT_EQ(wBN->getAxis(), 1);
  auto *wPH = llvm::dyn_cast<Placeholder>(wBN->getInput().getNode());
  EXPECT_EQ(wPH, mod.getPlaceholderByNameSlow("w"));
  auto *bPH = llvm::dyn_cast<Placeholder>(bBN->getInput().getNode());
  EXPECT_EQ(bPH, mod.getPlaceholderByNameSlow("b"));

  // We have three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);
}
3001
/// Test loading an ElementwiseLinear operator with implicit broadcast
TEST_F(Caffe2ImporterTest, elementwiseImplicitBroadcast) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/elementwise_linear_broadcast_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;

  // Since the loader will assume that axis = 1, the 0th dim of the shapes of w
  // and b must match the 1st dim of X.
  // NOTE(review): this comment was copied from the explicit-axis test; here
  // the broadcast is implicit -- confirm against the proto that the effective
  // axis is indeed 1 (the assertions below expect it).
  Tensor X(ElemKind::FloatTy, {5, 10});
  Tensor w(ElemKind::FloatTy, {10}), b(ElemKind::FloatTy, {10});

  // Destroy the loader after the graph is loaded since the following checks
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"X", "w", "b"},
                               {&X.getType(), &w.getType(), &b.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Check that the shape of the output matches that of the input.
  std::vector<dim_t> expectedDims = {5, 10};
  EXPECT_TRUE(output->dims().vec() == expectedDims);

  // High level checks on the content of the graph.
  // It should look like this:
  //
  //            X           w            b
  //            |           |            |
  //            |           v            v
  //            |        Broadcast   Broadcast
  //            |           |            |
  //            |           |            |
  //            |          /            /
  //            v         v------------/
  //            Mul          /
  //             |          /----------------
  //             v         v
  //                Add
  //                 |
  //                 v
  //                Save

  EXPECT_EQ(F->getNodes().size(), 5);
  auto *save = getSaveNodeFromDest(output);
  auto *add = llvm::dyn_cast<AddNode>(save->getInput().getNode());
  ASSERT_TRUE(add);
  auto *mul = llvm::dyn_cast<MulNode>(add->getLHS().getNode());
  ASSERT_TRUE(mul);
  auto *bBN = llvm::dyn_cast<BroadcastNode>(add->getRHS().getNode());
  ASSERT_TRUE(bBN);
  EXPECT_EQ(bBN->getAxis(), 1);
  // Unlike the explicit-axis tests, here X is the LHS of the Mul and the
  // broadcast w is the RHS.
  auto *XPH = llvm::dyn_cast<Placeholder>(mul->getLHS().getNode());
  EXPECT_EQ(XPH, mod.getPlaceholderByNameSlow("X"));
  auto *wBN = llvm::dyn_cast<BroadcastNode>(mul->getRHS().getNode());
  ASSERT_TRUE(wBN);
  EXPECT_EQ(wBN->getAxis(), 1);
  auto *wPH = llvm::dyn_cast<Placeholder>(wBN->getInput().getNode());
  EXPECT_EQ(wPH, mod.getPlaceholderByNameSlow("w"));
  auto *bPH = llvm::dyn_cast<Placeholder>(bBN->getInput().getNode());
  EXPECT_EQ(bPH, mod.getPlaceholderByNameSlow("b"));

  // We have three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);
}
3076
/// Test loading SparseLengthsWeightedSum8BitsRowwise. This is created as a
/// RowwiseQuantizedSparseLengthsWeightedSumNode. The following inputs/outputs
/// are used/expected for this test. Note that the DATA input is
/// rowwise-quantized in the init_net proto. Scales/offsets are loaded in a
/// separate tensor scales_bias. The C2 loader will copy the scales/offsets into
/// separate Constants for use by RowwiseQuantizedSparseLengthsWeightedSumNode.
///    DATA  =   [[2.0, -0.5, 13]]
///    WEIGHTS = [3, 1, 0, 0, 0, 0, 2, -0.5]
///    INDICES = [1, 0, 2, 0, 1, 2, 2, 0]
///    LENGTHS = [3, 0, 3, 2]
///    OUTPUT =  [[0.5, 0, 0, 25]]
TEST_F(Caffe2ImporterTest, SparseLengthsWeightedSum8BitsRowwise) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "rowwise_quantized_sparse_lengths_weighted_sum_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "rowwise_quantized_sparse_lengths_weighted_sum_init_net.pbtxt");

  Placeholder *output, *indices, *lengths;
  PlaceholderBindings bindings;

  TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
  TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {4});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"indices", "lengths"},
                               {indicesType, lengthsType}, *F);

    indices = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
    lengths = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(indices);
  ASSERT_TRUE(lengths);

  // Fill the runtime inputs with the values from the test header comment.
  bindings.allocate(indices)->getHandle<int64_t>() = {
      1, 0, 2, 0, 1, 2, 2, 0,
  };
  bindings.allocate(lengths)->getHandle<int32_t>() = {
      3,
      0,
      3,
      2,
  };

  // High level check on the content of the graph. We have 1 rowwise-quantized
  // SLWS and 1 save, along with 2 Slices and 2 Reshapes to extract out
  // scales/biases from the loaded Constant.
  EXPECT_EQ(F->getNodes().size(), 6);
  SaveNode *saveNode = getSaveNodeFromDest(output);
  RowwiseQuantizedSparseLengthsWeightedSumNode *RWQSLWS =
      llvm::dyn_cast<RowwiseQuantizedSparseLengthsWeightedSumNode>(
          saveNode->getInput().getNode());
  ASSERT_TRUE(RWQSLWS);
  // Check that the weights input is a Constant node.
  Constant *weights = llvm::dyn_cast<Constant>(RWQSLWS->getWeights().getNode());
  ASSERT_TRUE(weights);

  // Check that we have a Reshape(Slice(Constant)) for Scales/Offsets. Both
  // slices must come from the same fused scales_bias Constant.
  ReshapeNode *reshapeScales =
      llvm::dyn_cast<ReshapeNode>(RWQSLWS->getScales());
  ASSERT_TRUE(reshapeScales);
  SliceNode *sliceScales = llvm::dyn_cast<SliceNode>(reshapeScales->getInput());
  ASSERT_TRUE(sliceScales);
  ReshapeNode *reshapeOffsets =
      llvm::dyn_cast<ReshapeNode>(RWQSLWS->getOffsets());
  ASSERT_TRUE(reshapeOffsets);
  SliceNode *sliceOffsets =
      llvm::dyn_cast<SliceNode>(reshapeOffsets->getInput());
  ASSERT_TRUE(sliceOffsets);
  EXPECT_EQ(sliceScales->getInput(), sliceOffsets->getInput());
  EXPECT_TRUE(llvm::isa<Constant>(sliceScales->getInput()));

  // We have 3 placeholders: 1 for save, and then indices and lengths.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);

  // We have 3 constants: data, scales+offsets, and weights. Originally fused
  // data is no longer used and is removed by loader.
  EXPECT_EQ(mod.getConstants().size(), 3);

  EE.compile(CompilationMode::Infer);
  bindings.allocate(mod.getPlaceholders());

  // Post compile, should have folded the Slice and Reshape into the
  // Scales/Biases. Also, DCE should have gotten rid of the originally fused
  // data Constant, as it is no longer used.
  EXPECT_EQ(F->getNodes().size(), 2);
  EXPECT_EQ(mod.getConstants().size(), 4);

  EE.run(bindings);

  Tensor &result = *bindings.get(output);
  Tensor expected(ElemKind::FloatTy, {4, 1});
  expected.getHandle() = {
      0.5,
      0,
      0,
      25,
  };

  // Loose tolerance (0.03) because the data path is 8-bit quantized.
  EXPECT_TRUE(expected.isEqual(result, 0.03f));
}
3192
/// Test loading SparseLengthsSum8BitsRowwise. This is created as a
/// RowwiseQuantizedSparseLengthsWeightedSumNode. The following inputs/outputs
/// are used/expected for this test. Note that the DATA input is
/// rowwise-quantized in the init_net proto. Scales/offsets are loaded in a
/// separate tensor scales_bias. The C2 loader will copy the scales/offsets into
/// separate Constants for use by RowwiseQuantizedSparseLengthsSumNode.
///    DATA  = [
///        [1.0, 1.2],
///        [2.3, 3.4],
///        [4.5, 5.7],
///    ]
///    INDICES = [2, 0, 1, 2, 0, 0, 0, 0]
///    LENGTHS = [2, 0, 2, 1, 3]
///    OUTPUT = [
///        [5.5, 6.9],
///        [0.0, 0.0],
///        [6.8, 9.1],
///        [1.0, 1.2],
///        [3.0, 3.6],
///    ]
TEST_F(Caffe2ImporterTest, SparseLengthsSum8BitsRowwise) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/"
                     "rowwise_quantized_sparse_lengths_sum_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/"
                     "rowwise_quantized_sparse_lengths_sum_init_net.pbtxt");

  Placeholder *output, *indices, *lengths;
  PlaceholderBindings bindings;

  TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
  TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {5});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"indices", "lengths"},
                               {indicesType, lengthsType}, *F);

    indices = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
    lengths = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(indices);
  ASSERT_TRUE(lengths);

  // Fill the runtime inputs with the values from the test header comment.
  bindings.allocate(indices)->getHandle<int64_t>() = {
      2, 0, 1, 2, 0, 0, 0, 0,
  };
  bindings.allocate(lengths)->getHandle<int32_t>() = {
      2, 0, 2, 1, 3,
  };

  // High level check on the content of the graph. We have 1 rowwise-quantized
  // SLWS (which implements SLS), 1 Splat for the weights, and 1 save. For SLS
  // scales/bias, we have 2 Slices and 2 Reshapes to extract out scales/biases
  // from the loaded Constant.
  EXPECT_EQ(F->getNodes().size(), 7);
  SaveNode *saveNode = getSaveNodeFromDest(output);
  RowwiseQuantizedSparseLengthsWeightedSumNode *RWQSLS =
      llvm::dyn_cast<RowwiseQuantizedSparseLengthsWeightedSumNode>(
          saveNode->getInput().getNode());
  ASSERT_TRUE(RWQSLS);
  // SLS is expressed as a weighted sum with all weights splatted to 1.0.
  SplatNode *splatNode =
      llvm::dyn_cast<SplatNode>(RWQSLS->getWeights().getNode());
  ASSERT_TRUE(splatNode);
  EXPECT_EQ(splatNode->getValue(), 1.0f);

  // Check that we have a Reshape(Slice(Constant)) for Scales/Offsets. Both
  // slices must come from the same fused scales_bias Constant.
  ReshapeNode *reshapeScales = llvm::dyn_cast<ReshapeNode>(RWQSLS->getScales());
  ASSERT_TRUE(reshapeScales);
  SliceNode *sliceScales = llvm::dyn_cast<SliceNode>(reshapeScales->getInput());
  ASSERT_TRUE(sliceScales);
  ReshapeNode *reshapeOffsets =
      llvm::dyn_cast<ReshapeNode>(RWQSLS->getOffsets());
  ASSERT_TRUE(reshapeOffsets);
  SliceNode *sliceOffsets =
      llvm::dyn_cast<SliceNode>(reshapeOffsets->getInput());
  ASSERT_TRUE(sliceOffsets);
  EXPECT_EQ(sliceScales->getInput(), sliceOffsets->getInput());
  EXPECT_TRUE(llvm::isa<Constant>(sliceScales->getInput()));

  // We have 3 placeholders: 1 for save, and then indices and lengths.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);

  // We have 2 constants: Data and fused scales+offsets.
  EXPECT_EQ(mod.getConstants().size(), 2);

  EE.compile(CompilationMode::Infer);
  bindings.allocate(mod.getPlaceholders());

  // Post compile, DCE should have gotten rid of the originally fused data
  // Constant, as it is no longer used.
  EXPECT_EQ(mod.getConstants().size(), 3);

  EE.run(bindings);

  Tensor &result = *bindings.get(output);
  Tensor expected(ElemKind::FloatTy, {5, 2});
  expected.getHandle() = {
      5.5f, 6.9f, 0.0f, 0.0f, 6.8f, 9.1f, 1.0f, 1.2f, 3.0f, 3.6f,
  };

  // Loose tolerance (0.02) because the data path is 8-bit quantized.
  EXPECT_TRUE(expected.isEqual(result, 0.02f));
}
3307
/// Test loading SparseLengthsWeightedSumFused8BitRowwise. This is created as a
/// RowwiseQuantizedSparseLengthsWeightedSumNode. The following inputs/outputs
/// are used/expected for this test. Note that the DATA input is
/// rowwise-quantized in the init_net proto.
///    DATA  =   [[2.0, -0.5, 13]]
///    WEIGHTS = [3, 1, 0, 0, 0, 0, 2, -0.5]
///    INDICES = [1, 0, 2, 0, 1, 2, 2, 0]
///    LENGTHS = [3, 0, 3, 2]
///    OUTPUT =  [[0.5, 0, 0, 25]]
/// \p avgLength selects the predict net: NAN loads the net without an average
/// length hint; any other value loads the net that sets it, and the loaded
/// node's AvgLength is then expected to equal \p avgLength.
static void testFRWQSLWS(float avgLength) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      std::isnan(avgLength) ? GLOW_DATA_PATH
          "tests/models/caffe2Models/"
          "fused_rowwise_quantized_sparse_lengths_weighted_sum_predict_net."
          "pbtxt"
                            : GLOW_DATA_PATH
          "tests/models/caffe2Models/"
          "fused_rowwise_quantized_sparse_lengths_weighted_sum_avg_length_"
          "predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "fused_rowwise_quantized_sparse_lengths_weighted_sum_init_net.pbtxt");

  Placeholder *output, *indices, *lengths;
  PlaceholderBindings bindings;

  TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
  TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {4});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"indices", "lengths"},
                               {indicesType, lengthsType}, *F);

    indices = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
    lengths = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(indices);
  ASSERT_TRUE(lengths);

  // Fill the runtime inputs with the values from the test header comment.
  bindings.allocate(indices)->getHandle<int64_t>() = {
      1, 0, 2, 0, 1, 2, 2, 0,
  };
  bindings.allocate(lengths)->getHandle<int32_t>() = {
      3,
      0,
      3,
      2,
  };

  // High level check on the content of the graph. We have 1 rowwise-quantized
  // SLWS and 1 save.
  EXPECT_EQ(F->getNodes().size(), 2);
  SaveNode *saveNode = getSaveNodeFromDest(output);
  FusedRowwiseQuantizedSparseLengthsWeightedSumNode *FRWQSLWS =
      llvm::dyn_cast<FusedRowwiseQuantizedSparseLengthsWeightedSumNode>(
          saveNode->getInput().getNode());
  ASSERT_TRUE(FRWQSLWS);
  // NaN never compares equal to itself, so check it via isnan.
  if (std::isnan(avgLength)) {
    EXPECT_TRUE(std::isnan(FRWQSLWS->getAvgLength()));
  } else {
    EXPECT_EQ(FRWQSLWS->getAvgLength(), avgLength);
  }
  // Check that the weights input is a Constant node.
  Constant *weights =
      llvm::dyn_cast<Constant>(FRWQSLWS->getWeights().getNode());
  ASSERT_TRUE(weights);
  // Check that the data input is a Constant node with expected ElemKind.
  Constant *data = llvm::dyn_cast<Constant>(FRWQSLWS->getData().getNode());
  ASSERT_TRUE(data);
  EXPECT_TRUE(data->getElementType() == ElemKind::UInt8FusedQTy);

  // We have 3 placeholders: 1 for save, and then indices and lengths.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);

  // We have 2 constants: data and weights.
  EXPECT_EQ(mod.getConstants().size(), 2);

  EE.compile(CompilationMode::Infer);
  bindings.allocate(mod.getPlaceholders());

  EE.run(bindings);

  Tensor &result = *bindings.get(output);
  Tensor expected(ElemKind::FloatTy, {4, 1});
  expected.getHandle() = {
      0.5,
      0,
      0,
      25,
  };

  // Loose tolerance (0.02) because the data path is 8-bit quantized.
  EXPECT_TRUE(expected.isEqual(result, 0.02f));
}
3413
TEST_F(Caffe2ImporterTest, SparseLengthsWeightedSumFused8BitRowwise) {
  // NAN avgLength: the helper then expects the loaded node's AvgLength to be
  // NaN (i.e. no average-length hint set).
  testFRWQSLWS(NAN);
}
3417
TEST_F(Caffe2ImporterTest, SparseLengthsWeightedSumFused8BitRowwiseAvgLength) {
  // Explicit avgLength: the helper then expects the loaded node to report
  // AvgLength == 5.0.
  testFRWQSLWS(5.0f);
}
3421
/// Test loading SparseLengthsSumFused8BitRowwise. This is created as a
/// FusedRowwiseQuantizedSparseLengthsSumNode. The following inputs/outputs
3424/// are used/expected for this test. Note that the DATA input is
3425/// rowwise-quantized in the init_net proto.
3426/// DATA = [
3427/// [1.0, 1.2],
3428/// [2.3, 3.4],
3429/// [4.5, 5.7],
3430/// ]
3431/// INDICES = [2, 0, 1, 2, 0, 0, 0, 0]
3432/// LENGTHS = [2, 0, 2, 1, 3]
3433/// OUTPUT = [
3434/// [5.5, 6.9],
3435/// [0.0, 0.0],
3436/// [6.8, 9.1],
3437/// [1.0, 1.2],
3438/// [3.0, 3.6],
3439/// ]
3440TEST_F(Caffe2ImporterTest, SparseLengthsSumFused8BitRowwise) {
3441 ExecutionEngine EE{};
3442 auto &mod = EE.getModule();
3443 Function *F = mod.createFunction("main");
3444
3445 std::string NetDescFilename(
3446 GLOW_DATA_PATH
3447 "tests/models/caffe2Models/"
3448 "fused_rowwise_quantized_sparse_lengths_sum_predict_net.pbtxt");
3449 std::string NetWeightFilename(
3450 GLOW_DATA_PATH
3451 "tests/models/caffe2Models/"
3452 "fused_rowwise_quantized_sparse_lengths_sum_init_net.pbtxt");
3453
3454 Placeholder *output, *indices, *lengths;
3455 PlaceholderBindings bindings;
3456
3457 TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
3458 TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {5});
3459
3460 // Destroy the loader after the graph is loaded since the following execution
3461 // will not depend on anything from the loader.
3462 {
3463 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
3464 {"indices", "lengths"},
3465 {indicesType, lengthsType}, *F);
3466
3467 indices = llvm::dyn_cast<Placeholder>(
3468 EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
3469 lengths = llvm::dyn_cast<Placeholder>(
3470 EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
3471 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
3472 }
3473
3474 ASSERT_TRUE(indices);
3475 ASSERT_TRUE(lengths);
3476
3477 bindings.allocate(indices)->getHandle<int64_t>() = {
3478 2, 0, 1, 2, 0, 0, 0, 0,
3479 };
3480 bindings.allocate(lengths)->getHandle<int32_t>() = {
3481 2, 0, 2, 1, 3,
3482 };
3483
3484 // High level check on the content of the graph. We have 1 rowwise-quantized
3485 // SLS and 1 save.
3486 EXPECT_EQ(F->getNodes().size(), 2);
3487 SaveNode *saveNode = getSaveNodeFromDest(output);
3488 FusedRowwiseQuantizedSparseLengthsSumNode *FRWQSLS =
3489 llvm::dyn_cast<FusedRowwiseQuantizedSparseLengthsSumNode>(
3490 saveNode->getInput().getNode());
3491 ASSERT_TRUE(FRWQSLS);
3492 // Check that the data input is a Constant node with expected ElemKind.
3493 Constant *data = llvm::dyn_cast<Constant>(FRWQSLS->getData().getNode());
3494 ASSERT_TRUE(data);
3495 EXPECT_TRUE(data->getElementType() == ElemKind::UInt8FusedQTy);
3496
3497 // We have 3 placeholders: 1 for save, and then indices and lengths.
3498 EXPECT_EQ(mod.getPlaceholders().size(), 3);
3499
3500 // We have 1 constant: data.
3501 EXPECT_EQ(mod.getConstants().size(), 1);
3502
3503 EE.compile(CompilationMode::Infer);
3504 bindings.allocate(mod.getPlaceholders());
3505
3506 EE.run(bindings);
3507
3508 Tensor &result = *bindings.get(output);
3509 Tensor expected(ElemKind::FloatTy, {5, 2});
3510 expected.getHandle() = {
3511 5.5f, 6.9f, 0.0f, 0.0f, 6.8f, 9.1f, 1.0f, 1.2f, 3.0f, 3.6f,
3512 };
3513
3514 EXPECT_TRUE(expected.isEqual(result, 0.02f));
3515}
3516
3517/// Test loading SparseLengthsSumFused8BitRowwise with all lookup lengths equal
/// to one. This is created as a FusedRowwiseQuantizedSparseLengthsSumNode
3519/// with `AllLengthsOne=true`. The following inputs/outputs are used/expected
3520/// for this test. Note that the DATA input is rowwise-quantized in the init_net
3521/// proto.
3522/// DATA = [
3523/// [1.0, 1.2],
3524/// [2.3, 3.4],
3525/// [4.5, 5.7],
3526/// ]
3527/// INDICES = [2, 0, 1, 2, 0]
3528/// LENGTHS = [1, 1, 1, 1, 1]
3529/// OUTPUT = [
3530/// [4.5, 5.7],
3531/// [1.0, 1.2],
3532/// [2.3, 3.4],
3533/// [4.5, 5.7],
3534/// [1.0, 1.2],
3535/// ]
3536TEST_F(Caffe2ImporterTest, SparseLengthsSumFused8BitRowwiseAllLengthsOne) {
3537 ExecutionEngine EE{};
3538 auto &mod = EE.getModule();
3539 Function *F = mod.createFunction("main");
3540
3541 std::string NetDescFilename(
3542 GLOW_DATA_PATH
3543 "tests/models/caffe2Models/"
3544 "fused_rowwise_quantized_sparse_lengths_sum_predict_net_length1.pbtxt");
3545 std::string NetWeightFilename(
3546 GLOW_DATA_PATH
3547 "tests/models/caffe2Models/"
3548 "fused_rowwise_quantized_sparse_lengths_sum_init_net.pbtxt");
3549
3550 Placeholder *output, *indices, *lengths;
3551 PlaceholderBindings bindings;
3552
3553 TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {5});
3554 TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {5});
3555
3556 // Destroy the loader after the graph is loaded since the following execution
3557 // will not depend on anything from the loader.
3558 {
3559 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
3560 {"indices", "lengths"},
3561 {indicesType, lengthsType}, *F);
3562
3563 indices = llvm::dyn_cast<Placeholder>(
3564 EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
3565 lengths = llvm::dyn_cast<Placeholder>(
3566 EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
3567 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
3568 }
3569
3570 ASSERT_TRUE(indices);
3571 ASSERT_TRUE(lengths);
3572
3573 bindings.allocate(indices)->getHandle<int64_t>() = {
3574 2, 0, 1, 2, 0,
3575 };
3576 bindings.allocate(lengths)->getHandle<int32_t>() = {
3577 1, 1, 1, 1, 1,
3578 };
3579
3580 // High level check on the content of the graph. We have 1 rowwise-quantized
3581 // SLS and 1 save.
3582 EXPECT_EQ(F->getNodes().size(), 2);
3583 SaveNode *saveNode = getSaveNodeFromDest(output);
3584 FusedRowwiseQuantizedSparseLengthsSumNode *FRWQSLS =
3585 llvm::dyn_cast<FusedRowwiseQuantizedSparseLengthsSumNode>(
3586 saveNode->getInput().getNode());
3587 ASSERT_TRUE(FRWQSLS);
3588 EXPECT_EQ(FRWQSLS->getLengthsMode(), LengthsMode::AllOne);
3589 // Check that the data input is a Constant node with expected ElemKind.
3590 Constant *data = llvm::dyn_cast<Constant>(FRWQSLS->getData().getNode());
3591 ASSERT_TRUE(data);
3592 EXPECT_TRUE(data->getElementType() == ElemKind::UInt8FusedQTy);
3593
3594 // We have 3 placeholders: 1 for save, and then indices and lengths.
3595 EXPECT_EQ(mod.getPlaceholders().size(), 3);
3596
3597 // We have 1 constant: data.
3598 EXPECT_EQ(mod.getConstants().size(), 1);
3599
3600 EE.compile(CompilationMode::Infer);
3601 bindings.allocate(mod.getPlaceholders());
3602
3603 EE.run(bindings);
3604
3605 Tensor &result = *bindings.get(output);
3606 Tensor expected(ElemKind::FloatTy, {5, 2});
3607 expected.getHandle() = {
3608 4.5f, 5.7f, 1.0f, 1.2f, 2.3f, 3.4f, 4.5f, 5.7f, 1.0f, 1.2f,
3609 };
3610
3611 EXPECT_TRUE(expected.isEqual(result, 0.02f));
3612}
3613
3614/// Test loading SparseLengthsSumFused4BitRowwise.
3615TEST_F(Caffe2ImporterTest, SparseLengthsSumFused4BitRowwise) {
3616 ExecutionEngine EE{};
3617 auto &mod = EE.getModule();
3618 Function *F = mod.createFunction("main");
3619
3620 std::string NetDescFilename(
3621 GLOW_DATA_PATH
3622 "tests/models/caffe2Models/"
3623 "4bit_fused_rowwise_quantized_sparse_lengths_sum_predict_net.pbtxt");
3624 std::string NetWeightFilename(
3625 GLOW_DATA_PATH
3626 "tests/models/caffe2Models/"
3627 "4bit_fused_rowwise_quantized_sparse_lengths_sum_init_net.pbtxt");
3628
3629 Placeholder *output, *indices, *lengths;
3630 PlaceholderBindings bindings;
3631
3632 TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
3633 TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {5});
3634
3635 // Destroy the loader after the graph is loaded since the following execution
3636 // will not depend on anything from the loader.
3637 {
3638 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
3639 {"indices", "lengths"},
3640 {indicesType, lengthsType}, *F);
3641
3642 indices = llvm::dyn_cast<Placeholder>(
3643 EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
3644 lengths = llvm::dyn_cast<Placeholder>(
3645 EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
3646 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
3647 }
3648
3649 ASSERT_TRUE(indices);
3650 ASSERT_TRUE(lengths);
3651
3652 // High level check on the content of the graph. We have 1 rowwise-quantized
3653 // SLS, 1 convertTo and 1 save.
3654 EXPECT_EQ(F->getNodes().size(), 3);
3655 SaveNode *saveNode = getSaveNodeFromDest(output);
3656 ConvertToNode *C =
3657 llvm::dyn_cast<ConvertToNode>(saveNode->getInput().getNode());
3658 ASSERT_TRUE(C);
3659 FusedRowwiseQuantizedSparseLengthsSumNode *FRWQSLS =
3660 llvm::dyn_cast<FusedRowwiseQuantizedSparseLengthsSumNode>(
3661 C->getInput().getNode());
3662 ASSERT_TRUE(FRWQSLS);
3663 // Check that the data input is a Constant node with expected ElemKind.
3664 Constant *data = llvm::dyn_cast<Constant>(FRWQSLS->getData().getNode());
3665 ASSERT_TRUE(data);
3666 EXPECT_TRUE(data->getElementType() == ElemKind::UInt4FusedFP16QTy);
3667
3668 // Check the output dim
3669 const auto out_node = saveNode->getOutput();
3670 EXPECT_EQ(out_node.getElementType(), ElemKind::FloatTy);
3671 const auto dims = out_node.dims();
3672 EXPECT_EQ(dims.size(), 2);
3673 EXPECT_EQ(dims[0], 5);
3674 EXPECT_EQ(dims[1], 10);
3675
3676 // We have 3 placeholders: 1 for save, and then indices and lengths.
3677 EXPECT_EQ(mod.getPlaceholders().size(), 3);
3678
3679 // We have 1 constant: data.
3680 EXPECT_EQ(mod.getConstants().size(), 1);
3681}
3682
3683/// Load big enough model and validate node order.
3684TEST_F(Caffe2ImporterTest, validateNodeOrder) {
3685 ExecutionEngine EE{};
3686 auto &mod = EE.getModule();
3687 Function *F = mod.createFunction("main");
3688 std::string NetDescFilename(
3689 GLOW_DATA_PATH
3690 "tests/models/caffe2Models/batch_box_cox_predict_net.pbtxt");
3691 std::string NetWeightFilename(
3692 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
3693
3694 PlaceholderBindings bindings;
3695
3696 // Input tensors.
3697 const dim_t kRows = 10;
3698 const dim_t kCols = 5;
3699 Tensor data(ElemKind::FloatTy, {kRows, kCols});
3700 Tensor lambda1(ElemKind::FloatTy, {kCols});
3701 Tensor lambda2(ElemKind::FloatTy, {kCols});
3702 Tensor O(ElemKind::FloatTy, {kRows, kCols});
3703 // Destroy the loader after the graph is loaded since the following execution
3704 // will not depend on anything from the loader.
3705 {
3706 Caffe2ModelLoader caffe2LD(
3707 NetDescFilename, NetWeightFilename, {"data", "lambda1", "lambda2"},
3708 {&data.getType(), &lambda1.getType(), &lambda2.getType()}, *F);
3709 bindings.allocate(mod.getPlaceholders());
3710 updateInputPlaceholdersByName(bindings, &mod,
3711 {"data", "lambda1", "lambda2"},
3712 {&data, &lambda1, &lambda2});
3713 }
3714
3715 EXPECT_EQ(F->getNodes().size(), 2);
3716 // Make sure that nodes are sorted by name.
3717 EXPECT_TRUE(std::is_sorted(
3718 F->getNodes().begin(), F->getNodes().end(),
3719 [](const Node &a, const Node &b) { return a.getName() < b.getName(); }));
3720}
3721
3722TEST_F(Caffe2ImporterTest, importInt8ConvRelu) {
3723 ExecutionEngine EE{};
3724 auto &mod = EE.getModule();
3725 Function *F = mod.createFunction("main");
3726
3727 std::string NetDescFilename(
3728 GLOW_DATA_PATH "tests/models/caffe2Models/int8convrelu_pred_net.pbtxt");
3729 std::string NetWeightFilename(
3730 GLOW_DATA_PATH "tests/models/caffe2Models/int8convrelu_init_net.pbtxt");
3731
3732 Placeholder *output;
3733 PlaceholderBindings bindings;
3734
3735 // Destroy the loader after the graph is loaded since the following execution
3736 // will not depend on anything from the loader.
3737 {
3738 Tensor data(ElemKind::Int8QTy, {1, 1, 3, 3}, 1, 0);
3739 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
3740 {"gpu_0/data_0"}, {&data.getType()}, *F);
3741 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
3742
3743 bindings.allocate(mod.getPlaceholders());
3744 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
3745 }
3746
3747 // High level check on the content of the graph. We should have
3748 // {transpose, transpose} => conv => relu => transpose => save
3749 EXPECT_EQ(F->getNodes().size(), 6);
3750 auto *saveNode = getSaveNodeFromDest(output);
3751
3752 auto *transNode1 =
3753 llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
3754 ASSERT_TRUE(transNode1);
3755 auto *reluNode = llvm::dyn_cast<ReluNode>(transNode1->getInput().getNode());
3756 ASSERT_TRUE(reluNode);
3757 EXPECT_TRUE(reluNode->getResult().getType()->isQuantizedType());
3758 EXPECT_EQ(reluNode->getResult().getType()->getScale(), 1.5f);
3759 EXPECT_EQ(reluNode->getResult().getType()->getOffset(),
3760 7 - UINT8_TO_INT8_SHIFT);
3761 auto *convNode =
3762 llvm::dyn_cast<ConvolutionNode>(reluNode->getInput().getNode());
3763 ASSERT_TRUE(convNode);
3764 EXPECT_TRUE(convNode->getResult().getType()->isQuantizedType());
3765 EXPECT_EQ(convNode->getResult().getType()->getScale(), 1.5f);
3766 EXPECT_EQ(convNode->getResult().getType()->getOffset(),
3767 7 - UINT8_TO_INT8_SHIFT);
3768 EXPECT_TRUE(convNode->getFilter().getType()->isQuantizedType());
3769 EXPECT_EQ(convNode->getFilter().getType()->getScale(), 2.f);
3770 EXPECT_EQ(convNode->getFilter().getType()->getOffset(),
3771 10 - UINT8_TO_INT8_SHIFT);
3772 EXPECT_TRUE(convNode->getBias().getType()->isQuantizedType());
3773 EXPECT_EQ(convNode->getBias().getType()->getScale(), 10.f);
3774 // This one is loaded int32, so has no shift.
3775 EXPECT_EQ(convNode->getBias().getType()->getOffset(), 4);
3776 auto *transNode2 =
3777 llvm::dyn_cast<TransposeNode>(convNode->getInput().getNode());
3778 ASSERT_TRUE(transNode2);
3779 auto *transNode3 =
3780 llvm::dyn_cast<TransposeNode>(convNode->getFilter().getNode());
3781 ASSERT_TRUE(transNode3);
3782
3783 EE.compile(CompilationMode::Infer);
3784}
3785
3786TEST_F(Caffe2ImporterTest, importInt8SumRelu) {
3787 ExecutionEngine EE{};
3788 auto &mod = EE.getModule();
3789 Function *F = mod.createFunction("main");
3790
3791 std::string NetDescFilename(
3792 GLOW_DATA_PATH "tests/models/caffe2Models/int8sumrelu_pred_net.pbtxt");
3793 std::string NetWeightFilename(
3794 GLOW_DATA_PATH "tests/models/caffe2Models/int8sumrelu_init_net.pbtxt");
3795
3796 Placeholder *output;
3797 PlaceholderBindings bindings;
3798
3799 // Destroy the loader after the graph is loaded since the following execution
3800 // will not depend on anything from the loader.
3801 {
3802 Tensor data(ElemKind::Int8QTy, {4, 2}, 1, 0);
3803 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
3804 {"gpu_0/data_0"}, {&data.getType()}, *F);
3805 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
3806
3807 bindings.allocate(mod.getPlaceholders());
3808 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
3809 }
3810
3811 // High level check on the content of the graph. We should have
3812 // input-=> add => relu => save
3813 // const/
3814 EXPECT_EQ(F->getNodes().size(), 3);
3815 auto *save = getSaveNodeFromDest(output);
3816
3817 auto *relu = llvm::dyn_cast<ReluNode>(save->getInput().getNode());
3818 ASSERT_TRUE(relu);
3819 auto *add = llvm::dyn_cast<AddNode>(relu->getInput().getNode());
3820 ASSERT_TRUE(add);
3821 auto *input = llvm::dyn_cast<Placeholder>(add->getLHS().getNode());
3822 ASSERT_TRUE(input);
3823 auto *val = llvm::dyn_cast<Constant>(add->getRHS().getNode());
3824 ASSERT_TRUE(val);
3825
3826 EE.compile(CompilationMode::Infer);
3827}
3828
3829TEST_F(Caffe2ImporterTest, importNames) {
3830 std::string NetDescFilename(GLOW_DATA_PATH
3831 "tests/models/caffe2Models/sigmoid.pbtxt");
3832 std::string NetWeightFilename(
3833 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
3834 ExecutionEngine EE;
3835 auto &mod = EE.getModule();
3836 auto *F = mod.createFunction("main");
3837 Tensor input(ElemKind::FloatTy, {6});
3838 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
3839 {"sigmoid_test_input"}, {&input.getType()}, *F);
3840 EXPECT_TRUE(mod.getPlaceholderByNameSlow("sigmoid_test_output"));
3841 EXPECT_TRUE(F->getNodeByName("sigmoid_test_output__1"));
3842}
3843
3844TEST_F(Caffe2ImporterTest, importSqr) {
3845 ExecutionEngine EE{};
3846 auto &mod = EE.getModule();
3847 Function *F = mod.createFunction("main");
3848
3849 std::string NetDescFilename(
3850 GLOW_DATA_PATH "tests/models/caffe2Models/sqr_predict_net.pbtxt");
3851 std::string NetWeightFilename(
3852 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
3853
3854 Placeholder *output;
3855 PlaceholderBindings bindings;
3856
3857 // Destroy the loader after the graph is loaded since the following execution
3858 // will not depend on anything from the loader.
3859 {
3860 Tensor data(ElemKind::FloatTy, {4, 2});
3861 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
3862 {&data.getType()}, *F);
3863 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
3864
3865 bindings.allocate(mod.getPlaceholders());
3866 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&data});
3867 }
3868
3869 // High level check on the content of the graph. We should have
3870 // save(pow(input, splat(2)))
3871 EXPECT_EQ(F->getNodes().size(), 3);
3872 auto *save = getSaveNodeFromDest(output);
3873 ASSERT_TRUE(save);
3874 auto *pow = llvm::dyn_cast<PowNode>(save->getInput().getNode());
3875 ASSERT_TRUE(pow);
3876 auto *input = llvm::dyn_cast<Placeholder>(pow->getLHS().getNode());
3877 ASSERT_TRUE(input);
3878 auto *splat = llvm::dyn_cast<SplatNode>(pow->getRHS().getNode());
3879 ASSERT_TRUE(splat);
3880 EXPECT_EQ(splat->getValue(), 2);
3881
3882 EE.compile(CompilationMode::Infer);
3883}
3884
3885/// \returns whether \p val is found in \p vec.
3886static bool vecContainsVal(const std::vector<runtime::DeviceIDTy> &vec,
3887 runtime::DeviceIDTy val) {
3888 return std::find(vec.begin(), vec.end(), val) != vec.end();
3889}
3890
3891/// Verify that different fill types are loaded with the correct types into
3892/// their respective partitions specified in the C2 proto.
3893TEST_F(Caffe2ImporterTest, PrePartitionedTensorFillsTest) {
3894 ExecutionEngine EE("Interpreter", /* deviceMemory (16GB) */ 0x400000000,
3895 /* ignoreUserDeviceConfig */ false, /* numDevices */ 3);
3896 auto &mod = EE.getModule();
3897
3898 std::string NetDescFilename(
3899 GLOW_DATA_PATH
3900 "tests/models/caffe2Models/pre_partitioned_fill_test_predict_net.pbtxt");
3901 std::string NetWeightFilename(
3902 GLOW_DATA_PATH "tests/models/caffe2Models/fill_test_init_net.pbtxt");
3903
3904 Constant *tensorFillFloat, *tensorIntFill, *tensorInt64Fill,
3905 *tensorStringToUInt8Fill;
3906
3907 // Destroy the loader after the graph is loaded since the following execution
3908 // will not depend on anything from the loader.
3909 runtime::PrePartitionedConfig PPC;
3910 {
3911 // Loaded protos must have at least one external output, so load an unused
3912 // output and type to satisfy it. It is named unused_output in
3913 // empty_predict_net.pbtxt.
3914 Type unusedTy = Type(ElemKind::FloatTy, {4});
3915 Caffe2ModelLoader caffe2LD(
3916 NetDescFilename, NetWeightFilename,
3917 {"tensor_fill_float_eq", "tensor_int_fill_eq", "tensor_int64_fill_eq",
3918 "tensor_string_to_uint8_fill_eq"},
3919 {&unusedTy, &unusedTy, &unusedTy, &unusedTy}, mod, "main", &PPC);
3920 tensorFillFloat = llvm::dyn_cast<Constant>(
3921 EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_fill_float")));
3922 tensorIntFill = llvm::dyn_cast<Constant>(
3923 EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_int_fill")));
3924 tensorInt64Fill = llvm::dyn_cast<Constant>(
3925 EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_int64_fill")));
3926 tensorStringToUInt8Fill = llvm::dyn_cast<Constant>(EXIT_ON_ERR(
3927 caffe2LD.getNodeValueByName("tensor_string_to_uint8_fill")));
3928 }
3929
3930 ASSERT_EQ(mod.getFunctions().size(), 3);
3931 Function *P0 = nullptr, *P1 = nullptr, *P2 = nullptr;
3932 for (size_t i = 0, e = PPC.funcs.size(); i < e; i++) {
3933 // Find the expected Function, and check that the logical device IDs were
3934 // correctly loaded.
3935 Function *F = PPC.funcs[i];
3936 if (F->getName() == "main_p0") {
3937 P0 = F;
3938 ASSERT_EQ(PPC.logicalIDs[i].size(), 2);
3939 EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 0));
3940 EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
3941 } else if (F->getName() == "main_p1") {
3942 P1 = F;
3943 ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
3944 EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 1));
3945 } else if (F->getName() == "main_p2") {
3946 P2 = F;
3947 } else {
3948 FAIL() << "Unknown Function found.";
3949 ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
3950 EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
3951 }
3952
3953 // Check that the function was also found in the module.
3954 auto &modFuns = mod.getFunctions();
3955 ASSERT_NE(std::find(modFuns.begin(), modFuns.end(), F), modFuns.end());
3956 }
3957 ASSERT_TRUE(P0);
3958 ASSERT_TRUE(P1);
3959 ASSERT_TRUE(P2);
3960
3961 ASSERT_TRUE(tensorFillFloat);
3962 ASSERT_TRUE(tensorIntFill);
3963 ASSERT_TRUE(tensorInt64Fill);
3964 ASSERT_TRUE(tensorStringToUInt8Fill);
3965
3966 // Note: Only user is a no-op Reshape, which is fed into a Save.
3967 ASSERT_EQ(tensorFillFloat->getNumUsers(), 1);
3968 ASSERT_EQ(tensorIntFill->getNumUsers(), 1);
3969 ASSERT_EQ(tensorInt64Fill->getNumUsers(), 1);
3970 ASSERT_EQ(tensorStringToUInt8Fill->getNumUsers(), 1);
3971
3972 // Check that the parent Functions of the Reshapes match expected partitions.
3973 EXPECT_EQ(tensorFillFloat->getUsers().front().getUser()->getParent(), P0);
3974 EXPECT_EQ(tensorIntFill->getUsers().front().getUser()->getParent(), P1);
3975 EXPECT_EQ(tensorInt64Fill->getUsers().front().getUser()->getParent(), P2);
3976 EXPECT_EQ(tensorStringToUInt8Fill->getUsers().front().getUser()->getParent(),
3977 P0);
3978
3979 // All fills in fill_test_init_net.pbtxt use shape {2, 2}.
3980 const std::vector<dim_t> expectedDims = {2, 2};
3981 ASSERT_TRUE(tensorFillFloat->dims().equals(expectedDims));
3982 ASSERT_TRUE(tensorIntFill->dims().equals(expectedDims));
3983 ASSERT_TRUE(tensorInt64Fill->dims().equals(expectedDims));
3984 ASSERT_TRUE(tensorStringToUInt8Fill->dims().equals(expectedDims));
3985
3986 auto tensorFillFloatH = tensorFillFloat->getPayload().getHandle<float>();
3987 auto tensorIntFillH = tensorIntFill->getPayload().getHandle<int32_t>();
3988 auto tensorInt64FillH = tensorInt64Fill->getPayload().getHandle<int64_t>();
3989 // We load GivenTensorByteStringToUInt8Fill as UInt8QTy with dummy
3990 // scale/offset for now, because it's only used for rowwise-quantized tensors.
3991 auto tensorStringToUInt8FillH =
3992 tensorStringToUInt8Fill->getPayload().getHandle<uint8_t>();
3993
3994 // All fills in fill_test_init_net.pbtxt are set to 0 through 3.
3995 for (size_t i = 0; i < 4; i++) {
3996 EXPECT_FLOAT_EQ(tensorFillFloatH.raw(i), (float)i);
3997 EXPECT_EQ(tensorIntFillH.raw(i), (int32_t)i);
3998 EXPECT_EQ(tensorInt64FillH.raw(i), (int64_t)i);
3999 EXPECT_EQ(tensorStringToUInt8FillH.raw(i), (uint8_t)(i + 128));
4000 }
4001
4002 CompilationContext cctx;
4003 cctx.prepartitionedConfig = &PPC;
4004 EE.compile(cctx);
4005 PlaceholderBindings bindings;
4006 bindings.allocate(mod.getPlaceholders());
4007 EE.run(bindings);
4008}
4009
4010/// Verify that multiple ops loaded into different pre-partitioned Functions
4011/// with a non-trivial dependence between them works correctly.
4012/// Note: DAG of the partitions looks like: F0 -> F1
4013/// \ |
4014/// v v
4015/// F2
TEST_F(Caffe2ImporterTest, PrePartitionedMultiOpTest) {
  ExecutionEngine EE("Interpreter", /* deviceMemory (16GB) */ 0x400000000,
                     /* ignoreUserDeviceConfig */ false, /* numDevices */ 3);
  auto &mod = EE.getModule();

  const std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/pre_partitioned_multi_op_predict_net.pbtxt");
  const std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  // Final output placeholder, plus separate bindings for the partitioned (P)
  // and unpartitioned (U) executions so the two results can be compared.
  Placeholder *outputPH;
  Tensor *resultPartitionedT;
  PlaceholderBindings bindingsU;
  PlaceholderBindings bindingsP;

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  runtime::PrePartitionedConfig PPC;
  // Random inputs created once and reused for both executions so the outputs
  // are directly comparable.
  Tensor mmIn0T(ElemKind::FloatTy, {10, 10});
  Tensor mmIn1T(ElemKind::FloatTy, {10, 10});
  Tensor addInT(ElemKind::FloatTy, {10, 10});
  mmIn0T.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  mmIn1T.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  addInT.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  Placeholder *mmIn0P = nullptr, *mmIn1P = nullptr, *addInP = nullptr;
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename, {"mm0_in", "mm1_in", "add_in"},
        {&mmIn0T.getType(), &mmIn1T.getType(), &addInT.getType()}, mod, "main",
        &PPC);
    outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    NodeValue mmIn0NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn0NV, caffe2LD.getNodeValueByName("mm0_in"));
    mmIn0P = llvm::dyn_cast<Placeholder>(mmIn0NV);
    NodeValue mmIn1NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn1NV, caffe2LD.getNodeValueByName("mm1_in"));
    mmIn1P = llvm::dyn_cast<Placeholder>(mmIn1NV);
    NodeValue addInNV;
    ASSIGN_VALUE_OR_FAIL_TEST(addInNV, caffe2LD.getNodeValueByName("add_in"));
    addInP = llvm::dyn_cast<Placeholder>(addInNV);
  }

  // First we are going to make sure the structure of the pre-partitioned Module
  // is set up as expected, and run it with random inputs to get some results.
  {
    ASSERT_TRUE(mmIn0P);
    ASSERT_TRUE(mmIn1P);
    ASSERT_TRUE(addInP);

    ASSERT_EQ(mod.getFunctions().size(), 3);
    Function *P0 = nullptr, *P1 = nullptr, *P2 = nullptr;
    for (size_t i = 0, e = PPC.funcs.size(); i < e; i++) {
      // Find the expected Function, and check that the logical device IDs were
      // correctly loaded.
      Function *F = PPC.funcs[i];
      if (F->getName() == "main_p0") {
        P0 = F;
        ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
        EXPECT_EQ(PPC.backendSpecificOpts[i].size(), 0);
      } else if (F->getName() == "main_p1") {
        P1 = F;
        ASSERT_EQ(PPC.logicalIDs[i].size(), 2);
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 0));
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 1));
        EXPECT_EQ(PPC.backendSpecificOpts[i].size(), 0);
      } else if (F->getName() == "main_p2") {
        P2 = F;
        ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
        // Only this partition carries backend-specific options in the proto.
        EXPECT_EQ(PPC.backendSpecificOpts[i].size(), 3);
        ASSERT_TRUE(PPC.backendSpecificOpts[i].count("BackendA_opt1"));
        EXPECT_EQ(PPC.backendSpecificOpts[i].at("BackendA_opt1"), "val1");
        ASSERT_TRUE(PPC.backendSpecificOpts[i].count("BackendA_opt2"));
        EXPECT_EQ(PPC.backendSpecificOpts[i].at("BackendA_opt2"), "val2");
        ASSERT_TRUE(PPC.backendSpecificOpts[i].count("BackendB_opt3"));
        EXPECT_EQ(PPC.backendSpecificOpts[i].at("BackendB_opt3"), "val3");
      } else {
        FAIL() << "Unknown Function found.";
      }

      // Check that the function was also found in the module.
      auto &modFuns = mod.getFunctions();
      ASSERT_NE(std::find(modFuns.begin(), modFuns.end(), F), modFuns.end());
    }
    ASSERT_TRUE(P0);
    ASSERT_TRUE(P1);
    ASSERT_TRUE(P2);

    // Verify P0: walk back from the final save through sub and mul; the
    // cross-partition intermediates surface as Placeholders here.
    auto *finalSave = getSaveNodeFromDest(outputPH);
    ASSERT_TRUE(finalSave);
    EXPECT_EQ(finalSave->getParent(), P0);
    SubNode *sub = llvm::dyn_cast<SubNode>(finalSave->getInput());
    ASSERT_TRUE(sub);
    Placeholder *intermedAddOut = llvm::dyn_cast<Placeholder>(sub->getRHS());
    ASSERT_TRUE(intermedAddOut);
    MulNode *mul = llvm::dyn_cast<MulNode>(sub->getLHS());
    ASSERT_TRUE(mul);
    Placeholder *intermedMMOut = llvm::dyn_cast<Placeholder>(mul->getRHS());
    ASSERT_TRUE(intermedMMOut);
    Placeholder *mmIn0 = llvm::dyn_cast<Placeholder>(mul->getLHS());
    ASSERT_TRUE(mmIn0);

    // Verify P2: find the single user of the intermediate add output that
    // lives in P2 (its producer side).
    Node *userFromP2 = nullptr;
    for (auto &U : intermedAddOut->getUsers()) {
      if (U.getUser()->getParent() == P2) {
        // Enforce uniqueness: there must be exactly one such user.
        ASSERT_FALSE(userFromP2);
        userFromP2 = U.getUser();
      }
    }
    ASSERT_TRUE(userFromP2);
    SaveNode *saveIntermedP2Out = llvm::dyn_cast<SaveNode>(userFromP2);
    ASSERT_TRUE(saveIntermedP2Out);
    AddNode *add = llvm::dyn_cast<AddNode>(saveIntermedP2Out->getInput());
    ASSERT_TRUE(add);
    Placeholder *addIn = llvm::dyn_cast<Placeholder>(add->getRHS());
    ASSERT_TRUE(addIn);
    EXPECT_EQ(add->getLHS().getNode(), intermedMMOut);

    // Verify P1: same pattern — the matmul output's saver lives in P1.
    Node *userFromP1 = nullptr;
    for (auto &U : intermedMMOut->getUsers()) {
      if (U.getUser()->getParent() == P1) {
        ASSERT_FALSE(userFromP1);
        userFromP1 = U.getUser();
      }
    }
    ASSERT_TRUE(userFromP1);
    SaveNode *saveIntermedP1Out = llvm::dyn_cast<SaveNode>(userFromP1);
    ASSERT_TRUE(saveIntermedP1Out);
    MatMulNode *matMul =
        llvm::dyn_cast<MatMulNode>(saveIntermedP1Out->getInput());
    ASSERT_TRUE(matMul);
    EXPECT_EQ(matMul->getLHS().getNode(), mmIn0);
    Placeholder *matMulIn = llvm::dyn_cast<Placeholder>(matMul->getRHS());
    ASSERT_TRUE(matMulIn);

    // Now that we've verifed the shape of the Module, run it and keep around
    // the pointer to the result.
    CompilationContext cctx;
    cctx.prepartitionedConfig = &PPC;
    EE.compile(cctx);
    bindingsP.insert(mmIn0P, mmIn0T.getUnowned());
    bindingsP.insert(mmIn1P, mmIn1T.getUnowned());
    bindingsP.insert(addInP, addInT.getUnowned());
    bindingsP.allocate(mod.getPlaceholders());
    EE.run(bindingsP);

    resultPartitionedT = bindingsP.get(outputPH);
  }

  // Now that we have the model result from pre-partitioned execution, execute
  // the model ignoring the pre-partitioning and bitwise compare results.
  // NOTE(review): re-setting the backend name appears to reset the engine's
  // Module so the model can be reloaded from scratch — confirm.
  EE.setBackendName(EE.getBackendName());

  Module &modU = EE.getModule();
  {
    Function *F = modU.createFunction("main");
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename, {"mm0_in", "mm1_in", "add_in"},
        {&mmIn0T.getType(), &mmIn1T.getType(), &addInT.getType()}, *F);
    outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    NodeValue mmIn0NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn0NV, caffe2LD.getNodeValueByName("mm0_in"));
    mmIn0P = llvm::dyn_cast<Placeholder>(mmIn0NV);
    NodeValue mmIn1NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn1NV, caffe2LD.getNodeValueByName("mm1_in"));
    mmIn1P = llvm::dyn_cast<Placeholder>(mmIn1NV);
    NodeValue addInNV;
    ASSIGN_VALUE_OR_FAIL_TEST(addInNV, caffe2LD.getNodeValueByName("add_in"));
    addInP = llvm::dyn_cast<Placeholder>(addInNV);
  }

  Tensor *resultUnpartitonedT;

  {
    ASSERT_TRUE(mmIn0P);
    ASSERT_TRUE(mmIn1P);
    ASSERT_TRUE(addInP);
    // Without the pre-partitioned config the whole model loads as one Function.
    ASSERT_EQ(modU.getFunctions().size(), 1);

    EE.compile(CompilationMode::Infer);
    bindingsU.insert(mmIn0P, mmIn0T.getUnowned());
    bindingsU.insert(mmIn1P, mmIn1T.getUnowned());
    bindingsU.insert(addInP, addInT.getUnowned());
    bindingsU.allocate(modU.getPlaceholders());
    EE.run(bindingsU);

    resultUnpartitonedT = bindingsU.get(outputPH);
  }

  // The same inputs through the same math must match bit-for-bit regardless
  // of partitioning.
  EXPECT_TRUE(resultPartitionedT->isBitwiseEqual(*resultUnpartitonedT,
                                                 /* verbose */ true));
}
4213
4214/// Test importing a Caffe2 LayerNorm without weights and bias provided but with
4215/// epsilon or axis.
4216TEST_F(Caffe2ImporterTest, importLayerNormNoWeightBias) {
4217 ExecutionEngine EE{};
4218 auto &mod = EE.getModule();
4219 Function *F = mod.createFunction("main");
4220
4221 std::string NetDescFilename(
4222 GLOW_DATA_PATH "tests/models/caffe2Models/layernorm_pred_net.pbtxt");
4223 std::string NetWeightFilename(
4224 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
4225
4226 Placeholder *output;
4227 PlaceholderBindings bindings;
4228
4229 const ShapeVector inShape({4, 2, 5, 5});
4230
4231 // Destroy the loader after the graph is loaded since the following execution
4232 // will not depend on anything from the loader.
4233 {
4234 Tensor data(ElemKind::FloatTy, inShape);
4235 data.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
4236 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
4237 {&data.getType()}, *F);
4238 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4239
4240 bindings.allocate(mod.getPlaceholders());
4241 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&data});
4242 }
4243
4244 // High level check on the content of the graph. We should have
4245 // {Placeholder, Splat, Splat} => LayerNorm => Save
4246 EXPECT_EQ(F->getNodes().size(), 4);
4247 SaveNode *save = getSaveNodeFromDest(output);
4248
4249 auto *LN = llvm::dyn_cast<LayerNormalizationNode>(save->getInput().getNode());
4250 ASSERT_TRUE(LN);
4251 EXPECT_EQ(LN->getEpsilon(), 0.05f);
4252 EXPECT_TRUE(LN->getInput().dims().equals(inShape));
4253 EXPECT_TRUE(LN->getResult().dims().equals(inShape));
4254
4255 auto *scale = llvm::dyn_cast<SplatNode>(LN->getScale().getNode());
4256 ASSERT_TRUE(scale);
4257 EXPECT_EQ(scale->getValue(), 1.0f);
4258
4259 auto *bias = llvm::dyn_cast<SplatNode>(LN->getBias().getNode());
4260 ASSERT_TRUE(bias);
4261 EXPECT_EQ(bias->getValue(), 0.0f);
4262
4263 // Axis is 2, so check shape with second and third dims of inShape.
4264 EXPECT_TRUE(scale->getResult().dims().equals({inShape[2], inShape[3]}));
4265 EXPECT_TRUE(bias->getResult().dims().equals({inShape[2], inShape[3]}));
4266
4267 EE.compile(CompilationMode::Infer);
4268 EE.run(bindings);
4269}
4270
4271/// Test importing a Caffe2 LayerNorm with weights and bias provided but no
4272/// epsilon or axis.
4273TEST_F(Caffe2ImporterTest, importLayerNormWithWeightBias) {
4274 ExecutionEngine EE{};
4275 auto &mod = EE.getModule();
4276 Function *F = mod.createFunction("main");
4277
4278 std::string NetDescFilename(
4279 GLOW_DATA_PATH
4280 "tests/models/caffe2Models/layernorm_weight_bias_pred_net.pbtxt");
4281 std::string NetWeightFilename(
4282 GLOW_DATA_PATH
4283 "tests/models/caffe2Models/layernorm_weight_bias_init_net.pbtxt");
4284
4285 Placeholder *output;
4286 PlaceholderBindings bindings;
4287
4288 const ShapeVector inShape({5, 4, 3});
4289
4290 // Destroy the loader after the graph is loaded since the following execution
4291 // will not depend on anything from the loader.
4292 {
4293 Tensor data(ElemKind::FloatTy, inShape);
4294 data.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
4295 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
4296 {&data.getType()}, *F);
4297 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4298
4299 bindings.allocate(mod.getPlaceholders());
4300 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&data});
4301 }
4302
4303 // High level check on the content of the graph. We should have
4304 // {Placeholder, Constant, Constant} => LayerNorm => Save
4305 EXPECT_EQ(F->getNodes().size(), 2);
4306 SaveNode *save = getSaveNodeFromDest(output);
4307
4308 auto *LN = llvm::dyn_cast<LayerNormalizationNode>(save->getInput().getNode());
4309 ASSERT_TRUE(LN);
4310 EXPECT_EQ(LN->getEpsilon(), 0.001f); // Caffe2 default.
4311 EXPECT_TRUE(LN->getInput().dims().equals(inShape));
4312 EXPECT_TRUE(LN->getResult().dims().equals(inShape));
4313
4314 auto *scale = llvm::dyn_cast<Constant>(LN->getScale().getNode());
4315 ASSERT_TRUE(scale);
4316
4317 auto *bias = llvm::dyn_cast<Constant>(LN->getBias().getNode());
4318 ASSERT_TRUE(bias);
4319
4320 // Default axis is 1 and it was unspecified in the input proto, so check shape
4321 // with first and second dims of inShape.
4322 EXPECT_TRUE(scale->getOutput().dims().equals({inShape[1], inShape[2]}));
4323 EXPECT_TRUE(bias->getOutput().dims().equals({inShape[1], inShape[2]}));
4324
4325 EE.compile(CompilationMode::Infer);
4326 EE.run(bindings);
4327}
4328
4329/// Test importing a Caffe2 LayerNorm with negative axis
4330TEST_F(Caffe2ImporterTest, importLayerNormNegativeAxis) {
4331 ExecutionEngine EE{};
4332 auto &mod = EE.getModule();
4333 Function *F = mod.createFunction("main");
4334
4335 std::string NetDescFilename(
4336 GLOW_DATA_PATH
4337 "tests/models/caffe2Models/layernorm_neg_axis_pred_net.pbtxt");
4338 std::string NetWeightFilename(
4339 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
4340
4341 Placeholder *output;
4342 PlaceholderBindings bindings;
4343
4344 const ShapeVector inShape({4, 2, 5, 5});
4345
4346 // Destroy the loader after the graph is loaded since the following execution
4347 // will not depend on anything from the loader.
4348 {
4349 Tensor data(ElemKind::FloatTy, inShape);
4350 data.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
4351 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
4352 {&data.getType()}, *F);
4353 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4354
4355 bindings.allocate(mod.getPlaceholders());
4356 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&data});
4357 }
4358
4359 // High level check on the content of the graph. We should have
4360 // {Placeholder, Splat, Splat} => LayerNorm => Save
4361 EXPECT_EQ(F->getNodes().size(), 4);
4362 SaveNode *save = getSaveNodeFromDest(output);
4363
4364 auto *LN = llvm::dyn_cast<LayerNormalizationNode>(save->getInput().getNode());
4365 ASSERT_TRUE(LN);
4366 EXPECT_EQ(LN->getEpsilon(), 0.05f);
4367 EXPECT_TRUE(LN->getInput().dims().equals(inShape));
4368 EXPECT_TRUE(LN->getResult().dims().equals(inShape));
4369
4370 auto *scale = llvm::dyn_cast<SplatNode>(LN->getScale().getNode());
4371 ASSERT_TRUE(scale);
4372 EXPECT_EQ(scale->getValue(), 1.0f);
4373
4374 auto *bias = llvm::dyn_cast<SplatNode>(LN->getBias().getNode());
4375 ASSERT_TRUE(bias);
4376 EXPECT_EQ(bias->getValue(), 0.0f);
4377
4378 // Axis is -2, so check shape with second and third dims of inShape.
4379 EXPECT_TRUE(scale->getResult().dims().equals({inShape[2], inShape[3]}));
4380 EXPECT_TRUE(bias->getResult().dims().equals({inShape[2], inShape[3]}));
4381
4382 EE.compile(CompilationMode::Infer);
4383 EE.run(bindings);
4384}
4385
4386static void testImportTrackedQParams(bool loadUniquedDummyQParams) {
4387 ExecutionEngine EE{};
4388 auto &mod = EE.getModule();
4389 Function *F = mod.createFunction("main");
4390
4391 std::string NetDescFilename(
4392 GLOW_DATA_PATH "tests/models/caffe2Models/int8convrelu_pred_net.pbtxt");
4393 std::string NetWeightFilename(
4394 GLOW_DATA_PATH "tests/models/caffe2Models/int8convrelu_init_net.pbtxt");
4395
4396 Placeholder *output;
4397 PlaceholderBindings bindings;
4398 OriginNameToTQPMap originNameToTQPMap;
4399
4400 // Destroy the loader after the graph is loaded since the following execution
4401 // will not depend on anything from the loader.
4402 {
4403 Tensor data(ElemKind::Int8QTy, {1, 1, 3, 3}, 1, 0);
4404 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
4405 {"gpu_0/data_0"}, {&data.getType()}, *F,
4406 /* errPtr */ nullptr, &originNameToTQPMap,
4407 loadUniquedDummyQParams);
4408 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4409
4410 bindings.allocate(mod.getPlaceholders());
4411 }
4412
4413 // High level check on the content of the graph. We should have
4414 // {transpose, transpose} => conv => relu => transpose => save
4415 EXPECT_EQ(F->getNodes().size(), 6);
4416 auto *saveNode = getSaveNodeFromDest(output);
4417
4418 EXPECT_EQ(originNameToTQPMap.size(), 4);
4419 TensorQuantizationParams convOut, convBias, convWeight, convInput;
4420 for (const auto &nameTQP : originNameToTQPMap) {
4421 if (nameTQP.first == "conv_out") {
4422 convOut = nameTQP.second;
4423 } else if (nameTQP.first == "conv_w") {
4424 convWeight = nameTQP.second;
4425 } else if (nameTQP.first == "conv_b") {
4426 convBias = nameTQP.second;
4427 } else if (nameTQP.first == "gpu_0/data_0") {
4428 convInput = nameTQP.second;
4429 } else {
4430 FAIL();
4431 }
4432 }
4433
4434 if (loadUniquedDummyQParams) {
4435 // Dummies should have unique offsets 0->3.
4436 EXPECT_EQ(convInput.offset, 0);
4437 EXPECT_EQ(convWeight.offset, 1);
4438 EXPECT_EQ(convBias.offset, 2);
4439 EXPECT_EQ(convOut.offset, 3);
4440
4441 // All dummmies should have dummy scale.
4442 EXPECT_EQ(convInput.scale, dummyScale);
4443 EXPECT_EQ(convWeight.scale, dummyScale);
4444 EXPECT_EQ(convBias.scale, dummyScale);
4445 EXPECT_EQ(convOut.scale, dummyScale);
4446 } else {
4447 // This one was provided as an input PH with a type already based on Glow
4448 // Int8QTy, so don't shift.
4449 EXPECT_EQ(convInput.offset, 0);
4450 EXPECT_EQ(convWeight.offset, 10 - UINT8_TO_INT8_SHIFT);
4451 // This one is loaded int32, so has no shift.
4452 EXPECT_EQ(convBias.offset, 4);
4453 EXPECT_EQ(convOut.offset, 7 - UINT8_TO_INT8_SHIFT);
4454
4455 EXPECT_EQ(convInput.scale, 1.f);
4456 EXPECT_EQ(convWeight.scale, 2.f);
4457 EXPECT_EQ(convBias.scale, 10.f);
4458 EXPECT_EQ(convOut.scale, 1.5f);
4459 }
4460
4461 auto *transNode1 =
4462 llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
4463 ASSERT_TRUE(transNode1);
4464 auto *reluNode = llvm::dyn_cast<ReluNode>(transNode1->getInput().getNode());
4465 ASSERT_TRUE(reluNode);
4466 ASSERT_TRUE(reluNode);
4467 EXPECT_TRUE(reluNode->getResult().getType()->isQuantizedType());
4468 EXPECT_EQ(reluNode->getResult().getType()->getScale(), convOut.scale);
4469 EXPECT_EQ(reluNode->getResult().getType()->getOffset(), convOut.offset);
4470 auto *convNode =
4471 llvm::dyn_cast<ConvolutionNode>(reluNode->getInput().getNode());
4472 ASSERT_TRUE(convNode);
4473 EXPECT_TRUE(convNode->getResult().getType()->isQuantizedType());
4474 EXPECT_EQ(convNode->getResult().getType()->getScale(), convOut.scale);
4475 EXPECT_EQ(convNode->getResult().getType()->getOffset(), convOut.offset);
4476 EXPECT_TRUE(convNode->getFilter().getType()->isQuantizedType());
4477 EXPECT_EQ(convNode->getFilter().getType()->getScale(), convWeight.scale);
4478 EXPECT_EQ(convNode->getFilter().getType()->getOffset(), convWeight.offset);
4479 EXPECT_TRUE(convNode->getBias().getType()->isQuantizedType());
4480 EXPECT_EQ(convNode->getBias().getType()->getScale(), convBias.scale);
4481 EXPECT_EQ(convNode->getBias().getType()->getOffset(), convBias.offset);
4482 ASSERT_TRUE(convNode);
4483 auto *transNode2 =
4484 llvm::dyn_cast<TransposeNode>(convNode->getInput().getNode());
4485 ASSERT_TRUE(transNode2);
4486 auto *transNode3 =
4487 llvm::dyn_cast<TransposeNode>(convNode->getFilter().getNode());
4488 ASSERT_TRUE(transNode3);
4489
4490 EE.compile(CompilationMode::Infer);
4491}
4492
4493/// Test that when we load a pre-quantized model when providing
4494/// OriginNameToTQPMap that the quant params are discarded and unique offsets
4495/// are used to track the mapping to names they came from.
TEST_F(Caffe2ImporterTest, importInt8ConvReluTrackedDummyQParams) {
  // Expect all loaded qparams to be dummies with sequentially uniqued offsets.
  testImportTrackedQParams(/* loadUniquedDummyQParams */ true);
}
4499
4500/// Test that when we load a pre-quantized model when providing
4501/// OriginNameToTQPMap, but we don't enable loading unique dummy qparams, that
4502/// we correctly have mapped the quant params to the name it came from.
TEST_F(Caffe2ImporterTest, importInt8ConvReluTrackedRealQParams) {
  // Expect the real qparams from the protos, mapped to their origin names.
  testImportTrackedQParams(/* loadUniquedDummyQParams */ false);
}
4506
4507/// Check that we clip a Node with 0.f scale to kMinScaleFP16 correctly.
4508TEST_F(Caffe2ImporterTest, ClipZeroScaleFP16QuantOpt) {
4509 ExecutionEngine EE{};
4510 auto &mod = EE.getModule();
4511 Function *F = mod.createFunction("main");
4512
4513 std::string NetDescFilename(
4514 GLOW_DATA_PATH
4515 "tests/models/caffe2Models/int8sumrelu_tiny_scale_pred_net.pbtxt");
4516 std::string NetWeightFilename(
4517 GLOW_DATA_PATH
4518 "tests/models/caffe2Models/int8sumrelu_tiny_scale_init_net.pbtxt");
4519
4520 Placeholder *output;
4521 PlaceholderBindings bindings;
4522
4523 // Destroy the loader after the graph is loaded since the following execution
4524 // will not depend on anything from the loader.
4525 {
4526 Tensor data(ElemKind::Int8QTy, {4, 2}, 1.f, 0);
4527 Caffe2ModelLoader caffe2LD(
4528 NetDescFilename, NetWeightFilename, {"gpu_0/data_0"}, {&data.getType()},
4529 *F, /* errPtr */ nullptr, /* originNameToTQPMap */ nullptr,
4530 /* loadUniquedDummyQParams */ false,
4531 /* zeroScaleFP16Clip */ true);
4532 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4533
4534 bindings.allocate(mod.getPlaceholders());
4535 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
4536 }
4537
4538 // High level check on the content of the graph. We should have
4539 // input-=> add => relu => save
4540 // const/
4541 EXPECT_EQ(F->getNodes().size(), 3);
4542 auto *save = getSaveNodeFromDest(output);
4543
4544 // Verify that the structure is as expected *except* that the tiny scales that
4545 // are loaded have been replaced by kMinScaleFP16.
4546 auto *relu = llvm::dyn_cast<ReluNode>(save->getInput().getNode());
4547 ASSERT_TRUE(relu);
4548 EXPECT_EQ(relu->getResult().getType()->getScale(), kMinScaleFP16);
4549 EXPECT_EQ(relu->getResult().getType()->getOffset(), 5 - UINT8_TO_INT8_SHIFT);
4550 auto *add = llvm::dyn_cast<AddNode>(relu->getInput().getNode());
4551 ASSERT_TRUE(add);
4552 EXPECT_EQ(add->getResult().getType()->getScale(), kMinScaleFP16);
4553 EXPECT_EQ(add->getResult().getType()->getOffset(), 5 - UINT8_TO_INT8_SHIFT);
4554 auto *input = llvm::dyn_cast<Placeholder>(add->getLHS().getNode());
4555 ASSERT_TRUE(input);
4556 auto *val = llvm::dyn_cast<Constant>(add->getRHS().getNode());
4557 ASSERT_TRUE(val);
4558 EXPECT_EQ(val->getOutput().getType()->getScale(), kMinScaleFP16);
4559 EXPECT_EQ(val->getOutput().getType()->getOffset(), 13 - UINT8_TO_INT8_SHIFT);
4560
4561 EE.compile(CompilationMode::Infer);
4562}
4563
/// Check that a quantization range exceeding the FP16-representable range is
/// clipped to [kMinFP16, kMaxFP16] correctly.
4565TEST_F(Caffe2ImporterTest, ClipLargeQRangeToFP16) {
4566 ExecutionEngine EE{};
4567 auto &mod = EE.getModule();
4568 Function *F = mod.createFunction("main");
4569
4570 std::string NetDescFilename(
4571 GLOW_DATA_PATH
4572 "tests/models/caffe2Models/int8sumrelu_large_range_pred_net.pbtxt");
4573 std::string NetWeightFilename(
4574 GLOW_DATA_PATH
4575 "tests/models/caffe2Models/int8sumrelu_large_range_init_net.pbtxt");
4576
4577 Placeholder *output;
4578 PlaceholderBindings bindings;
4579
4580 // Destroy the loader after the graph is loaded since the following execution
4581 // will not depend on anything from the loader.
4582 {
4583 Tensor data(ElemKind::FloatTy, {4, 2});
4584 Caffe2ModelLoader caffe2LD(
4585 NetDescFilename, NetWeightFilename, {"gpu_0/data_0"}, {&data.getType()},
4586 *F, /* errPtr */ nullptr, /* originNameToTQPMap */ nullptr,
4587 /* loadUniquedDummyQParams */ false,
4588 /* zeroScaleFP16Clip */ false, /* clipQuantRangeToFP16 */ true);
4589 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4590
4591 bindings.allocate(mod.getPlaceholders());
4592 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
4593 }
4594
4595 // High level check on the content of the graph. We should have
4596 // input=> quant-=> add => relu => save
4597 // const/
4598 EXPECT_EQ(F->getNodes().size(), 4);
4599 auto *save = getSaveNodeFromDest(output);
4600
4601 auto clippedQP = quantization::chooseQuantizationParams({kMinFP16, kMaxFP16});
4602
4603 // Verify that the structure is as expected *except* that the range is
4604 // adjusted for clipping to fp16.
4605 auto *relu = llvm::dyn_cast<ReluNode>(save->getInput().getNode());
4606 ASSERT_TRUE(relu);
4607 EXPECT_EQ(relu->getResult().getType()->getScale(), clippedQP.scale);
4608 EXPECT_EQ(relu->getResult().getType()->getOffset(), clippedQP.offset);
4609 auto *add = llvm::dyn_cast<AddNode>(relu->getInput().getNode());
4610 ASSERT_TRUE(add);
4611 EXPECT_EQ(add->getResult().getType()->getScale(), clippedQP.scale);
4612 EXPECT_EQ(add->getResult().getType()->getOffset(), clippedQP.offset);
4613 auto *quant = llvm::dyn_cast<QuantizeNode>(add->getLHS().getNode());
4614 ASSERT_TRUE(quant);
4615 EXPECT_EQ(quant->getResult().getType()->getScale(), 1.f);
4616 EXPECT_EQ(quant->getResult().getType()->getOffset(), 0 - UINT8_TO_INT8_SHIFT);
4617 EXPECT_TRUE(llvm::isa<Placeholder>(quant->getInput().getNode()));
4618 auto *C = llvm::dyn_cast<Constant>(add->getRHS().getNode());
4619 ASSERT_TRUE(C);
4620 EXPECT_EQ(C->getOutput().getType()->getScale(), clippedQP.scale);
4621 EXPECT_EQ(C->getOutput().getType()->getOffset(), clippedQP.offset);
4622
4623 EE.compile(CompilationMode::Infer);
4624}
4625
4626// Here we use a shape that is provided as an argument
4627TEST_F(Caffe2ImporterTest, gaussianFillUseProvidedShape) {
4628 ExecutionEngine EE{};
4629 auto &mod = EE.getModule();
4630 Function *F = mod.createFunction("main");
4631
4632 std::string NetDescFilename(
4633 GLOW_DATA_PATH
4634 "tests/models/caffe2Models/gaussian_fill_use_provided_shape.pbtxt");
4635 std::string NetWeightFilename(
4636 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
4637
4638 PlaceholderBindings bindings;
4639
4640 Placeholder *output;
4641 // Destroy the loader after the graph is loaded since the following execution
4642 // will not depend on anything from the loader.
4643 {
4644 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {}, {}, *F);
4645 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4646
4647 bindings.allocate(mod.getPlaceholders());
4648 updateInputPlaceholdersByName(bindings, &mod, {}, {});
4649 }
4650
4651 // Shape is defined in .pbtxt file
4652 const std::vector<dim_t> expectedShape{4, 5, 6, 7};
4653 EXPECT_EQ(expectedShape, output->dims().vec());
4654
4655 auto res = bindings.get(output);
4656 EE.compile(CompilationMode::Infer);
4657 EE.run(bindings);
4658
4659 auto result = res->getHandle();
4660 EXPECT_NEAR(0, result.calculateMeanVariance().first, 3);
4661}
4662
// Here we expect the input to be a 1D tensor whose elements act as the shape
4664TEST_F(Caffe2ImporterTest, gaussianFillInputAsShape) {
4665 ExecutionEngine EE{};
4666 auto &mod = EE.getModule();
4667 Function *F = mod.createFunction("main");
4668
4669 std::string NetDescFilename(
4670 GLOW_DATA_PATH
4671 "tests/models/caffe2Models/gaussian_fill_input_as_shape.pbtxt");
4672 std::string NetWeightFilename(
4673 GLOW_DATA_PATH
4674 "tests/models/caffe2Models/gaussian_fill_input_as_shape_init.pbtxt");
4675
4676 PlaceholderBindings bindings;
4677
4678 Placeholder *output;
4679 // Destroy the loader after the graph is loaded since the following execution
4680 // will not depend on anything from the loader.
4681 {
4682 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {}, {}, *F);
4683 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4684
4685 bindings.allocate(mod.getPlaceholders());
4686 updateInputPlaceholdersByName(bindings, &mod, {}, {});
4687 }
4688
4689 // Shape is defined in .pbtxt file
4690 const std::vector<dim_t> expectedShape{4, 5, 6, 7};
4691 EXPECT_EQ(expectedShape, output->dims().vec());
4692
4693 auto res = bindings.get(output);
4694 EE.compile(CompilationMode::Infer);
4695 EE.run(bindings);
4696
4697 auto result = res->getHandle();
4698 EXPECT_NEAR(0, result.calculateMeanVariance().first, 3);
4699}
4700
4701// Here we use input's shape for the result
4702TEST_F(Caffe2ImporterTest, gaussianFillUseInputShape) {
4703 ExecutionEngine EE{};
4704 auto &mod = EE.getModule();
4705 Function *F = mod.createFunction("main");
4706
4707 std::string NetDescFilename(
4708 GLOW_DATA_PATH
4709 "tests/models/caffe2Models/gaussian_fill_use_input_shape.pbtxt");
4710 std::string NetWeightFilename(
4711 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
4712
4713 PlaceholderBindings bindings;
4714
4715 Placeholder *output;
4716 const std::vector<dim_t> inputShape{4, 5, 6, 7};
4717 Tensor inputs_0(ElemKind::Int64ITy, inputShape);
4718 inputs_0.getHandle<int64_t>().randomize(-10, 10, mod.getPRNG());
4719 // Destroy the loader after the graph is loaded since the following execution
4720 // will not depend on anything from the loader.
4721 {
4722 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
4723 {&inputs_0.getType()}, *F);
4724 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4725
4726 bindings.allocate(mod.getPlaceholders());
4727 updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"}, {&inputs_0});
4728 }
4729
4730 // Check that the shape of the output matches what Caffe2 expects.
4731 EXPECT_EQ(inputShape, output->dims().vec());
4732
4733 auto res = bindings.get(output);
4734 EE.compile(CompilationMode::Infer);
4735 EE.run(bindings);
4736
4737 auto result = res->getHandle();
4738 EXPECT_NEAR(0, result.calculateMeanVariance().first, 3);
4739}
4740
4741// Set the extra_shape argument with input provided
4742TEST_F(Caffe2ImporterTest, gaussianFillExtraShape) {
4743 ExecutionEngine EE{};
4744 auto &mod = EE.getModule();
4745 Function *F = mod.createFunction("main");
4746
4747 std::string NetDescFilename(
4748 GLOW_DATA_PATH "tests/models/caffe2Models/"
4749 "gaussian_fill_use_input_shape_with_extra_shape.pbtxt");
4750 std::string NetWeightFilename(
4751 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
4752
4753 PlaceholderBindings bindings;
4754
4755 Placeholder *output;
4756 const std::vector<dim_t> inputShape{4, 5, 6, 7};
4757 Tensor inputs_0(ElemKind::Int64ITy, inputShape);
4758 inputs_0.getHandle<int64_t>().randomize(-10, 10, mod.getPRNG());
4759 // Destroy the loader after the graph is loaded since the following execution
4760 // will not depend on anything from the loader.
4761 {
4762 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
4763 {&inputs_0.getType()}, *F);
4764 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4765
4766 bindings.allocate(mod.getPlaceholders());
4767 updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"}, {&inputs_0});
4768 }
4769
4770 // Check that the shape of the output matches what Caffe2 expects.
4771 const std::vector<dim_t> expectedShape{4, 5, 6, 7, 2, 3};
4772 EXPECT_EQ(expectedShape, output->dims().vec());
4773
4774 auto res = bindings.get(output);
4775 EE.compile(CompilationMode::Infer);
4776 EE.run(bindings);
4777
4778 auto result = res->getHandle();
4779 EXPECT_NEAR(0, result.calculateMeanVariance().first, 3);
4780}
4781
// Here we expect the input to be a 1D tensor whose elements act as the shape
4783TEST_F(Caffe2ImporterTest, uniformFillInputAsShape) {
4784 ExecutionEngine EE{};
4785 auto &mod = EE.getModule();
4786 Function *F = mod.createFunction("main");
4787
4788 std::string NetDescFilename(
4789 GLOW_DATA_PATH
4790 "tests/models/caffe2Models/uniform_fill_input_as_shape.pbtxt");
4791 std::string NetWeightFilename(
4792 GLOW_DATA_PATH
4793 "tests/models/caffe2Models/gaussian_fill_input_as_shape_init.pbtxt");
4794
4795 PlaceholderBindings bindings;
4796
4797 Placeholder *output;
4798 // Destroy the loader after the graph is loaded since the following execution
4799 // will not depend on anything from the loader.
4800 {
4801 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {}, {}, *F);
4802 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4803
4804 bindings.allocate(mod.getPlaceholders());
4805 updateInputPlaceholdersByName(bindings, &mod, {}, {});
4806 }
4807
4808 // Shape is defined in .pbtxt file
4809 const std::vector<dim_t> expectedShape{4, 5, 6, 7};
4810 EXPECT_EQ(expectedShape, output->dims().vec());
4811
4812 auto res = bindings.get(output);
4813 EE.compile(CompilationMode::Infer);
4814 EE.run(bindings);
4815
4816 auto result = res->getHandle();
4817 for (dim_t dim1 = 0; dim1 < expectedShape[0]; ++dim1) {
4818 for (dim_t dim2 = 0; dim2 < expectedShape[1]; ++dim2) {
4819 for (dim_t dim3 = 0; dim3 < expectedShape[2]; ++dim3) {
4820 for (dim_t dim4 = 0; dim4 < expectedShape[3]; ++dim4) {
4821 // As defined in the .pbtxt file
4822 EXPECT_LE(0.0f, result.at({dim1, dim2, dim3, dim4}));
4823 EXPECT_GT(10.0f, result.at({dim1, dim2, dim3, dim4}));
4824 }
4825 }
4826 }
4827 }
4828}
4829
4830// Here we use input's shape for the result
4831TEST_F(Caffe2ImporterTest, uniformFillUseInputShape) {
4832 ExecutionEngine EE{};
4833 auto &mod = EE.getModule();
4834 Function *F = mod.createFunction("main");
4835
4836 std::string NetDescFilename(
4837 GLOW_DATA_PATH
4838 "tests/models/caffe2Models/uniform_fill_use_input_shape.pbtxt");
4839 std::string NetWeightFilename(
4840 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
4841
4842 PlaceholderBindings bindings;
4843
4844 Placeholder *output;
4845 const std::vector<dim_t> inputShape{4, 5, 6, 7};
4846 Tensor inputs_0(ElemKind::Int64ITy, inputShape);
4847 inputs_0.getHandle<int64_t>().randomize(-10, 10, mod.getPRNG());
4848 // Destroy the loader after the graph is loaded since the following execution
4849 // will not depend on anything from the loader.
4850 {
4851 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
4852 {&inputs_0.getType()}, *F);
4853 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4854
4855 bindings.allocate(mod.getPlaceholders());
4856 updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"}, {&inputs_0});
4857 }
4858
4859 // Check that the shape of the output matches what Caffe2 expects.
4860 EXPECT_EQ(inputShape, output->dims().vec());
4861
4862 auto res = bindings.get(output);
4863 EE.compile(CompilationMode::Infer);
4864 EE.run(bindings);
4865
4866 auto result = res->getHandle();
4867 for (dim_t dim1 = 0; dim1 < inputShape[0]; ++dim1) {
4868 for (dim_t dim2 = 0; dim2 < inputShape[1]; ++dim2) {
4869 for (dim_t dim3 = 0; dim3 < inputShape[2]; ++dim3) {
4870 for (dim_t dim4 = 0; dim4 < inputShape[3]; ++dim4) {
4871 // As defined in the .pbtxt file
4872 EXPECT_LE(0.0f, result.at({dim1, dim2, dim3, dim4}));
4873 EXPECT_GT(10.0f, result.at({dim1, dim2, dim3, dim4}));
4874 }
4875 }
4876 }
4877 }
4878}
4879
4880// Here we use a shape that is provided as an argument
4881TEST_F(Caffe2ImporterTest, constantFillUseProvidedShape) {
4882 ExecutionEngine EE{};
4883 auto &mod = EE.getModule();
4884 Function *F = mod.createFunction("main");
4885
4886 std::string NetDescFilename(
4887 GLOW_DATA_PATH
4888 "tests/models/caffe2Models/constant_fill_use_provided_shape.pbtxt");
4889 std::string NetWeightFilename(
4890 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
4891
4892 PlaceholderBindings bindings;
4893
4894 Placeholder *output;
4895 // Destroy the loader after the graph is loaded since the following execution
4896 // will not depend on anything from the loader.
4897 {
4898 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {}, {}, *F);
4899 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4900
4901 bindings.allocate(mod.getPlaceholders());
4902 updateInputPlaceholdersByName(bindings, &mod, {}, {});
4903 }
4904
4905 // Shape is defined in .pbtxt file
4906 const std::vector<dim_t> expectedShape{7, 11, 13, 17};
4907 EXPECT_EQ(expectedShape, output->dims().vec());
4908
4909 auto res = bindings.get(output);
4910 EE.compile(CompilationMode::Infer);
4911 EE.run(bindings);
4912
4913 auto result = res->getHandle();
4914 for (dim_t dim1 = 0; dim1 < expectedShape[0]; ++dim1) {
4915 for (dim_t dim2 = 0; dim2 < expectedShape[1]; ++dim2) {
4916 for (dim_t dim3 = 0; dim3 < expectedShape[2]; ++dim3) {
4917 for (dim_t dim4 = 0; dim4 < expectedShape[3]; ++dim4) {
4918 // As defined in the .pbtxt file
4919 EXPECT_FLOAT_EQ(3.0f, result.at({dim1, dim2, dim3, dim4}));
4920 }
4921 }
4922 }
4923 }
4924}
4925
// Here we expect the input to be a 1D tensor whose elements act as the shape
4927TEST_F(Caffe2ImporterTest, constantFillInputAsShape) {
4928 ExecutionEngine EE{};
4929 auto &mod = EE.getModule();
4930 Function *F = mod.createFunction("main");
4931
4932 std::string NetDescFilename(
4933 GLOW_DATA_PATH
4934 "tests/models/caffe2Models/constant_fill_input_as_shape.pbtxt");
4935 std::string NetWeightFilename(
4936 GLOW_DATA_PATH
4937 "tests/models/caffe2Models/constant_fill_input_as_shape_init.pbtxt");
4938
4939 PlaceholderBindings bindings;
4940
4941 Placeholder *output;
4942 // Destroy the loader after the graph is loaded since the following execution
4943 // will not depend on anything from the loader.
4944 {
4945 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {}, {}, *F);
4946 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4947
4948 bindings.allocate(mod.getPlaceholders());
4949 updateInputPlaceholdersByName(bindings, &mod, {}, {});
4950 }
4951
4952 // Shape is defined in .pbtxt file
4953 const std::vector<dim_t> expectedShape{17, 23, 29, 31};
4954 EXPECT_EQ(expectedShape, output->dims().vec());
4955
4956 auto res = bindings.get(output);
4957 EE.compile(CompilationMode::Infer);
4958 EE.run(bindings);
4959
4960 auto result = res->getHandle();
4961 for (dim_t dim1 = 0; dim1 < expectedShape[0]; ++dim1) {
4962 for (dim_t dim2 = 0; dim2 < expectedShape[1]; ++dim2) {
4963 for (dim_t dim3 = 0; dim3 < expectedShape[2]; ++dim3) {
4964 for (dim_t dim4 = 0; dim4 < expectedShape[3]; ++dim4) {
4965 // As defined in the .pbtxt file
4966 EXPECT_FLOAT_EQ(3.0f, result.at({dim1, dim2, dim3, dim4}));
4967 }
4968 }
4969 }
4970 }
4971}
4972
4973// Here we use input's shape for the result
4974TEST_F(Caffe2ImporterTest, constantFillUseInputShape) {
4975 ExecutionEngine EE{};
4976 auto &mod = EE.getModule();
4977 Function *F = mod.createFunction("main");
4978
4979 std::string NetDescFilename(
4980 GLOW_DATA_PATH
4981 "tests/models/caffe2Models/constant_fill_use_input_shape.pbtxt");
4982 std::string NetWeightFilename(
4983 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
4984
4985 PlaceholderBindings bindings;
4986
4987 Placeholder *output;
4988 const std::vector<dim_t> inputShape{7, 11, 13, 17};
4989 Tensor inputs_0(ElemKind::FloatTy, inputShape);
4990 inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
4991 // Destroy the loader after the graph is loaded since the following execution
4992 // will not depend on anything from the loader.
4993 {
4994 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
4995 {&inputs_0.getType()}, *F);
4996 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
4997
4998 bindings.allocate(mod.getPlaceholders());
4999 updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"}, {&inputs_0});
5000 }
5001
5002 // Check that the shape of the output matches what Caffe2 expects.
5003 EXPECT_EQ(inputShape, output->dims().vec());
5004
5005 auto res = bindings.get(output);
5006 EE.compile(CompilationMode::Infer);
5007 EE.run(bindings);
5008
5009 auto result = res->getHandle();
5010 for (dim_t dim1 = 0; dim1 < inputShape[0]; ++dim1) {
5011 for (dim_t dim2 = 0; dim2 < inputShape[1]; ++dim2) {
5012 for (dim_t dim3 = 0; dim3 < inputShape[2]; ++dim3) {
5013 for (dim_t dim4 = 0; dim4 < inputShape[3]; ++dim4) {
5014 // As defined in the .pbtxt file
5015 EXPECT_FLOAT_EQ(3.0f, result.at({dim1, dim2, dim3, dim4}));
5016 }
5017 }
5018 }
5019 }
5020}
5021
// Tests importing Caffe2's ReduceBackSum: the op reduces the input over its
// last dimension by summation, so the output shape is the input shape with
// the trailing dimension dropped. The test first validates shapes for
// several ranks, then validates the numeric result for one 3-D input.
TEST_F(Caffe2ImporterTest, reduceBackSum) {
  const std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/reduce_back_sum.pbtxt");
  const std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  // Shape validations
  {
    // One shape per rank 2..4; only the output dims are checked here.
    const std::vector<std::vector<dim_t>> inputShapes{
        {3, 2},
        {64, 64, 11},
        {2, 3, 4, 5},
    };
    for (const auto &inputShape : inputShapes) {
      // A fresh engine/module per shape keeps the runs independent.
      ExecutionEngine EE{};
      auto &mod = EE.getModule();
      Function *F = mod.createFunction("main");

      PlaceholderBindings bindings;
      Placeholder *output;

      Tensor inputs_0{ElemKind::FloatTy, inputShape};
      inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
      // Destroy the loader after the graph is loaded since the following
      // execution will not depend on anything from the loader.
      {
        Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                                   {"inputs_0"}, {&inputs_0.getType()}, *F);
        output = EXIT_ON_ERR(caffe2LD.getSingleOutput());

        bindings.allocate(mod.getPlaceholders());
        updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"},
                                      {&inputs_0});
      }

      // Check that the shape of the output matches what Caffe2 expects.
      // ReduceBackSum removes the last dimension of the input.
      const std::vector<dim_t> expectedOutputShape{inputShape.begin(),
                                                   inputShape.end() - 1};
      EXPECT_EQ(expectedOutputShape, output->dims().vec());
    }
  }

  // Numeric validations
  {
    ExecutionEngine EE{};
    auto &mod = EE.getModule();
    Function *F = mod.createFunction("main");

    PlaceholderBindings bindings;
    Placeholder *output;

    std::vector<dim_t> inputShape{11, 13, 17};

    Tensor inputs_0{ElemKind::FloatTy, inputShape};
    inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
    // Destroy the loader after the graph is loaded since the following
    // execution will not depend on anything from the loader.
    {
      Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                                 {"inputs_0"}, {&inputs_0.getType()}, *F);
      output = EXIT_ON_ERR(caffe2LD.getSingleOutput());

      bindings.allocate(mod.getPlaceholders());
      updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"}, {&inputs_0});
    }

    auto res = bindings.get(output);
    EE.compile(CompilationMode::Infer);
    EE.run(bindings);

    // Each output element must equal the sum of the corresponding slice
    // along the last (reduced) dimension.
    auto result = res->getHandle();
    for (dim_t dim1 = 0; dim1 < inputShape[0]; ++dim1) {
      for (dim_t dim2 = 0; dim2 < inputShape[1]; ++dim2) {
        float sum = 0;
        for (dim_t dim3 = 0; dim3 < inputShape[2]; ++dim3) {
          sum += inputs_0.getHandle().at({dim1, dim2, dim3});
        }
        EXPECT_FLOAT_EQ(sum, result.at({dim1, dim2}));
      }
    }
  }
}
5104
// Tests importing Caffe2's RMSNorm, which has two outputs: "y" (the
// normalized, scaled and shifted input, same shape as x) and "rrms" (one
// reciprocal-RMS value per row of x). The expected values below were
// generated by running the Caffe2 implementation; they are consistent with
// y = x * rrms * gamma + beta where rrms = 1/sqrt(mean(x^2) + eps) per row
// (eps presumably set in the .pbtxt — verify there if this changes).
TEST_F(Caffe2ImporterTest, rmsNorm) {
  const std::string NetDescFilename(GLOW_DATA_PATH
                                    "tests/models/caffe2Models/rmsnorm.pbtxt");
  const std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  PlaceholderBindings bindings;
  Placeholder *y;
  Placeholder *rrms;

  // gamma/beta are per-feature (last-dimension) scale and shift vectors.
  const std::vector<dim_t> xShape{3, 4};
  const std::vector<dim_t> gammaShape{4};
  const std::vector<dim_t> betaShape{4};

  Tensor x{ElemKind::FloatTy, xShape};
  Tensor gamma{ElemKind::FloatTy, gammaShape};
  Tensor beta{ElemKind::FloatTy, betaShape};

  x.getHandle() = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  gamma.getHandle() = {1, 2, 3, 4};
  beta.getHandle() = {1, 2, 3, 4};

  // Destroy the loader after the graph is loaded since the following
  // execution will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename, {"x", "gamma", "beta"},
        {&x.getType(), &gamma.getType(), &beta.getType()}, *F);
    y = EXIT_ON_ERR(caffe2LD.getOutputByName("y"));
    rrms = EXIT_ON_ERR(caffe2LD.getOutputByName("rrms"));

    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"x", "gamma", "beta"},
                                  {&x, &gamma, &beta});
  }

  EXPECT_NE(y, nullptr);
  EXPECT_NE(rrms, nullptr);

  auto yRes = bindings.get(y);
  auto rrmsRes = bindings.get(rrms);

  // y has the shape of x; rrms has one entry per row of x.
  const std::vector<dim_t> yShapeExpected{xShape};
  const std::vector<dim_t> rrmsShapeExpected{xShape[0]};

  EXPECT_EQ(yShapeExpected, yRes->dims().vec());
  EXPECT_EQ(rrmsShapeExpected, rrmsRes->dims().vec());

  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  auto yHandle = yRes->getHandle();

  // Results are based on Caffe2 implementation
  const std::vector<std::vector<float>> yExpected{
      {1.3429972, 3.3719888, 6.0869746, 9.487955},
      {1.7495317, 3.798876, 6.148033, 8.797003},
      {1.8485281, 3.8856182, 6.11127, 8.525484},
  };

  // Only "y" is compared numerically; "rrms" is validated by shape above.
  for (dim_t d1 = 0; d1 < xShape[0]; ++d1) {
    for (dim_t d2 = 0; d2 < xShape[1]; ++d2) {
      EXPECT_NEAR(yExpected[d1][d2], yHandle.at({d1, d2}), 1e-5);
    }
  }
}
5175
5176TEST_F(Caffe2ImporterTest, mean) {
5177 const std::string NetDescFilename(
5178 GLOW_DATA_PATH "tests/models/caffe2Models/mean_3inputs.pbtxt");
5179 const std::string NetWeightFilename(
5180 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5181
5182 const std::vector<dim_t> inputShape{5, 6};
5183
5184 ExecutionEngine EE{};
5185 auto &mod = EE.getModule();
5186 Function *F = mod.createFunction("main");
5187
5188 PlaceholderBindings bindings;
5189 Placeholder *outputPH;
5190
5191 Tensor input1{ElemKind::FloatTy, inputShape};
5192 Tensor input2{ElemKind::FloatTy, inputShape};
5193 Tensor input3{ElemKind::FloatTy, inputShape};
5194 input1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
5195 input2.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
5196 input3.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
5197
5198 // Destroy the loader after the graph is loaded since the following
5199 // execution will not depend on anything from the loader.
5200 {
5201 Caffe2ModelLoader caffe2LD(
5202 NetDescFilename, NetWeightFilename, {"input1", "input2", "input3"},
5203 {&input1.getType(), &input2.getType(), &input3.getType()}, *F);
5204 outputPH = EXIT_ON_ERR(caffe2LD.getOutputByName("output"));
5205
5206 bindings.allocate(mod.getPlaceholders());
5207 updateInputPlaceholdersByName(bindings, &mod,
5208 {"input1", "input2", "input3"},
5209 {&input1, &input2, &input3});
5210 }
5211
5212 auto output = bindings.get(outputPH);
5213
5214 EXPECT_EQ(inputShape, output->dims().vec());
5215
5216 EE.compile(CompilationMode::Infer);
5217 EE.run(bindings);
5218
5219 auto outputH = output->getHandle();
5220
5221 for (dim_t d1 = 0; d1 < inputShape[0]; ++d1) {
5222 for (dim_t d2 = 0; d2 < inputShape[1]; ++d2) {
5223 auto val =
5224 (input1.getHandle().at({d1, d2}) + input2.getHandle().at({d1, d2}) +
5225 input3.getHandle().at({d1, d2})) /
5226 3;
5227 EXPECT_NEAR(val, outputH.at({d1, d2}), 1e-5);
5228 }
5229 }
5230}
5231
5232TEST_F(Caffe2ImporterTest, negative) {
5233 const std::string NetDescFilename(GLOW_DATA_PATH
5234 "tests/models/caffe2Models/negative.pbtxt");
5235 const std::string NetWeightFilename(
5236 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5237
5238 ExecutionEngine EE{};
5239 auto &mod = EE.getModule();
5240 Function *F = mod.createFunction("main");
5241
5242 PlaceholderBindings bindings;
5243 Placeholder *outputPH;
5244
5245 const std::vector<dim_t> shape{5, 6};
5246 Tensor input{ElemKind::FloatTy, shape};
5247 input.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
5248
5249 // Destroy the loader after the graph is loaded since the following
5250 // execution will not depend on anything from the loader.
5251 {
5252 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5253 {&input.getType()}, *F);
5254 outputPH = EXIT_ON_ERR(caffe2LD.getOutputByName("output"));
5255
5256 bindings.allocate(mod.getPlaceholders());
5257 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5258 }
5259
5260 auto output = bindings.get(outputPH);
5261
5262 EXPECT_EQ(shape, output->dims().vec());
5263
5264 EE.compile(CompilationMode::Infer);
5265 EE.run(bindings);
5266
5267 auto outputH = output->getHandle();
5268
5269 for (dim_t d1 = 0; d1 < shape[0]; ++d1) {
5270 for (dim_t d2 = 0; d2 < shape[1]; ++d2) {
5271 EXPECT_NEAR(-1 * input.getHandle().at({d1, d2}), outputH.at({d1, d2}),
5272 1e-5);
5273 }
5274 }
5275}
5276
// Tests importing Caffe2's LpNorm for p=1 and p=2. Note that (as the
// reference implementations below mirror) the op produces sum(|x|) for p=1
// and sum(x^2) for p=2 — i.e. no p-th root is taken — reduced over the whole
// input into a single-element output.
TEST_F(Caffe2ImporterTest, lpNorm) {
  const std::vector<dim_t> inputShape{5, 6};

  // Shared harness: loads the given net, runs it on a random input of
  // inputShape, and compares the scalar output against refImpl(input).
  const auto runTest =
      [&inputShape](const std::string &NetDescFilename,
                    const std::string &NetWeightFilename,
                    std::function<float(const Tensor &input)> refImpl) {
        ExecutionEngine EE{};
        auto &mod = EE.getModule();
        Function *F = mod.createFunction("main");

        PlaceholderBindings bindings;
        Placeholder *outputPH;

        Tensor input{ElemKind::FloatTy, inputShape};
        input.getHandle().randomize(-3.0, 3.0, mod.getPRNG());

        // Destroy the loader after the graph is loaded since the following
        // execution will not depend on anything from the loader.
        {
          Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                                     {"input"}, {&input.getType()}, *F);
          outputPH = EXIT_ON_ERR(caffe2LD.getOutputByName("output"));

          bindings.allocate(mod.getPlaceholders());
          updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
        }

        auto output = bindings.get(outputPH);

        // The norm collapses the whole input into a single element.
        const std::vector<dim_t> expectedOutputShape{1};
        EXPECT_EQ(expectedOutputShape, output->dims().vec());

        EE.compile(CompilationMode::Infer);
        EE.run(bindings);

        auto outputH = output->getHandle();

        EXPECT_NEAR(refImpl(input), outputH.at({0}), 1e-5);
      };

  // Reference for p=1: sum of absolute values.
  const auto refImplP1 = [&inputShape](const Tensor &input) {
    float sum = 0;
    for (dim_t d1 = 0; d1 < inputShape[0]; ++d1) {
      for (dim_t d2 = 0; d2 < inputShape[1]; ++d2) {
        auto val = input.getHandle().at({d1, d2});
        sum += ((val >= 0) ? val : -val);
      }
    }
    return sum;
  };

  // Reference for p=2: sum of squares (no square root).
  const auto refImplP2 = [&inputShape](const Tensor &input) {
    float sum = 0;
    for (dim_t d1 = 0; d1 < inputShape[0]; ++d1) {
      for (dim_t d2 = 0; d2 < inputShape[1]; ++d2) {
        auto val = input.getHandle().at({d1, d2});
        sum += val * val;
      }
    }
    return sum;
  };

  runTest(GLOW_DATA_PATH "tests/models/caffe2Models/lpnorm_p1.pbtxt",
          GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt",
          refImplP1);

  runTest(GLOW_DATA_PATH "tests/models/caffe2Models/lpnorm_p2.pbtxt",
          GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt",
          refImplP2);
}
5348
5349TEST_F(Caffe2ImporterTest, argMin) {
5350 const std::string NetDescFilename(GLOW_DATA_PATH
5351 "tests/models/caffe2Models/argmin.pbtxt");
5352 const std::string NetWeightFilename(
5353 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5354
5355 ExecutionEngine EE{};
5356 auto &mod = EE.getModule();
5357 Function *F = mod.createFunction("main");
5358
5359 PlaceholderBindings bindings;
5360 Placeholder *output;
5361
5362 std::vector<dim_t> inputShape{1, 50};
5363
5364 Tensor input{ElemKind::FloatTy, {inputShape}};
5365 input.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
5366 // Destroy the loader after the graph is loaded since the following
5367 // execution will not depend on anything from the loader.
5368 {
5369 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5370 {&input.getType()}, *F);
5371 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
5372
5373 bindings.allocate(mod.getPlaceholders());
5374 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5375 }
5376
5377 auto res = bindings.get(output);
5378 EE.compile(CompilationMode::Infer);
5379 EE.run(bindings);
5380
5381 auto result = res->getHandle<int64_t>();
5382 std::vector<dim_t> expectedShape{1};
5383 EXPECT_EQ(expectedShape, result.dims().vec());
5384
5385 dim_t minIndex = 0;
5386 for (dim_t d = 1; d < inputShape[1]; ++d) {
5387 if (input.getHandle().at({0, d}) < input.getHandle().at({0, minIndex})) {
5388 minIndex = d;
5389 }
5390 }
5391 EXPECT_EQ(minIndex, result.at({0}));
5392}
5393
5394TEST_F(Caffe2ImporterTest, scale) {
5395 const std::string NetDescFilename(GLOW_DATA_PATH
5396 "tests/models/caffe2Models/scale.pbtxt");
5397 const std::string NetWeightFilename(
5398 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5399
5400 ExecutionEngine EE{};
5401 auto &mod = EE.getModule();
5402 Function *F = mod.createFunction("main");
5403
5404 PlaceholderBindings bindings;
5405 Placeholder *outputPH;
5406
5407 std::vector<dim_t> inputShape{10, 30};
5408
5409 Tensor input{ElemKind::FloatTy, {inputShape}};
5410 input.getHandle().randomize(-10.0, 10.0, mod.getPRNG());
5411 // Destroy the loader after the graph is loaded since the following
5412 // execution will not depend on anything from the loader.
5413 {
5414 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5415 {&input.getType()}, *F);
5416 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
5417
5418 bindings.allocate(mod.getPlaceholders());
5419 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5420 }
5421
5422 auto output = bindings.get(outputPH);
5423 EXPECT_EQ(inputShape, output->dims().vec());
5424
5425 EE.compile(CompilationMode::Infer);
5426 EE.run(bindings);
5427
5428 auto outputH = output->getHandle();
5429
5430 for (dim_t d1 = 1; d1 < inputShape[0]; ++d1) {
5431 for (dim_t d2 = 1; d2 < inputShape[1]; ++d2) {
5432 auto val = input.getHandle().at({d1, d2});
5433 auto exp = 2 * val;
5434 EXPECT_NEAR(exp, outputH.at({d1, d2}), 1e-3);
5435 }
5436 }
5437}
5438
5439TEST_F(Caffe2ImporterTest, sign) {
5440 const std::string NetDescFilename(GLOW_DATA_PATH
5441 "tests/models/caffe2Models/sign.pbtxt");
5442 const std::string NetWeightFilename(
5443 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5444
5445 ExecutionEngine EE{};
5446 auto &mod = EE.getModule();
5447 Function *F = mod.createFunction("main");
5448
5449 PlaceholderBindings bindings;
5450 Placeholder *outputPH;
5451
5452 std::vector<dim_t> inputShape{20, 50};
5453
5454 Tensor input{ElemKind::FloatTy, {inputShape}};
5455 input.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
5456 // Destroy the loader after the graph is loaded since the following
5457 // execution will not depend on anything from the loader.
5458 {
5459 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5460 {&input.getType()}, *F);
5461 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
5462
5463 bindings.allocate(mod.getPlaceholders());
5464 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5465 }
5466
5467 auto output = bindings.get(outputPH);
5468 EXPECT_EQ(inputShape, output->dims().vec());
5469
5470 EE.compile(CompilationMode::Infer);
5471 EE.run(bindings);
5472
5473 auto outputH = output->getHandle();
5474
5475 for (dim_t d1 = 1; d1 < inputShape[0]; ++d1) {
5476 for (dim_t d2 = 1; d2 < inputShape[1]; ++d2) {
5477 auto val = input.getHandle().at({d1, d2});
5478 auto exp = (val > 0) ? 1 : (val == 0 ? 0 : -1);
5479 EXPECT_EQ(exp, outputH.at({d1, d2}));
5480 }
5481 }
5482}
5483
5484TEST_F(Caffe2ImporterTest, softplus) {
5485 const std::string NetDescFilename(GLOW_DATA_PATH
5486 "tests/models/caffe2Models/softplus.pbtxt");
5487 const std::string NetWeightFilename(
5488 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5489
5490 ExecutionEngine EE{};
5491 auto &mod = EE.getModule();
5492 Function *F = mod.createFunction("main");
5493
5494 PlaceholderBindings bindings;
5495 Placeholder *outputPH;
5496
5497 std::vector<dim_t> inputShape{20, 50};
5498
5499 Tensor input{ElemKind::FloatTy, {inputShape}};
5500 input.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
5501 // Destroy the loader after the graph is loaded since the following
5502 // execution will not depend on anything from the loader.
5503 {
5504 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5505 {&input.getType()}, *F);
5506 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
5507
5508 bindings.allocate(mod.getPlaceholders());
5509 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5510 }
5511
5512 auto output = bindings.get(outputPH);
5513 EXPECT_EQ(inputShape, output->dims().vec());
5514
5515 EE.compile(CompilationMode::Infer);
5516 EE.run(bindings);
5517
5518 auto outputH = output->getHandle();
5519
5520 for (dim_t d1 = 1; d1 < inputShape[0]; ++d1) {
5521 for (dim_t d2 = 1; d2 < inputShape[1]; ++d2) {
5522 auto val = input.getHandle().at({d1, d2});
5523 auto exp = std::log(std::exp(val) + 1);
5524 EXPECT_NEAR(exp, outputH.at({d1, d2}), 1e-3);
5525 }
5526 }
5527}
5528
// Tests importing Caffe2's TopK with k=2: for each row the op returns the
// two largest values ("values", descending) and their column indices
// ("indices"). The reference below tracks the running first- and
// second-largest elements per row; strict '>' comparisons mean ties are
// resolved in favor of the earlier (lower) index, matching the assertions.
TEST_F(Caffe2ImporterTest, topk) {
  const std::string NetDescFilename(GLOW_DATA_PATH
                                    "tests/models/caffe2Models/topk_k2.pbtxt");
  const std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  PlaceholderBindings bindings;
  Placeholder *valuesPH;
  Placeholder *indicesPH;

  std::vector<dim_t> inputShape{20, 50};

  Tensor input{ElemKind::FloatTy, {inputShape}};
  input.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  // Destroy the loader after the graph is loaded since the following
  // execution will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
                               {&input.getType()}, *F);
    valuesPH = EXIT_ON_ERR(caffe2LD.getOutputByName("values"));
    indicesPH = EXIT_ON_ERR(caffe2LD.getOutputByName("indices"));

    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
  }

  auto values = bindings.get(valuesPH);
  auto indices = bindings.get(indicesPH);
  // Both outputs are {rows, k} with k=2.
  std::vector<dim_t> expectedShape{20, 2};
  EXPECT_EQ(expectedShape, values->dims().vec());
  EXPECT_EQ(expectedShape, indices->dims().vec());

  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  auto valuesH = values->getHandle();
  auto indicesH = indices->getHandle<int32_t>();

  for (dim_t d1 = 0; d1 < inputShape[0]; ++d1) {
    // Seed the running top-2 with the first two columns, ordered.
    dim_t firstLargestIndex = 0;
    dim_t secondLargestIndex = 1;
    if (input.getHandle().at({d1, 1}) > input.getHandle().at({d1, 0})) {
      std::swap(firstLargestIndex, secondLargestIndex);
    }
    float firstLargestValue = input.getHandle().at({d1, firstLargestIndex});
    float secondLargestValue = input.getHandle().at({d1, secondLargestIndex});

    // Scan the remaining columns, updating the top-2 as larger values appear.
    for (dim_t d2 = 2; d2 < inputShape[1]; ++d2) {
      auto val = input.getHandle().at({d1, d2});
      if (val > firstLargestValue) {
        // New maximum: the old maximum becomes the runner-up.
        secondLargestIndex = firstLargestIndex;
        secondLargestValue = firstLargestValue;
        firstLargestIndex = d2;
        firstLargestValue = val;
      } else if (val > secondLargestValue) {
        secondLargestIndex = d2;
        secondLargestValue = val;
      }
    }

    EXPECT_EQ(indicesH.at({d1, 0}), firstLargestIndex);
    EXPECT_EQ(indicesH.at({d1, 1}), secondLargestIndex);
    EXPECT_EQ(valuesH.at({d1, 0}), firstLargestValue);
    EXPECT_EQ(valuesH.at({d1, 1}), secondLargestValue);
  }
}
5599
// Tests importing Caffe2's SparseLabelSplit with 4 tasks. The sparse input
// is given in lengths/indices/values form: lengths {1, 3, 2, 2} partitions
// the 8 (index, value) pairs into 4 examples. Each pair is routed to the
// task output selected by its index: label_values_k collects the values with
// index k, example_ids_k records which example each value came from, and
// gradient_offset_map gives, for each original entry, its position within
// its task's output (here: first occurrence of each index -> 0, second -> 1).
TEST_F(Caffe2ImporterTest, SparseLabelSplit) {
  const std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/sparselabelsplit.pbtxt");
  const std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  PlaceholderBindings bindings;
  Placeholder *labelValues0PH;
  Placeholder *labelValues1PH;
  Placeholder *labelValues2PH;
  Placeholder *labelValues3PH;
  Placeholder *exampleIds0PH;
  Placeholder *exampleIds1PH;
  Placeholder *exampleIds2PH;
  Placeholder *exampleIds3PH;
  Placeholder *gradientOffsetMapPH;

  Tensor lengths{ElemKind::Int32ITy, {4}};
  Tensor indices{ElemKind::Int64ITy, {8}};
  Tensor values{ElemKind::FloatTy, {8}};

  // Example 0: (3, 1.2); example 1: (1, 2.3), (2, 3.1), (0, 6.7);
  // example 2: (0, 8.3), (2, 9.0); example 3: (1, 3.7), (3, 8.8).
  lengths.getHandle<int32_t>() = {1, 3, 2, 2};
  indices.getHandle<int64_t>() = {3, 1, 2, 0, 0, 2, 1, 3};
  values.getHandle() = {1.2, 2.3, 3.1, 6.7, 8.3, 9.0, 3.7, 8.8};

  // Destroy the loader after the graph is loaded since the following
  // execution will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename, {"lengths", "indices", "values"},
        {&lengths.getType(), &indices.getType(), &values.getType()}, *F);
    labelValues0PH = EXIT_ON_ERR(caffe2LD.getOutputByName("label_values_0"));
    labelValues1PH = EXIT_ON_ERR(caffe2LD.getOutputByName("label_values_1"));
    labelValues2PH = EXIT_ON_ERR(caffe2LD.getOutputByName("label_values_2"));
    labelValues3PH = EXIT_ON_ERR(caffe2LD.getOutputByName("label_values_3"));
    exampleIds0PH = EXIT_ON_ERR(caffe2LD.getOutputByName("example_ids_0"));
    exampleIds1PH = EXIT_ON_ERR(caffe2LD.getOutputByName("example_ids_1"));
    exampleIds2PH = EXIT_ON_ERR(caffe2LD.getOutputByName("example_ids_2"));
    exampleIds3PH = EXIT_ON_ERR(caffe2LD.getOutputByName("example_ids_3"));
    gradientOffsetMapPH =
        EXIT_ON_ERR(caffe2LD.getOutputByName("gradient_offset_map"));

    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod,
                                  {"lengths", "indices", "values"},
                                  {&lengths, &indices, &values});
  }

  EXPECT_NE(labelValues0PH, nullptr);
  EXPECT_NE(labelValues1PH, nullptr);
  EXPECT_NE(labelValues2PH, nullptr);
  EXPECT_NE(labelValues3PH, nullptr);
  EXPECT_NE(exampleIds0PH, nullptr);
  EXPECT_NE(exampleIds1PH, nullptr);
  EXPECT_NE(exampleIds2PH, nullptr);
  EXPECT_NE(exampleIds3PH, nullptr);
  EXPECT_NE(gradientOffsetMapPH, nullptr);

  auto labelValues0 = bindings.get(labelValues0PH);
  auto labelValues1 = bindings.get(labelValues1PH);
  auto labelValues2 = bindings.get(labelValues2PH);
  auto labelValues3 = bindings.get(labelValues3PH);
  auto exampleIds0 = bindings.get(exampleIds0PH);
  auto exampleIds1 = bindings.get(exampleIds1PH);
  auto exampleIds2 = bindings.get(exampleIds2PH);
  auto exampleIds3 = bindings.get(exampleIds3PH);
  auto gradientOffsetMap = bindings.get(gradientOffsetMapPH);

  // Each index value 0..3 appears exactly twice in the input, so every
  // per-task output has two elements.
  std::vector<dim_t> expectedOutputShape{2};
  EXPECT_EQ(expectedOutputShape, labelValues0->dims().vec());
  EXPECT_EQ(expectedOutputShape, labelValues1->dims().vec());
  EXPECT_EQ(expectedOutputShape, labelValues2->dims().vec());
  EXPECT_EQ(expectedOutputShape, labelValues3->dims().vec());
  EXPECT_EQ(expectedOutputShape, exampleIds0->dims().vec());
  EXPECT_EQ(expectedOutputShape, exampleIds1->dims().vec());
  EXPECT_EQ(expectedOutputShape, exampleIds2->dims().vec());
  EXPECT_EQ(expectedOutputShape, exampleIds3->dims().vec());

  // One offset per original (index, value) entry.
  std::vector<dim_t> expectedGradientOffsetMapShape{8};
  EXPECT_EQ(expectedGradientOffsetMapShape, gradientOffsetMap->dims().vec());

  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  auto labelValues0H = labelValues0->getHandle();
  auto labelValues1H = labelValues1->getHandle();
  auto labelValues2H = labelValues2->getHandle();
  auto labelValues3H = labelValues3->getHandle();
  auto exampleIds0H = exampleIds0->getHandle<int32_t>();
  auto exampleIds1H = exampleIds1->getHandle<int32_t>();
  auto exampleIds2H = exampleIds2->getHandle<int32_t>();
  auto exampleIds3H = exampleIds3->getHandle<int32_t>();
  auto gradientOffsetMapH = gradientOffsetMap->getHandle<int32_t>();

  // Values routed per task: task 0 <- {6.7, 8.3}, task 1 <- {2.3, 3.7},
  // task 2 <- {3.1, 9.0}, task 3 <- {1.2, 8.8}, in input order.
  EXPECT_NEAR(6.7, labelValues0H.at(0), 1e-3);
  EXPECT_NEAR(8.3, labelValues0H.at(1), 1e-3);
  EXPECT_NEAR(2.3, labelValues1H.at(0), 1e-3);
  EXPECT_NEAR(3.7, labelValues1H.at(1), 1e-3);
  EXPECT_NEAR(3.1, labelValues2H.at(0), 1e-3);
  EXPECT_NEAR(9.0, labelValues2H.at(1), 1e-3);
  EXPECT_NEAR(1.2, labelValues3H.at(0), 1e-3);
  EXPECT_NEAR(8.8, labelValues3H.at(1), 1e-3);

  // The example (row) each routed value originated from.
  EXPECT_EQ(1, exampleIds0H.at(0));
  EXPECT_EQ(2, exampleIds0H.at(1));
  EXPECT_EQ(1, exampleIds1H.at(0));
  EXPECT_EQ(3, exampleIds1H.at(1));
  EXPECT_EQ(1, exampleIds2H.at(0));
  EXPECT_EQ(2, exampleIds2H.at(1));
  EXPECT_EQ(0, exampleIds3H.at(0));
  EXPECT_EQ(3, exampleIds3H.at(1));

  // First four entries are each the first occurrence of their index
  // (offset 0); the last four are each the second occurrence (offset 1).
  const std::vector<int32_t> expectedGradientOffsetMap{0, 0, 0, 0, 1, 1, 1, 1};
  for (dim_t d = 0; d < 8; ++d) {
    EXPECT_EQ(expectedGradientOffsetMap[d], gradientOffsetMapH.at(d));
  }
}
5721
5722TEST_F(Caffe2ImporterTest, Log1p) {
5723 const std::string NetDescFilename(GLOW_DATA_PATH
5724 "tests/models/caffe2Models/log1p.pbtxt");
5725 const std::string NetWeightFilename(
5726 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5727
5728 ExecutionEngine EE{};
5729 auto &mod = EE.getModule();
5730 Function *F = mod.createFunction("main");
5731
5732 PlaceholderBindings bindings;
5733 Placeholder *outputPH;
5734
5735 std::vector<dim_t> inputShape{3, 4};
5736
5737 Tensor input{ElemKind::FloatTy, {inputShape}};
5738 input.getHandle() = {0.5, 0.1, 0.01, 1, 2, 3, 10, 20, 30, 1.1, 1.01, 1.001};
5739 // Destroy the loader after the graph is loaded since the following
5740 // execution will not depend on anything from the loader.
5741 {
5742 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5743 {&input.getType()}, *F);
5744 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
5745
5746 bindings.allocate(mod.getPlaceholders());
5747 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5748 }
5749
5750 auto output = bindings.get(outputPH);
5751 EXPECT_EQ(inputShape, output->dims().vec());
5752
5753 EE.compile(CompilationMode::Infer);
5754 EE.run(bindings);
5755
5756 auto outputH = output->getHandle();
5757
5758 for (dim_t d1 = 1; d1 < inputShape[0]; ++d1) {
5759 for (dim_t d2 = 1; d2 < inputShape[1]; ++d2) {
5760 auto val = input.getHandle().at({d1, d2});
5761 auto exp = std::log(val + 1);
5762 EXPECT_NEAR(exp, outputH.at({d1, d2}), 1e-3) << "Bad value " << val;
5763 }
5764 }
5765}
5766
5767TEST_F(Caffe2ImporterTest, ReduceBackMean) {
5768 const std::string NetDescFilename(
5769 GLOW_DATA_PATH "tests/models/caffe2Models/reducebackmean.pbtxt");
5770 const std::string NetWeightFilename(
5771 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5772
5773 ExecutionEngine EE{};
5774 auto &mod = EE.getModule();
5775 Function *F = mod.createFunction("main");
5776
5777 PlaceholderBindings bindings;
5778 Placeholder *outputPH;
5779
5780 std::vector<dim_t> inputShape{3, 4};
5781
5782 Tensor input{ElemKind::FloatTy, {inputShape}};
5783 input.getHandle() = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
5784 // Destroy the loader after the graph is loaded since the following
5785 // execution will not depend on anything from the loader.
5786 {
5787 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5788 {&input.getType()}, *F);
5789 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
5790
5791 bindings.allocate(mod.getPlaceholders());
5792 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5793 }
5794
5795 const std::vector<dim_t> expectedShape{3};
5796 auto output = bindings.get(outputPH);
5797 EXPECT_EQ(expectedShape, output->dims().vec());
5798
5799 EE.compile(CompilationMode::Infer);
5800 EE.run(bindings);
5801
5802 auto outputH = output->getHandle();
5803
5804 for (dim_t d1 = 0; d1 < inputShape[0]; ++d1) {
5805 float expected = 0;
5806 for (dim_t d2 = 0; d2 < inputShape[1]; ++d2) {
5807 expected += input.getHandle().at({d1, d2});
5808 }
5809 expected /= inputShape[1];
5810 EXPECT_NEAR(expected, outputH.at(d1), 1e-3);
5811 }
5812}
5813
5814// Test that dropout is a no-op
5815TEST_F(Caffe2ImporterTest, Dropout) {
5816 const std::string NetDescFilename(GLOW_DATA_PATH
5817 "tests/models/caffe2Models/dropout.pbtxt");
5818 const std::string NetWeightFilename(
5819 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5820
5821 ExecutionEngine EE{};
5822 auto &mod = EE.getModule();
5823 Function *F = mod.createFunction("main");
5824
5825 PlaceholderBindings bindings;
5826 Placeholder *outputPH;
5827
5828 std::vector<dim_t> inputShape{20, 50};
5829 Tensor input{ElemKind::FloatTy, {inputShape}};
5830 input.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
5831 // Destroy the loader after the graph is loaded since the following
5832 // execution will not depend on anything from the loader.
5833 {
5834 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5835 {&input.getType()}, *F);
5836 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
5837
5838 bindings.allocate(mod.getPlaceholders());
5839 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5840 }
5841
5842 // Expect graph to have one node (1 save node)
5843 EXPECT_EQ(F->getNodes().size(), 1);
5844 auto *save = getSaveNodeFromDest(outputPH);
5845 ASSERT_TRUE(save);
5846
5847 auto output = bindings.get(outputPH);
5848 EXPECT_EQ(inputShape, output->dims().vec());
5849
5850 EE.compile(CompilationMode::Infer);
5851 EE.run(bindings);
5852
5853 auto inputH = input.getHandle();
5854 auto outputH = output->getHandle();
5855 for (dim_t d1 = 0; d1 < inputShape[0]; ++d1) {
5856 for (dim_t d2 = 0; d2 < inputShape[1]; ++d2) {
5857 EXPECT_EQ(inputH.at(d1), outputH.at(d1));
5858 }
5859 }
5860}
5861
5862TEST_F(Caffe2ImporterTest, CastInt64ToInt64) {
5863 const std::string NetDescFilename(
5864 GLOW_DATA_PATH "tests/models/caffe2Models/cast_int64_to_int64.pbtxt");
5865 const std::string NetWeightFilename(
5866 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5867
5868 ExecutionEngine EE{};
5869 auto &mod = EE.getModule();
5870 Function *F = mod.createFunction("main");
5871
5872 PlaceholderBindings bindings;
5873 Placeholder *outputPH;
5874
5875 std::vector<dim_t> inputShape{3, 4};
5876
5877 Tensor input{ElemKind::Int64ITy, {inputShape}};
5878 input.getHandle<int64_t>() = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
5879 // Destroy the loader after the graph is loaded since the following
5880 // execution will not depend on anything from the loader.
5881 {
5882 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5883 {&input.getType()}, *F);
5884 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
5885
5886 bindings.allocate(mod.getPlaceholders());
5887 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5888 }
5889
5890 auto output = bindings.get(outputPH);
5891 EXPECT_EQ(inputShape, output->dims().vec());
5892
5893 EE.compile(CompilationMode::Infer);
5894 EE.run(bindings);
5895
5896 auto outputH = output->getHandle<int64_t>();
5897
5898 for (dim_t d1 = 0; d1 < inputShape[0]; ++d1) {
5899 for (dim_t d2 = 0; d2 < inputShape[1]; ++d2) {
5900 auto val = input.getHandle<int64_t>().at({d1, d2});
5901 auto exp = static_cast<int64_t>(val);
5902 EXPECT_EQ(exp, outputH.at({d1, d2}));
5903 }
5904 }
5905}
5906
5907// Gelu test
5908TEST_F(Caffe2ImporterTest, gelu) {
5909 const std::string NetDescFilename(GLOW_DATA_PATH
5910 "tests/models/caffe2Models/gelu.pbtxt");
5911 const std::string NetWeightFilename(
5912 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
5913
5914 ExecutionEngine EE{};
5915 auto &mod = EE.getModule();
5916 Function *F = mod.createFunction("main");
5917
5918 PlaceholderBindings bindings;
5919 Placeholder *outputPH;
5920
5921 std::vector<dim_t> inputShape{10, 30};
5922
5923 Tensor input{ElemKind::FloatTy, {inputShape}};
5924 input.getHandle().randomize(-10.0, 10.0, mod.getPRNG());
5925 // Destroy the loader after the graph is loaded since the following
5926 // execution will not depend on anything from the loader.
5927 {
5928 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
5929 {&input.getType()}, *F);
5930 outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
5931
5932 bindings.allocate(mod.getPlaceholders());
5933 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
5934 }
5935
5936 auto output = bindings.get(outputPH);
5937 EXPECT_EQ(inputShape, output->dims().vec());
5938
5939 EE.compile(CompilationMode::Infer);
5940 EE.run(bindings);
5941
5942 auto outputH = output->getHandle();
5943
5944 for (dim_t d1 = 1; d1 < inputShape[0]; ++d1) {
5945 for (dim_t d2 = 1; d2 < inputShape[1]; ++d2) {
5946 auto val = input.getHandle().at({d1, d2});
5947 // Gaussian Error Linear Unit. An activation function used in the most
5948 // recent Transformers – Google's BERT and OpenAI's GPT-2. This activation
5949 // function takes the form of this equation:
5950 // GELU(x) = 0.5𝑥(1+erf(𝑥/√2))
5951 // Knowing that erf(𝑥) is very close to tanh(𝑥)
5952 // GELU(x) ≃ 0.5x(1+tanh(√2/π(x+0.044715x3)))
5953 // = 0.5𝑥(1 + tanh(0.797885𝑥+0.035677𝑥3))
5954 auto exp =
5955 0.5 * val * (1 + tanh(0.797885 * val + 0.035677 * pow(val, 3)));
5956 EXPECT_NEAR(exp, outputH.at({d1, d2}), 1e-3);
5957 }
5958 }
5959}
5960