1/**
2 * Copyright (c) Glow Contributors. See CONTRIBUTORS file.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#include "ImporterTestUtils.h"
17#include "glow/ExecutionEngine/ExecutionEngine.h"
18#include "glow/Graph/Graph.h"
19#include "glow/Graph/Nodes.h"
20#include "glow/Graph/PlaceholderBindings.h"
21#include "glow/Importer/ONNXModelLoader.h"
22#include "llvm/Support/FileSystem.h"
23#include "gtest/gtest.h"
24
25#ifndef GLOW_DATA_PATH
26#define GLOW_DATA_PATH
27#endif
28
29using namespace glow;
30
31#include <fstream>
32using namespace std;
33
34class OnnxImporterTest : public ::testing::Test {
35protected:
36 // By default constant folding at load time is enabled in general, but we do
37 // many tests here loading Constants, so keep it false during these tests by
38 // default.
39 void SetUp() override { glow::setConstantFoldLoaderOpsFlag(false); }
40 void TearDown() override { glow::setConstantFoldLoaderOpsFlag(true); }
41};
42
43/// Loads onnxtxt model file \p filename and \returns ModelProto object.
44Expected<ONNX_NAMESPACE::ModelProto> loadProto(const std::string &filename) {
45 std::ifstream ff(filename, std::ios::in | std::ios::binary);
46 RETURN_ERR_IF_NOT(ff,
47 strFormat("Can't find the model or network files for %s.",
48 filename.c_str()),
49 ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF);
50 if (filename.find(".onnxtxt") != std::string::npos) {
51 std::string str((std::istreambuf_iterator<char>(ff)),
52 std::istreambuf_iterator<char>());
53 ONNX_NAMESPACE::ModelProto MP;
54 bool parseNet = google::protobuf::TextFormat::ParseFromString(str, &MP);
55 RETURN_ERR_IF_NOT(parseNet, "Failed to parse ModelProto",
56 ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF);
57 return MP;
58 }
59 return MAKE_ERR("Can't load proto file");
60}
61
62/// Saves ModelProto object \p model as onnxtxt model file \p filename
63/// and \returns true if successful.
64Expected<bool> saveProto(const std::string &filename,
65 ONNX_NAMESPACE::ModelProto &model) {
66 std::ofstream ff(filename, std::ios::out);
67 RETURN_ERR_IF_NOT(ff, "Can't write the proto file.",
68 ErrorValue::ErrorCode::RUNTIME_ERROR);
69 if (filename.find(".onnxtxt") != std::string::npos) {
70 std::string onnx_message = model.DebugString();
71 ff << onnx_message;
72 ff.close();
73 return true;
74 }
75 ff.close();
76 return false;
77}
78
79/// Replaces placeholders with names \p tensorNames in model proto object \p
80/// model with initializers of same name and values specified in input tensor
81/// array \p tensors and \returns true if successful.
82Expected<bool>
83replacePlaceholderWithConstant(ONNX_NAMESPACE::ModelProto &model,
84 llvm::ArrayRef<const char *> tensorNames,
85 llvm::ArrayRef<Tensor *> tensors) {
86 ONNX_NAMESPACE::NodeProto np;
87 ONNX_NAMESPACE::GraphProto *gp = model.mutable_graph();
88 RETURN_ERR_IF_NOT(gp, "Can't get mutable graph.",
89 ErrorValue::ErrorCode::RUNTIME_ERROR);
90 for (size_t i = 0; i < tensorNames.size(); i++) {
91 for (int j = 0; j < gp->input_size(); j++) {
92 ONNX_NAMESPACE::ValueInfoProto *valueInfo = gp->mutable_input(j);
93 const std::string &inputName = valueInfo->name();
94 if (inputName != tensorNames[i]) {
95 continue;
96 }
97 std::string newName = "dummy_input" + std::to_string(i);
98 valueInfo->set_name(newName);
99 auto RH = tensors[i]->getHandle<>();
100 ONNX_NAMESPACE::TensorProto *tp = gp->add_initializer();
101 tp->set_name(tensorNames[i]);
102 for (size_t k = 0; k < tensors[i]->dims().size(); k++) {
103 tp->add_dims(tensors[i]->dims()[k]);
104 }
105 switch (RH.getElementType()) {
106 case ElemKind::FloatTy:
107 tp->set_data_type(ONNX_NAMESPACE::TensorProto::FLOAT);
108 for (size_t k = 0; k < tensors[i]->size(); k++) {
109 tp->add_float_data(RH.raw(k));
110 }
111 break;
112 case ElemKind::Int64ITy:
113 tp->set_data_type(ONNX_NAMESPACE::TensorProto::INT64);
114 for (size_t k = 0; k < tensors[i]->size(); k++) {
115 tp->add_int64_data(RH.raw(k));
116 }
117 break;
118 case ElemKind::Int32ITy:
119 tp->set_data_type(ONNX_NAMESPACE::TensorProto::INT32);
120 for (size_t k = 0; k < tensors[i]->size(); k++) {
121 tp->add_int32_data(RH.raw(k));
122 }
123 break;
124 default:
125 std::cout << "Unsupported datatype";
126 return false;
127 }
128 }
129 }
130 gp->clear_input();
131 return true;
132}
133
134/// Performs constant folding test on the given model file \p NetFilename
135/// with single output and then checking against expected values
136/// \p expectedValues and \returns true if the test completes without error.
137Error checkConstFoldLegalName(std::string NetFilename,
138 std::vector<float> expectedValues) {
139 Tensor T(glow::ElemKind::FloatTy, {3, 2});
140 T.getHandle<float>() = expectedValues;
141 ONNX_NAMESPACE::ModelProto modelDef;
142 ASSIGN_VALUE_OR_RETURN_ERR(modelDef, loadProto(NetFilename));
143 setConstantFoldLoaderOpsFlag(true);
144
145 // It is expected that loading will fold the whole graph and output
146 // nodes will become constants during the loading process.
147 ExecutionEngine EE;
148 Module &mod = EE.getModule();
149 Function *F = mod.createFunction("temp");
150 ONNXModelLoader onnxLD(NetFilename, {}, {}, *F);
151
152 setConstantFoldLoaderOpsFlag(false);
153
154 // The folded output tensors are expected to be constants and should
155 // match the expected values.
156 NodeValue NV;
157 ASSIGN_VALUE_OR_RETURN_ERR(
158 NV, onnxLD.getNodeValueByName(modelDef.graph().output(0).name()));
159 auto *constOut = llvm::dyn_cast<Constant>(NV.getNode());
160 RETURN_ERR_IF_NOT(constOut, "Failed cast to Constant");
161 EXPECT_TRUE(T.isEqual(constOut->getPayload()));
162 return Error::success();
163}
164
165/// Performs constant folding test on the given model file \p NetFilename
166/// by replacing input tensors with name \p tensorNames, and values \p tensors
167/// and then checking against expected output expectedTensors. \returns true
168/// if the test completes without error.
169Error checkConstFoldedOutput(std::string NetFilename,
170 llvm::ArrayRef<const char *> tensorNames,
171 llvm::ArrayRef<Tensor *> tensors,
172 llvm::ArrayRef<Tensor *> expectedTensors) {
173 ONNX_NAMESPACE::ModelProto modelDef;
174 llvm::SmallVector<char, 64> resultPath;
175 llvm::sys::fs::createTemporaryFile("dummy", "onnxtxt", resultPath);
176 std::string netFilename(resultPath.begin(), resultPath.end());
177
178 ASSIGN_VALUE_OR_RETURN_ERR(modelDef, loadProto(NetFilename));
179 // Replace placeholders in the original onnx model with constants.
180 RETURN_IF_ERR(replacePlaceholderWithConstant(modelDef, tensorNames, tensors)
181 .takeError());
182 RETURN_IF_ERR(saveProto(netFilename, modelDef).takeError());
183 setConstantFoldLoaderOpsFlag(true);
184
185 // It is expected that loading will fold the whole graph and output
186 // nodes will become constants during the loading process.
187 ExecutionEngine EE;
188 Module &mod = EE.getModule();
189 Function *F = mod.createFunction("temp");
190 ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
191 setConstantFoldLoaderOpsFlag(false);
192
193 // The folded output tensors are expected to be constants and should
194 // match the expectedTensors passed in.
195 for (int i = 0; i < modelDef.graph().output_size(); i++) {
196 NodeValue NV;
197 ASSIGN_VALUE_OR_RETURN_ERR(
198 NV, onnxLD.getNodeValueByName(modelDef.graph().output(i).name()));
199 auto *constOut = llvm::dyn_cast<Constant>(NV.getNode());
200 RETURN_ERR_IF_NOT(constOut, "Failed cast to Constant");
201 EXPECT_TRUE(expectedTensors[i]->isEqual(constOut->getPayload()));
202 }
203 return Error::success();
204}
205
206static void importReduceL2Test(const std::string &netFilename,
207 llvm::ArrayRef<float> inputValues,
208 llvm::ArrayRef<dim_t> inputShape,
209 llvm::ArrayRef<dim_t> outputShape,
210 llvm::ArrayRef<float> expectedValues) {
211 float delta = 1e-08;
212 ExecutionEngine EE{};
213 auto &mod = EE.getModule();
214 Function *F = mod.createFunction("main");
215 PlaceholderBindings bindings;
216 Placeholder *graphOutputVar;
217
218 // Load the .onnxtxt model.
219 Type inputType(ElemKind::FloatTy, inputShape);
220 ONNXModelLoader onnxLD(netFilename, {"input"}, {&inputType}, *F);
221 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
222 auto PH = mod.getPlaceholderByNameSlow("input");
223 auto *inTensor = bindings.allocate(PH);
224 inTensor->getHandle() = inputValues;
225 EE.compile(CompilationMode::Infer);
226 bindings.allocate(mod.getPlaceholders());
227 EE.run(bindings);
228 auto result = bindings.get(graphOutputVar)->getHandle();
229 ASSERT_TRUE(result.dims() == (llvm::ArrayRef<dim_t>)outputShape);
230 for (size_t i = 0; i < result.getType().size(); i++) {
231 EXPECT_NEAR(result.raw(i), expectedValues[i], delta);
232 }
233}
234
235/// Test the utility function that gets the inputs name and glow types
236/// from updated graph proto
237
238TEST_F(OnnxImporterTest, getInputNamesAndTypes) {
239 // Set onnx-define-symbol if present in model
240 std::string inputSymbol = "batch_size,5";
241 setOnnxDefineSymbol({inputSymbol});
242
243 std::string netFilename(
244 GLOW_DATA_PATH
245 "tests/models/onnxModels/getInputsOnnxDefineSample.onnxtxt");
246
247 bool isError = false;
248
249 std::vector<std::string> names;
250 std::vector<Type> types;
251
252 std::vector<std::string> expectedNames = {"input"};
253 std::vector<std::vector<dim_t>> expectedDims = {{5, 3, 224, 224}};
254
255 isError = ERR_TO_BOOL(
256 ONNXModelLoader::getInputsNamesAndTypes(names, types, netFilename));
257
258 EXPECT_FALSE(isError);
259
260 for (size_t i = 0; i < expectedNames.size(); i++) {
261 EXPECT_TRUE(expectedNames[i] == names[i]);
262 std::vector<dim_t> dims = types[i].dims();
263 for (size_t j = 0; j < expectedDims[i].size(); j++) {
264 EXPECT_EQ(expectedDims[i][j], dims[j]);
265 }
266 }
267}
268
269/// Test the utility function which wraps a negative axis.
270TEST_F(OnnxImporterTest, getPositiveAxis) {
271 int axisPos;
272 ASSIGN_VALUE_OR_FAIL_TEST(axisPos, getPositiveAxis<int>(-3, 3));
273 EXPECT_EQ(axisPos, 0);
274 ASSIGN_VALUE_OR_FAIL_TEST(axisPos, getPositiveAxis<int>(-2, 3));
275 EXPECT_EQ(axisPos, 1);
276 ASSIGN_VALUE_OR_FAIL_TEST(axisPos, getPositiveAxis<int>(-1, 3));
277 EXPECT_EQ(axisPos, 2);
278 ASSIGN_VALUE_OR_FAIL_TEST(axisPos, getPositiveAxis<int>(0, 3));
279 EXPECT_EQ(axisPos, 0);
280 ASSIGN_VALUE_OR_FAIL_TEST(axisPos, getPositiveAxis<int>(1, 3));
281 EXPECT_EQ(axisPos, 1);
282 ASSIGN_VALUE_OR_FAIL_TEST(axisPos, getPositiveAxis<int>(2, 3));
283 EXPECT_EQ(axisPos, 2);
284}
285
286/// Test loading reduceL2 op from an ONNX model
287/// with axes = [].
288TEST_F(OnnxImporterTest, reduceL2NoAxis) {
289 std::vector<float> inputValues = {1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2};
290 std::vector<dim_t> inputShape = {2, 3, 2};
291 std::vector<dim_t> outputShape = {1, 1, 1};
292 std::vector<float> expectedValues = {5.477226};
293 std::string netFilename(GLOW_DATA_PATH
294 "tests/models/onnxModels/ReduceL2NoAxis.onnxtxt");
295 importReduceL2Test(netFilename, inputValues, inputShape, outputShape,
296 expectedValues);
297}
298
299/// Test loading reduceL2 op from an ONNX model
300/// with negative axis values.
301TEST_F(OnnxImporterTest, reduceL2NegAxis) {
302 std::vector<float> inputValues = {1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2};
303 std::vector<dim_t> inputShape = {2, 3, 2};
304 std::vector<dim_t> outputShape = {2, 1, 1};
305 std::vector<float> expectedValues = {3.8729835, 3.8729835};
306 std::string netFilename(GLOW_DATA_PATH
307 "tests/models/onnxModels/ReduceL2NegAxis.onnxtxt");
308 importReduceL2Test(netFilename, inputValues, inputShape, outputShape,
309 expectedValues);
310}
311
312/// Test loading reduceL2 op from an ONNX model
313/// with keepdims = True.
314TEST_F(OnnxImporterTest, reduceL2KeepDims) {
315 std::vector<float> inputValues = {1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2};
316 std::vector<dim_t> inputShape = {2, 3, 2};
317 std::vector<dim_t> outputShape = {2, 1, 1};
318 std::vector<float> expectedValues = {3.8729835, 3.8729835};
319 std::string netFilename(GLOW_DATA_PATH
320 "tests/models/onnxModels/ReduceL2KeepDims.onnxtxt");
321 importReduceL2Test(netFilename, inputValues, inputShape, outputShape,
322 expectedValues);
323}
324
325/// Test loading reduceL2 op from an ONNX model
326/// with keepdims = False.
327TEST_F(OnnxImporterTest, reduceL2NoKeepDims) {
328 std::vector<float> inputValues = {1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2};
329 std::vector<dim_t> inputShape = {2, 3, 2};
330 std::vector<dim_t> outputShape = {2};
331 std::vector<float> expectedValues = {3.8729835, 3.8729835};
332 std::string netFilename(GLOW_DATA_PATH
333 "tests/models/onnxModels/ReduceL2NoKeepDims.onnxtxt");
334 importReduceL2Test(netFilename, inputValues, inputShape, outputShape,
335 expectedValues);
336}
337
338/// Test loading constant+relu ops with numeric input names from an ONNX model.
339TEST_F(OnnxImporterTest, reluConstFoldLegalName) {
340 std::string NetFilename(GLOW_DATA_PATH
341 "tests/models/onnxModels/constRelu.onnxtxt");
342 FAIL_TEST_IF_ERR(
343 checkConstFoldLegalName(NetFilename, {1.0, 0.0, 0.0, 1.0, 1.0, 1.0}));
344}
345
/// Loads the binary-arithmetic model \p fileName (under the onnxModels data
/// directory), feeds its "data" input with NCHW values generated from
/// \p inputShape, and checks both the loaded graph structure and the numeric
/// output against values computed with the reference scalar function \p op.
/// \p OpType is the Glow node type expected under the save node (AddNode,
/// SubNode, ...). \p multi selects which expected-value table applies
/// (multidirectional vs. unidirectional broadcast models). \p leftBroadcast
/// and \p rightBroadcast state whether the importer is expected to have
/// inserted a BroadcastNode on the LHS / RHS of the arithmetic node.
/// Finally re-runs the model through the constant-folding path and checks it
/// produces the same output.
template <class OpType>
static void
importArithMultiBroadcastTest(std::string fileName,
                              llvm::ArrayRef<dim_t> inputShape, bool multi,
                              bool leftBroadcast, bool rightBroadcast,
                              const std::function<float(float, float)> &op) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetFilename =
      std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName;
  PlaceholderBindings bindings;
  Placeholder *graphOutputVar;
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  Tensor data;
  getNCHWData(&data, inputShape[0], inputShape[1], inputShape[2],
              inputShape[3]);
  {
    ONNXModelLoader onnxLD(NetFilename, {"data"}, {&data.getType()}, *F);
    graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"data"}, {&data});
  }
  // Check the graph structure: the save node's input must be the expected
  // arithmetic node type, possibly fed through BroadcastNodes.
  auto *saveNode = getSaveNodeFromDest(graphOutputVar);
  auto *node = saveNode->getInput().getNode();
  auto *opNode = llvm::dyn_cast<OpType>(node);
  EXPECT_NE(nullptr, opNode);

  // A BroadcastNode must be present on exactly the sides the caller said the
  // importer should broadcast.
  BroadcastNode *leftBN =
      llvm::dyn_cast<BroadcastNode>(opNode->getLHS().getNode());
  BroadcastNode *rightBN =
      llvm::dyn_cast<BroadcastNode>(opNode->getRHS().getNode());
  EXPECT_NE(leftBroadcast, leftBN == nullptr);
  EXPECT_NE(rightBroadcast, rightBN == nullptr);

  // Compile&run the graph, and check the output
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);
  auto result = bindings.get(graphOutputVar)->getHandle();
  std::vector<dim_t> expectedDims = {1, 3, 4, 2};
  std::vector<float> expectedValues;

  if (multi) {
    // Multidirectional-broadcast models: the {1,3,1,2} input is repeated
    // along H, and the second (scalar 2) operand applies everywhere.
    expectedValues = {op(0, 2), op(1, 2), op(0, 2), op(1, 2), op(0, 2),
                      op(1, 2), op(0, 2), op(1, 2), op(2, 2), op(3, 2),
                      op(2, 2), op(3, 2), op(2, 2), op(3, 2), op(2, 2),
                      op(3, 2), op(4, 2), op(5, 2), op(4, 2), op(5, 2),
                      op(4, 2), op(5, 2), op(4, 2), op(5, 2)};
  } else {
    // Unidirectional-broadcast models: the {1,3,4,2} input is used directly,
    // element i paired with the scalar 2 operand.
    expectedValues = {op(0, 2), op(1, 2), op(2, 2), op(3, 2), op(4, 2),
                      op(5, 2), op(6, 2), op(7, 2), op(8, 2), op(9, 2),
                      op(10, 2), op(11, 2), op(12, 2), op(13, 2), op(14, 2),
                      op(15, 2), op(16, 2), op(17, 2), op(18, 2), op(19, 2),
                      op(20, 2), op(21, 2), op(22, 2), op(23, 2)};
  }
  EXPECT_TRUE(result.dims().vec() == expectedDims);
  for (size_t i = 0; i < result.getType().size(); i++) {
    EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
  }
  // Constant Folding Test.
  FAIL_TEST_IF_ERR(checkConstFoldedOutput(NetFilename, {"data"}, {&data},
                                          {bindings.get(graphOutputVar)}));
}
413
414static void importExpandTest(const std::string &netFilename,
415 llvm::ArrayRef<float> inputValues,
416 llvm::ArrayRef<dim_t> inputShape,
417 llvm::ArrayRef<dim_t> outputShape,
418 llvm::ArrayRef<float> expectedValues) {
419 float delta = 1e-08;
420 ExecutionEngine EE{};
421 auto &mod = EE.getModule();
422 Function *F = mod.createFunction("main");
423 PlaceholderBindings bindings;
424 Placeholder *graphOutputVar;
425 // Load the .onnxtxt model.
426 Type inputType(ElemKind::FloatTy, inputShape);
427 ONNXModelLoader onnxLD(netFilename, {"x"}, {&inputType}, *F);
428 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
429 auto *PH = mod.getPlaceholderByNameSlow("x");
430 auto *inTensor = bindings.allocate(PH);
431 inTensor->getHandle() = inputValues;
432 EE.compile(CompilationMode::Infer);
433 bindings.allocate(mod.getPlaceholders());
434 EE.run(bindings);
435 auto result = bindings.get(graphOutputVar)->getHandle();
436 ASSERT_TRUE(result.dims() == (llvm::ArrayRef<dim_t>)outputShape);
437 for (size_t i = 0; i < result.getType().size(); i++) {
438 EXPECT_NEAR(result.raw(i), expectedValues[i], delta);
439 }
440}
441
442/// Import maxPool1D
443static void importMaxPool1DTest(std::string &netFilename,
444 llvm::ArrayRef<float> inputValues,
445 llvm::ArrayRef<dim_t> inputShape,
446 llvm::ArrayRef<dim_t> outputShape,
447 llvm::ArrayRef<float> expectedValues) {
448 float delta = 1e-08;
449 ExecutionEngine EE{};
450 auto &mod = EE.getModule();
451 Function *F = mod.createFunction("main");
452 PlaceholderBindings bindings;
453 Placeholder *graphOutputVar;
454
455 Type input_type(ElemKind::FloatTy, inputShape);
456 ONNXModelLoader onnxLD(netFilename, {"x"}, {&input_type}, *F);
457
458 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
459
460 auto PH = mod.getPlaceholderByNameSlow("x");
461 auto *inTensor = bindings.allocate(PH);
462 inTensor->getHandle() = inputValues;
463
464 EE.compile(CompilationMode::Infer);
465 bindings.allocate(mod.getPlaceholders());
466 EE.run(bindings);
467
468 auto result = bindings.get(graphOutputVar)->getHandle();
469 ASSERT_TRUE(result.dims() == (llvm::ArrayRef<dim_t>)outputShape);
470 for (size_t i = 0; i < result.getType().size(); i++) {
471 EXPECT_NEAR(result.raw(i), expectedValues[i], delta);
472 }
473}
474
475/// Test loading expand op from an ONNX model
476/// with different output shape.
477TEST_F(OnnxImporterTest, expandDiffShape) {
478 std::vector<float> inputValues = {1, 2, 3};
479 std::vector<dim_t> inputShape = {3, 1};
480 std::vector<dim_t> outputShape = {2, 3, 6};
481 std::vector<float> expectedValues = {
482 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
483 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
484 };
485 std::string netFilename(
486 GLOW_DATA_PATH "tests/models/onnxModels/expandnodeDiffShape.onnxtxt");
487 importExpandTest(netFilename, inputValues, inputShape, outputShape,
488 expectedValues);
489}
490
491/// Test loading expand op from an ONNX model
492/// with same output shape.
493TEST_F(OnnxImporterTest, expandSameShape) {
494 std::vector<float> inputValues = {1, 2, 3};
495 std::vector<dim_t> inputShape = {3, 1};
496 std::vector<dim_t> outputShape = {3, 4};
497 std::vector<float> expectedValues = {
498 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3,
499 };
500 std::string netFilename(
501 GLOW_DATA_PATH "tests/models/onnxModels/expandnodeSameShape.onnxtxt");
502 importExpandTest(netFilename, inputValues, inputShape, outputShape,
503 expectedValues);
504}
505
506/// Test loading maxPool1D op from an ONNX model
507/// with different output shape.
508TEST_F(OnnxImporterTest, maxPool1D) {
509 std::vector<float> inputValues = {
510 1.4206449, 0.54408556, 1.3318906, 0.771925, 0.9450552,
511 0.08600737, 0.30009857, 1.4206449, 0.54408556, 1.3318906,
512 0.771925, 0.9450552, 0.08600737, 0.30009857};
513
514 std::vector<dim_t> inputShape = {1, 2, 7};
515 std::vector<dim_t> outputShape = {1, 2, 2};
516 std::vector<float> expectedValues = {
517 1.4206449,
518 0.9450552,
519 1.4206449,
520 0.9450552,
521 };
522 std::string netFilename(GLOW_DATA_PATH
523 "tests/models/onnxModels/maxPool1D.onnxtxt");
524 importMaxPool1DTest(netFilename, inputValues, inputShape, outputShape,
525 expectedValues);
526}
527
528/// Test loading LeakyRelu op from an ONNX model.
529TEST_F(OnnxImporterTest, leakyRelu) {
530 ExecutionEngine EE{};
531 auto &mod = EE.getModule();
532 Function *F = mod.createFunction("main");
533
534 std::string netFilename(GLOW_DATA_PATH
535 "tests/models/onnxModels/leakyRelu.onnxtxt");
536
537 PlaceholderBindings bindings;
538 Placeholder *output;
539 {
540 Tensor x(ElemKind::FloatTy, {7});
541 x.getHandle() = {0, -1, -2, -3, 4, 5, 6};
542
543 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
544 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
545 }
546
547 auto *save = getSaveNodeFromDest(output);
548 LeakyReluNode *LR = llvm::dyn_cast<LeakyReluNode>(save->getInput().getNode());
549 ASSERT_TRUE(LR);
550 EXPECT_FLOAT_EQ(LR->getAlpha(), 0.100000001);
551}
552
553/// Test Loading LeakyRelu op from an ONNX model with default alpha.
554TEST_F(OnnxImporterTest, leakyReluDefault) {
555 ExecutionEngine EE{};
556 auto &mod = EE.getModule();
557 Function *F = mod.createFunction("main");
558
559 std::string netFilename(GLOW_DATA_PATH
560 "tests/models/onnxModels/leakyReluDefault.onnxtxt");
561
562 PlaceholderBindings bindings;
563 Placeholder *output;
564 {
565 Tensor x(ElemKind::FloatTy, {7});
566 x.getHandle() = {0, -1, -2, -3, 4, 5, 6};
567
568 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
569 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
570 }
571
572 auto *save = getSaveNodeFromDest(output);
573 LeakyReluNode *LR = llvm::dyn_cast<LeakyReluNode>(save->getInput().getNode());
574 ASSERT_TRUE(LR);
575 EXPECT_FLOAT_EQ(LR->getAlpha(), 0.01);
576}
577
578TEST_F(OnnxImporterTest, importAddMultiBroadcastOp7) {
579 importArithMultiBroadcastTest<AddNode>(
580 "addMultiBroadcastOp7.onnxtxt", {1, 3, 1, 2}, /* multi */ true,
581 /* leftBroadcast */ true, /* rightBroadcast */ true,
582 [](float a, float b) { return a + b; });
583}
584
585TEST_F(OnnxImporterTest, importAddUniBroadcastOp6NoAxis) {
586 importArithMultiBroadcastTest<AddNode>(
587 "addUniBroadcastOp6NoAxis.onnxtxt", {1, 3, 4, 2}, /* multi */ false,
588 /* leftBroadcast */ false, /* rightBroadcast */ true,
589 [](float a, float b) { return a + b; });
590}
591
592TEST_F(OnnxImporterTest, importAddUniBroadcastOp6Axis) {
593 importArithMultiBroadcastTest<AddNode>(
594 "addUniBroadcastOp6Axis.onnxtxt", {1, 3, 4, 2}, /* multi */ false,
595 /* leftBroadcast */ false, /* rightBroadcast */ true,
596 [](float a, float b) { return a + b; });
597}
598
599TEST_F(OnnxImporterTest, importSubMultiBroadcastOp7) {
600 importArithMultiBroadcastTest<SubNode>(
601 "subMultiBroadcastOp7.onnxtxt", {1, 3, 1, 2}, /* multi */ true,
602 /* leftBroadcast */ true, /* rightBroadcast */ true,
603 [](float a, float b) { return a - b; });
604}
605
606TEST_F(OnnxImporterTest, importSubUniBroadcastOp6NoAxis) {
607 importArithMultiBroadcastTest<SubNode>(
608 "subUniBroadcastOp6NoAxis.onnxtxt", {1, 3, 4, 2}, /* multi */ false,
609 /* leftBroadcast */ false, /* rightBroadcast */ true,
610 [](float a, float b) { return a - b; });
611}
612
613TEST_F(OnnxImporterTest, importSubUniBroadcastOp6Axis) {
614 importArithMultiBroadcastTest<SubNode>(
615 "subUniBroadcastOp6Axis.onnxtxt", {1, 3, 4, 2}, /* multi */ false,
616 /* leftBroadcast */ false, /* rightBroadcast */ true,
617 [](float a, float b) { return a - b; });
618}
619
620TEST_F(OnnxImporterTest, importMulMultiBroadcastOp7) {
621 importArithMultiBroadcastTest<MulNode>(
622 "mulMultiBroadcastOp7.onnxtxt", {1, 3, 1, 2}, /* multi */ true,
623 /* leftBroadcast */ true, /* rightBroadcast */ true,
624 [](float a, float b) { return a * b; });
625}
626
627TEST_F(OnnxImporterTest, importMulUniBroadcastOp6NoAxis) {
628 importArithMultiBroadcastTest<MulNode>(
629 "mulUniBroadcastOp6NoAxis.onnxtxt", {1, 3, 4, 2}, /* multi */ false,
630 /* leftBroadcast */ false, /* rightBroadcast */ true,
631 [](float a, float b) { return a * b; });
632}
633
634TEST_F(OnnxImporterTest, importMulUniBroadcastOp6Axis) {
635 importArithMultiBroadcastTest<MulNode>(
636 "mulUniBroadcastOp6Axis.onnxtxt", {1, 3, 4, 2}, /* multi */ false,
637 /* leftBroadcast */ false, /* rightBroadcast */ true,
638 [](float a, float b) { return a * b; });
639}
640
641TEST_F(OnnxImporterTest, importDivMultiBroadcastOp7) {
642 importArithMultiBroadcastTest<DivNode>(
643 "divMultiBroadcastOp7.onnxtxt", {1, 3, 1, 2}, /* multi */ true,
644 /* leftBroadcast */ true, /* rightBroadcast */ true,
645 [](float a, float b) { return a / b; });
646}
647
648TEST_F(OnnxImporterTest, importDivUniBroadcastOp6NoAxis) {
649 importArithMultiBroadcastTest<DivNode>(
650 "divUniBroadcastOp6NoAxis.onnxtxt", {1, 3, 4, 2}, /* multi */ false,
651 /* leftBroadcast */ false, /* rightBroadcast */ true,
652 [](float a, float b) { return a / b; });
653}
654
655TEST_F(OnnxImporterTest, importDivUniBroadcastOp6Axis) {
656 importArithMultiBroadcastTest<DivNode>(
657 "divUniBroadcastOp6Axis.onnxtxt", {1, 3, 4, 2}, /* multi */ false,
658 /* leftBroadcast */ false, /* rightBroadcast */ true,
659 [](float a, float b) { return a / b; });
660}
661
662TEST_F(OnnxImporterTest, importPowMultiBroadcastOp7) {
663 importArithMultiBroadcastTest<PowNode>(
664 "powMultiBroadcastOp7.onnxtxt", {1, 3, 1, 2}, /* multi */ true,
665 /* leftBroadcast */ true, /* rightBroadcast */ true,
666 [](float a, float b) { return std::pow(a, b); });
667}
668
669/// This tests reproduces issue #2135.
670TEST_F(OnnxImporterTest, importUniBroadcastMultiOutput) {
671 ExecutionEngine EE{};
672 auto &mod = EE.getModule();
673 Function *F = mod.createFunction("main");
674
675 std::string NetFilename = std::string(
676 GLOW_DATA_PATH "tests/models/onnxModels/UniBroadcastIssue2135.onnxtxt");
677 Tensor data(ElemKind::FloatTy, {20});
678 ONNXModelLoader onnxLD(NetFilename, {"data"}, {&data.getType()}, *F);
679 (void)onnxLD;
680}
681
682/// Test Onnx QuantizeLinear and DequantizeLinear together.
683TEST_F(OnnxImporterTest, quantizeLinearDequantizeLinear) {
684 ExecutionEngine EE{};
685 auto &mod = EE.getModule();
686 Function *F = mod.createFunction("main");
687 std::string fileName = "QuantizeLinearDequantizeLinear.onnxtxt";
688 std::string NetFilename =
689 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName;
690 PlaceholderBindings bindings;
691 Placeholder *graphOutputVar;
692 std::vector<dim_t> inputShape{6};
693 Type input_type(ElemKind::FloatTy, inputShape);
694 std::string inputName = "x";
695 ONNXModelLoader onnxLD(NetFilename, {inputName.c_str()}, {&input_type}, *F);
696 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
697 auto *PH = mod.getPlaceholderByNameSlow(inputName);
698 auto *inTensor = bindings.allocate(PH);
699 inTensor->getHandle().randomize(-1.0, 1.0, mod.getPRNG());
700 // Compile&run the graph, and check the output
701 EE.compile(CompilationMode::Infer);
702 bindings.allocate(mod.getPlaceholders());
703 EE.run(bindings);
704 auto result = bindings.get(graphOutputVar)->getHandle();
705 auto inHandle = inTensor->getHandle();
706 for (size_t i = 0; i < result.getType().size(); i++) {
707 EXPECT_NEAR(result.raw(i), inHandle.raw(i), 1e-05);
708 }
709}
710
711/// Test loading of Elementwise Unary Ops floating point.
712static void testEltwiseUnaryOpFloat(std::string fileName,
713 llvm::ArrayRef<dim_t> inputShape,
714 std::string input_name, float delta,
715 const std::function<float(float)> &op) {
716 ExecutionEngine EE{};
717 auto &mod = EE.getModule();
718 Function *F = mod.createFunction("main");
719 std::string NetFilename =
720 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName;
721 PlaceholderBindings bindings;
722 Placeholder *graphOutputVar;
723 Type input_type(ElemKind::FloatTy, inputShape);
724 ONNXModelLoader onnxLD(NetFilename, {input_name.c_str()}, {&input_type}, *F);
725 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
726 auto PH = mod.getPlaceholderByNameSlow(input_name);
727 auto *inTensor = bindings.allocate(PH);
728 inTensor->getHandle().randomize(-10.0, 10.0, mod.getPRNG());
729 // Compile&run the graph, and check the output
730 EE.compile(CompilationMode::Infer);
731 bindings.allocate(mod.getPlaceholders());
732 EE.run(bindings);
733 auto result = bindings.get(graphOutputVar)->getHandle();
734 auto inHandle = inTensor->getHandle();
735 ASSERT_TRUE(result.dims() == inputShape);
736 for (size_t i = 0; i < result.getType().size(); i++) {
737 EXPECT_NEAR(result.raw(i), op(inHandle.raw(i)), delta);
738 }
739}
740
741TEST_F(OnnxImporterTest, importExp) {
742 testEltwiseUnaryOpFloat("exp.onnxtxt", {1, 2, 4, 3}, "data", 0.002,
743 [](float a) { return std::exp(a); });
744}
745
746TEST(onnx, importNeg) {
747 testEltwiseUnaryOpFloat("neg.onnxtxt", {1, 2, 4, 3}, "data", 0.000,
748 [](float a) { return -a; });
749}
750
751TEST(onnx, importCeil) {
752 testEltwiseUnaryOpFloat("ceil.onnxtxt", {1, 2, 4, 3}, "data", 0.000,
753 [](float a) { return std::ceil(a); });
754}
755
756TEST(onnx, importFloor) {
757 testEltwiseUnaryOpFloat("floor.onnxtxt", {1, 2, 4, 3}, "data", 0.000,
758 [](float a) { return std::floor(a); });
759}
760
761TEST_F(OnnxImporterTest, importSin) {
762 testEltwiseUnaryOpFloat("Sin.onnxtxt", {2, 3, 1}, "X", 0.002,
763 [](float a) { return std::sin(a); });
764}
765
766TEST_F(OnnxImporterTest, importCos) {
767 testEltwiseUnaryOpFloat("Cos.onnxtxt", {2, 3, 1}, "X", 0.002,
768 [](float a) { return std::cos(a); });
769}
770
771TEST_F(OnnxImporterTest, importErf) {
772 testEltwiseUnaryOpFloat("Erf.onnxtxt", {1, 3, 4, 5}, "input", 0.002,
773 [](float a) { return std::erf(a); });
774}
775
776TEST(onnx, importAbs) {
777 testEltwiseUnaryOpFloat("abs.onnxtxt", {1, 2, 3, 2}, "input", 0.002,
778 [](float a) { return std::abs(a); });
779}
780
781// Tests log node for random positive values.
782static void testImportLog(std::string fileName,
783 llvm::ArrayRef<dim_t> inputShape,
784 std::string input_name, float delta,
785 const std::function<float(float)> &op) {
786
787 ExecutionEngine EE{};
788 auto &mod = EE.getModule();
789 Function *F = mod.createFunction("main");
790 std::string NetFilename =
791 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName;
792 PlaceholderBindings bindings;
793 Placeholder *graphOutputVar;
794 Type input_type(ElemKind::FloatTy, inputShape);
795 ONNXModelLoader onnxLD(NetFilename, {input_name.c_str()}, {&input_type}, *F);
796 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
797 auto PH = mod.getPlaceholderByNameSlow(input_name);
798 auto *inTensor = bindings.allocate(PH);
799
800 inTensor->getHandle().randomize(0, 500.0, mod.getPRNG());
801 // Compile&run the graph, and check the output
802 EE.compile(CompilationMode::Infer);
803 bindings.allocate(mod.getPlaceholders());
804 EE.run(bindings);
805 auto result = bindings.get(graphOutputVar)->getHandle();
806 auto inHandle = inTensor->getHandle();
807 ASSERT_TRUE(result.dims() == inputShape);
808 for (size_t i = 0; i < result.getType().size(); i++) {
809 EXPECT_NEAR(result.raw(i), op(inHandle.raw(i)), delta);
810 }
811}
812
813/// Test loading of Elemenntwise Trigonometric Ops
814/// Extendable for other ops in future
815static void
816testEltwiseTrigonometricOpFloat(std::string fileName,
817 llvm::ArrayRef<dim_t> inputShape,
818 std::string input_name, float delta,
819 const std::function<float(float)> &op) {
820 ExecutionEngine EE{};
821 auto &mod = EE.getModule();
822 Function *F = mod.createFunction("main");
823 std::string NetFilename =
824 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName;
825 PlaceholderBindings bindings;
826 Placeholder *graphOutputVar;
827 Type input_type(ElemKind::FloatTy, inputShape);
828 ONNXModelLoader onnxLD(NetFilename, {input_name.c_str()}, {&input_type}, *F);
829 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
830 auto PH = mod.getPlaceholderByNameSlow(input_name);
831 auto *inTensor = bindings.allocate(PH);
832
833 // Range of Asin/Acos is -1 to 1
834 inTensor->getHandle().randomize(-1.0, 1.0, mod.getPRNG());
835 // Compile&run the graph, and check the output
836 EE.compile(CompilationMode::Infer);
837 bindings.allocate(mod.getPlaceholders());
838 EE.run(bindings);
839 auto result = bindings.get(graphOutputVar)->getHandle();
840 auto inHandle = inTensor->getHandle();
841 ASSERT_TRUE(result.dims() == inputShape);
842 for (size_t i = 0; i < result.getType().size(); i++) {
843 EXPECT_NEAR(result.raw(i), op(inHandle.raw(i)), delta);
844 }
845}
846
847TEST_F(OnnxImporterTest, importAsin) {
848 testEltwiseTrigonometricOpFloat("Asin.onnxtxt", {1, 3, 4, 5}, "input", 0.002,
849 [](float a) { return std::asin(a); });
850}
851
852TEST_F(OnnxImporterTest, importAcos) {
853 testEltwiseTrigonometricOpFloat("Acos.onnxtxt", {1, 3, 4, 5}, "input", 0.002,
854 [](float a) { return std::acos(a); });
855}
856
857TEST_F(OnnxImporterTest, importAtan) {
858 testEltwiseTrigonometricOpFloat("Atan.onnxtxt", {1, 3, 4, 5}, "input", 0.002,
859 [](float a) { return std::atan(a); });
860}
861
862TEST_F(OnnxImporterTest, importLog) {
863 testImportLog("log.onnxtxt", {1, 2, 3, 2}, "data", 0.002,
864 [](float a) { return std::log(a); });
865}
866
867static void testImportPRelu(std::string filename,
868 llvm::ArrayRef<dim_t> inputShape,
869 std::vector<float> expectedSlope) {
870 ExecutionEngine EE{};
871 auto &mod = EE.getModule();
872 Function *F = mod.createFunction("main");
873
874 std::string NetFileName =
875 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + filename;
876
877 PlaceholderBindings bindings;
878 Placeholder *graphOutputVar;
879 // Destroy the loader after the graph is loaded since the following execution
880 // will not depend on anyting from the loader.
881 Tensor data(ElemKind::FloatTy, inputShape);
882 data.getHandle().randomize(-4.0, 4.0, mod.getPRNG());
883 {
884 ONNXModelLoader onnxLoader(NetFileName, {"data"}, {&data.getType()}, *F);
885 graphOutputVar = EXIT_ON_ERR(onnxLoader.getSingleOutput());
886 bindings.allocate(mod.getPlaceholders());
887 updateInputPlaceholdersByName(bindings, &mod, {"data"}, {&data});
888 }
889
890 // Compile&run the graph, and check the output.
891 EE.compile(CompilationMode::Infer);
892 EE.run(bindings);
893 auto dataH =
894 bindings.get(bindings.getPlaceholderByNameSlow("data"))->getHandle();
895 auto result = bindings.get(graphOutputVar)->getHandle();
896 std::vector<dim_t> expectedDims = {inputShape[0], inputShape[1],
897 inputShape[2], inputShape[3]};
898
899 EXPECT_TRUE(result.dims().vec() == expectedDims);
900 for (size_t i = 0; i < dataH.size(); i++) {
901 float expectedVal = expectedSlope[i] * std::min<float>(0, dataH.raw(i)) +
902 std::max<float>(0, dataH.raw(i));
903 EXPECT_FLOAT_EQ(result.raw(i), expectedVal);
904 }
905
906 // Constant Folding Test.
907 FAIL_TEST_IF_ERR(checkConstFoldedOutput(NetFileName, {"data"}, {&data},
908 {bindings.get(graphOutputVar)}));
909}
910
911TEST_F(OnnxImporterTest, importPreluSlopeHasSameShape) {
912 // The expected slope values correspond to the pre-broadcast
913 // initializer values in the model file.
914 std::vector<float> expectedSlope = {1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0,
915 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0};
916 testImportPRelu("preluSlopeHasSameShape.onnxtxt", {1, 4, 2, 2},
917 expectedSlope);
918}
919
920TEST_F(OnnxImporterTest, importPReluBroadcastSlope) {
921 // The expected slope values correspond to the pre-broadcast
922 // initializer values in the model file.
923 std::vector<float> expectedSlope = {1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0,
924 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0};
925 testImportPRelu("preluBroadcastSlope.onnxtxt", {1, 4, 2, 2}, expectedSlope);
926}
927
928/// Expects failure to load PRelu in case of invalid slope shape.
929TEST_F(OnnxImporterTest, importPReluInvalidBroadcastSlope) {
930 ExecutionEngine EE{};
931 auto &mod = EE.getModule();
932 Function *F = mod.createFunction("main");
933
934 std::string NetFileName =
935 std::string(GLOW_DATA_PATH
936 "tests/models/onnxModels/preluInvalidBroadcastSlope.onnxtxt");
937
938 // Destroy the loader after the graph is loaded since the following execution
939 // will not depend on anyting from the loader.
940 {
941 Tensor data(ElemKind::FloatTy, {1, 4, 2, 2});
942 EXPECT_DEATH(ONNXModelLoader(NetFileName, {"data"}, {&data.getType()}, *F),
943 "");
944 }
945}
946
/// Test loading HardSigmoid op from an ONNX model.
/// The importer is expected to lower it to a chain
/// Splat(alpha) -> Mul -> Add(Splat(beta)) -> Clip, which is what the
/// structural checks below verify.
TEST_F(OnnxImporterTest, hardsigmoid) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/hardsigmoid.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  {
    // Scope the loader so it is destroyed once the graph is loaded; the
    // structural checks below only need the Function.
    Tensor x(ElemKind::FloatTy, {5});
    x.getHandle() = {-3, -1, 0, 1, 3};

    ONNXModelLoader onnxLD(netFilename, {"input"}, {&x.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
  }

  // The node feeding the save must be the final Clip of the lowering.
  auto *save = getSaveNodeFromDest(output);
  ClipNode *LR = llvm::dyn_cast<ClipNode>(save->getInput().getNode());
  ASSERT_TRUE(LR);

  // check beta
  // Beta is materialized as the Splat on the Add's RHS. The expected value
  // presumably mirrors the beta attribute in hardsigmoid.onnxtxt — confirm
  // against the model file if this ever changes.
  AddNode *addBeta = llvm::dyn_cast<AddNode>(LR->getInput());
  ASSERT_TRUE(addBeta);
  SplatNode *betaSplat = llvm::dyn_cast<SplatNode>(addBeta->getRHS());
  ASSERT_TRUE(betaSplat);
  EXPECT_FLOAT_EQ(betaSplat->getValue(), 0.500000001);

  // check alpha
  // Alpha is the Splat on the LHS of the Mul feeding the Add.
  MulNode *mulAlpha = llvm::dyn_cast<MulNode>(addBeta->getLHS());
  ASSERT_TRUE(mulAlpha);
  SplatNode *alphaSplat = llvm::dyn_cast<SplatNode>(mulAlpha->getLHS());
  ASSERT_TRUE(alphaSplat);
  EXPECT_FLOAT_EQ(alphaSplat->getValue(), 0.16666667);
}
984
985/// Helper method to run the Conv operator test cases.
986/// \p filename contains the model .onnxtxt.
987/// \p expectedDims: output Tensor dimensions.
988/// \p expectedValues : output Tensor values expected.
989/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
990/// strides is {1, 1}, group is 1. Pads can vary.
991static void convTestHelper(std::string &filename,
992 llvm::ArrayRef<dim_t> expectedDims,
993 llvm::ArrayRef<float> expectedValues) {
994
995 ExecutionEngine EE{};
996 auto &mod = EE.getModule();
997 Function *F = mod.createFunction("main");
998
999 std::string NetFilename =
1000 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + filename;
1001
1002 PlaceholderBindings bindings;
1003 Placeholder *graphOutputVar;
1004 // Destroy the loader after the graph is loaded since the following execution
1005 // will not depend on anyting from the loader.
1006 {
1007 Tensor data;
1008 getNCHWData(&data, 1, 1, 3, 3);
1009 ONNXModelLoader onnxLD(NetFilename, {"data"}, {&data.getType()}, *F);
1010 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
1011 bindings.allocate(mod.getPlaceholders());
1012 updateInputPlaceholdersByName(bindings, &mod, {"data"}, {&data});
1013 }
1014
1015 // ONNX importer loads a conv node and converts it to 4 ops:
1016 // Transpose (input) -> Conv -> Transpose
1017 // Transpose (filter) ->
1018 // A save node is added in the network as well. Therefore there are 5 nodes:
1019 // Transpose (input) -> Conv -> Transpose -> Save
1020 // Transpose (filter) ->
1021 // Note that in case the convolution filter is a constant tensor, the filter
1022 // transpose node will be later optimized out by the optimizer.
1023 EXPECT_EQ(F->getNodes().size(), 5);
1024 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1025 EXPECT_EQ(mod.getConstants().size(), 2);
1026
1027 auto *saveNode = getSaveNodeFromDest(graphOutputVar);
1028 auto *node = saveNode->getInput().getNode();
1029
1030 EXPECT_TRUE(node->getKind() == Kinded::Kind::TransposeNodeKind);
1031 auto *convNode = llvm::dyn_cast<TransposeNode>(node)->getInput().getNode();
1032
1033 EXPECT_TRUE(convNode->getKind() == Kinded::Kind::ConvolutionNodeKind);
1034 auto *tInNode =
1035 llvm::dyn_cast<ConvolutionNode>(convNode)->getInput().getNode();
1036 auto *tFilterNode =
1037 llvm::dyn_cast<ConvolutionNode>(convNode)->getFilter().getNode();
1038 EXPECT_TRUE(tInNode->getKind() == Kinded::Kind::TransposeNodeKind);
1039 EXPECT_TRUE(tFilterNode->getKind() == Kinded::Kind::TransposeNodeKind);
1040
1041 EE.compile(CompilationMode::Infer);
1042 EE.run(bindings);
1043 auto result = bindings.get(graphOutputVar)->getHandle();
1044 EXPECT_TRUE(result.dims() == expectedDims);
1045 for (size_t i = 0, e = expectedValues.size(); i < e; i++) {
1046 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
1047 }
1048}
1049
1050/// Helper method to run the Conv operator test cases.
1051/// \p filename contains the model .onnxtxt.
1052/// \p expectedDims: output Tensor dimensions.
1053/// \p expectedValues : output Tensor values expected.
1054/// The input is N*C*H*W (1*1*2*3*3), the kernels is {2, 3, 3},
1055/// strides is {1, 1, 1}, group is 1. Pads can vary.
1056static void conv3DTestHelper(std::string &filename,
1057 llvm::ArrayRef<dim_t> inputDims,
1058 llvm::ArrayRef<dim_t> expectedDims,
1059 llvm::ArrayRef<float> expectedValues) {
1060
1061 ExecutionEngine EE{};
1062 auto &mod = EE.getModule();
1063 Function *F = mod.createFunction("main");
1064
1065 std::string NetFilename =
1066 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + filename;
1067
1068 PlaceholderBindings bindings;
1069 Placeholder *graphOutputVar;
1070 // Destroy the loader after the graph is loaded since the following execution
1071 // will not depend on anyting from the loader.
1072 {
1073 Tensor data;
1074 getNCTHWData(&data, inputDims[0], inputDims[1], inputDims[2], inputDims[3],
1075 inputDims[4]);
1076 ONNXModelLoader onnxLD(NetFilename, {"data"}, {&data.getType()}, *F);
1077 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
1078 bindings.allocate(mod.getPlaceholders());
1079 updateInputPlaceholdersByName(bindings, &mod, {"data"}, {&data});
1080 }
1081
1082 // ONNX importer loads a conv node and converts it to 4 ops:
1083 // Transpose (input) -> Conv -> Transpose
1084 // Transpose (filter) ->
1085 // A save node is added in the network as well. Therefore there are 5 nodes:
1086 // Transpose (input) -> Conv -> Transpose -> Save
1087 // Transpose (filter) ->
1088 // Note that in case the convolution filter is a constant tensor, the filter
1089 // transpose node will be later optimized out by the optimizer.
1090 EXPECT_EQ(F->getNodes().size(), 5);
1091 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1092 EXPECT_EQ(mod.getConstants().size(), 2);
1093
1094 auto *saveNode = getSaveNodeFromDest(graphOutputVar);
1095 auto *node = saveNode->getInput().getNode();
1096
1097 EXPECT_TRUE(node->getKind() == Kinded::Kind::TransposeNodeKind);
1098 auto *convNode = llvm::dyn_cast<TransposeNode>(node)->getInput().getNode();
1099
1100 EXPECT_TRUE(convNode->getKind() == Kinded::Kind::Convolution3DNodeKind);
1101 auto *tInNode =
1102 llvm::dyn_cast<Convolution3DNode>(convNode)->getInput().getNode();
1103 auto *tFilterNode =
1104 llvm::dyn_cast<Convolution3DNode>(convNode)->getFilter().getNode();
1105 EXPECT_TRUE(tInNode->getKind() == Kinded::Kind::TransposeNodeKind);
1106 EXPECT_TRUE(tFilterNode->getKind() == Kinded::Kind::TransposeNodeKind);
1107
1108 EE.compile(CompilationMode::Infer);
1109 EE.run(bindings);
1110 auto result = bindings.get(graphOutputVar)->getHandle();
1111 EXPECT_TRUE(result.dims() == expectedDims);
1112 for (size_t i = 0, e = expectedValues.size(); i < e; i++) {
1113 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
1114 }
1115}
1116
1117/// Test loading conv op from a ONNX model.
1118/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
1119/// strides is {1, 1}, pads is {1, 1, 1, 1}, group is 1.
1120TEST_F(OnnxImporterTest, importConv) {
1121 std::string filename("simpleConv.onnxtxt");
1122 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
1123 std::vector<float> expectedValues = {2, 3, 5, 4, 5, 10, 14, 9,
1124 11, 22, 26, 15, 8, 15, 17, 10};
1125 convTestHelper(filename, expectedDims, expectedValues);
1126}
1127
1128/// Test loading conv op from a ONNX model.
1129/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
1130/// strides is {1, 1}, pads is {1, 1, 1, 1}, group is 1, dilation is {1, 2}.
1131TEST_F(OnnxImporterTest, importConvNonSquareDilation) {
1132 std::string filename("simpleConvNonSquareDilation.onnxtxt");
1133 std::vector<dim_t> expectedDims = {1, 1, 4, 3};
1134 std::vector<float> expectedValues = {3, 4, 3, 7, 12, 7, 13, 24, 13, 9, 16, 9};
1135 convTestHelper(filename, expectedDims, expectedValues);
1136}
1137
1138/// Test loading conv op from a ONNX model.
1139/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
1140/// strides is {1, 1}, auto_pad VALID (i.e. no padding), group is 1.
1141TEST_F(OnnxImporterTest, importConvAutoPadValid) {
1142 std::string filename("simpleConvAutoPadValid.onnxtxt");
1143 std::vector<dim_t> expectedDims = {1, 1, 2, 2};
1144 std::vector<float> expectedValues = {10, 14, 22, 26};
1145 convTestHelper(filename, expectedDims, expectedValues);
1146}
1147
1148/// Test loading conv op from a ONNX model.
1149/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
1150/// strides is {1, 1}, auto_pad SAME_UPPER, group is 1.
1151TEST_F(OnnxImporterTest, importConvAutoPadSameUpper) {
1152 std::string filename("simpleConvAutoPadSameUpper.onnxtxt");
1153 std::vector<dim_t> expectedDims = {1, 1, 3, 3};
1154 std::vector<float> expectedValues = {10, 14, 9, 22, 26, 15, 15, 17, 10};
1155 convTestHelper(filename, expectedDims, expectedValues);
1156}
1157
1158/// Test loading conv op from a ONNX model.
1159/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
1160/// strides is {1, 1}, auto_pad SAME_LOWER, group is 1.
1161TEST_F(OnnxImporterTest, importConvAutoPadSameLower) {
1162 std::string filename("simpleConvAutoPadSameLower.onnxtxt");
1163 std::vector<dim_t> expectedDims = {1, 1, 3, 3};
1164 std::vector<float> expectedValues = {2, 3, 5, 5, 10, 14, 11, 22, 26};
1165 convTestHelper(filename, expectedDims, expectedValues);
1166}
1167
1168/// Test loading conv 3D op from a ONNX model.
1169/// The input is N*C*T*H*W (1*1*2*3*3), the kernels is {2, 3, 3},
1170/// strides is {1, 1, 1}, pads is {1, 1, 1, 1, 1, 1}, group is 1.
1171TEST_F(OnnxImporterTest, importConv3D) {
1172 std::string filename("simpleConv3D.onnxtxt");
1173 std::vector<dim_t> inputDims = {1, 1, 2, 3, 3};
1174 std::vector<dim_t> expectedDims = {1, 1, 3, 3, 3};
1175 std::vector<float> expectedValues = {
1176 3.0, 6.0, 6.0, 10.0, 16.0, 14.0, 12.0, 18.0, 15.0,
1177 26.25, 39.0, 32.25, 47.0, 68.0, 55.0, 44.25, 63.0, 50.25,
1178 23.25, 33.0, 26.25, 37.0, 52.0, 41.0, 32.25, 45.0, 35.25};
1179 conv3DTestHelper(filename, inputDims, expectedDims, expectedValues);
1180}
1181
/// Test loading conv 3D op from a ONNX model.
/// The input is N*C*T*H*W (1*1*2*3*3), the kernels is {2, 3, 3},
/// strides is {1, 1, 1}, pads is {1, 1, 1, 1, 1, 1}, group is 1.
/// Dilation is {1, 2, 1}.
/// TODO: this test is disabled; its body references simpleConv3D.onnxtxt,
/// which does not carry the {1, 2, 1} dilation described above. Re-enable
/// once a dedicated dilated-Conv3D model exists.
// TEST_F(OnnxImporterTest, importConv3DNonSquareDilation) {
1187// std::string filename("simpleConv3D.onnxtxt");
1188// std::vector<dim_t> inputDims = {1, 1, 2, 3, 3};
1189// std::vector<dim_t> expectedDims = {1, 1, 3, 1, 3};
1190// std::vector<float> expectedValues = {
1191// 5.0, 8.0, 7.0, 23.5, 34.0, 27.5, 18.5, 26.0, 20.5
1192// };
1193// conv3DTestHelper(filename, inputDims, expectedDims, expectedValues);
1194//}
1195
1196/// Test loading conv 3D op from a ONNX model.
1197/// The input is N*C*T*H*W (1*1*2*3*3), the kernels is {2, 3, 3},
1198/// strides is {1, 1, 1}, auto_pad VALID (i.e. no padding), group is 1.
1199TEST_F(OnnxImporterTest, importConv3DAutoPadValid) {
1200 std::string filename("simpleConv3DAutoPadValid.onnxtxt");
1201 std::vector<dim_t> inputDims = {1, 1, 2, 3, 3};
1202 std::vector<dim_t> expectedDims = {1, 1, 1, 1, 1};
1203 std::vector<float> expectedValues = {68.0};
1204 conv3DTestHelper(filename, inputDims, expectedDims, expectedValues);
1205}
1206
1207/// Test loading conv 3D op from a ONNX model.
1208/// The input is N*C*T*H*W (1*1*2*3*3), the kernels is {2, 3, 3},
1209/// strides is {1, 2, 2}, auto_pad SAME_LOWER, group is 1.
1210TEST_F(OnnxImporterTest, importConv3DAutoPadSameLower) {
1211 std::string filename("simpleConv3DAutoPadSameLower.onnxtxt");
1212 std::vector<dim_t> inputDims = {1, 1, 2, 3, 3};
1213 std::vector<dim_t> expectedDims = {1, 1, 2, 2, 2};
1214 std::vector<float> expectedValues = {3.0, 6.0, 12.0, 15.0,
1215 26.25, 32.25, 44.25, 50.25};
1216 conv3DTestHelper(filename, inputDims, expectedDims, expectedValues);
1217}
1218
1219/// Test loading conv 3D op from a ONNX model.
1220/// The input is N*C*T*H*W (1*1*2*3*3), the kernels is {2, 3, 3},
1221/// strides is {1, 2, 2}, auto_pad SAME_UPPER, group is 1.
1222TEST_F(OnnxImporterTest, importConv3DAutoPadSameUpper) {
1223 std::string filename("simpleConv3DAutoPadSameUpper.onnxtxt");
1224 std::vector<dim_t> inputDims = {1, 1, 2, 3, 3};
1225 std::vector<dim_t> expectedDims = {1, 1, 2, 2, 2};
1226 std::vector<float> expectedValues = {26.25, 32.25, 44.25, 50.25,
1227 23.25, 26.25, 32.25, 35.25};
1228 conv3DTestHelper(filename, inputDims, expectedDims, expectedValues);
1229}
1230
/// Test loading conv 3D op with non-cubic pads from a ONNX model.
/// The input is N*C*T*H*W (1*1*3*3*3), kernels is {1, 1, 1},
/// strides is {1, 1, 1}, pads is {1, 2, 3, 3, 1, 2}, group is 1.
/// Filter is 1.0 so that output equals input + padding
TEST_F(OnnxImporterTest, importConv3DNonCubicPads) {
  std::string filename("simpleConv3DNonCubicPads.onnxtxt");
  std::vector<dim_t> inputDims = {1, 1, 3, 3, 3};
  std::vector<dim_t> expectedDims = {1, 1, 7, 6, 8};
  // With a 1x1x1 identity filter, the expected output is the input values
  // (0..26, row-major) surrounded by zero padding. Each blank-line-separated
  // group below is one 6x8 output slice along the T dimension (7 slices).
  std::vector<float> expectedValues = {
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,

      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      1.00,  2.00,  0.00,  0.00,  0.00,  0.00,  0.00,  3.00,  4.00,  5.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  6.00,  7.00,  8.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,

      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  9.00,
      10.00, 11.00, 0.00,  0.00,  0.00,  0.00,  0.00,  12.00, 13.00, 14.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  15.00, 16.00, 17.00, 0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,

      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  18.00,
      19.00, 20.00, 0.00,  0.00,  0.00,  0.00,  0.00,  21.00, 22.00, 23.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  24.00, 25.00, 26.00, 0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,

      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,

      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,

      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,
      0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00,  0.00};
  conv3DTestHelper(filename, inputDims, expectedDims, expectedValues);
}
1283
1284/// Import conv1D
1285static void importConv1DTest(std::string &netFilename,
1286 llvm::ArrayRef<float> inputXValues,
1287 llvm::ArrayRef<dim_t> inputXShape,
1288 llvm::ArrayRef<float> inputWValues,
1289 llvm::ArrayRef<dim_t> inputWShape,
1290 llvm::ArrayRef<dim_t> outputShape,
1291 llvm::ArrayRef<float> expectedValues) {
1292 float delta = 1e-07;
1293 ExecutionEngine EE{};
1294 auto &mod = EE.getModule();
1295 Function *F = mod.createFunction("main");
1296 PlaceholderBindings bindings;
1297 Placeholder *graphOutputVar;
1298
1299 Type input_type_x(ElemKind::FloatTy, inputXShape);
1300 Type input_type_w(ElemKind::FloatTy, inputWShape);
1301 ONNXModelLoader onnxLD(netFilename, {"x", "w"},
1302 {&input_type_x, &input_type_w}, *F);
1303
1304 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
1305
1306 auto PHX = mod.getPlaceholderByNameSlow("x");
1307 auto *inTensorX = bindings.allocate(PHX);
1308 inTensorX->getHandle() = inputXValues;
1309
1310 auto PHW = mod.getPlaceholderByNameSlow("w");
1311 auto *inTensorW = bindings.allocate(PHW);
1312 inTensorW->getHandle() = inputWValues;
1313
1314 EE.compile(CompilationMode::Infer);
1315 bindings.allocate(mod.getPlaceholders());
1316 EE.run(bindings);
1317
1318 auto result = bindings.get(graphOutputVar)->getHandle();
1319 ASSERT_TRUE(result.dims() == (llvm::ArrayRef<dim_t>)outputShape);
1320 for (size_t i = 0; i < result.getType().size(); i++) {
1321 EXPECT_NEAR(result.raw(i), expectedValues[i], delta);
1322 }
1323}
1324
1325/// Test Conv1D
1326TEST_F(OnnxImporterTest, conv1D) {
1327 std::vector<float> inputXValues = {
1328 1.4206449, -0.54408556, -1.3318906, 0.771925, 0.9450552, 0.08600737,
1329 0.30009857, -0.36060193, -0.33999684, -0.9809143, -1.0172559, -0.4921318,
1330 -1.0513021, 1.8671927, -0.842103, -0.8903683};
1331 std::vector<float> inputWValues = {0.16575365, -0.42219377, 0.55620337,
1332 -0.5700942, -1.1148645, -0.33808824};
1333 std::vector<dim_t> inputXShape = {1, 2, 8};
1334 std::vector<dim_t> inputWShape = {3, 2, 1};
1335 std::vector<dim_t> outputShape = {1, 3, 8};
1336 std::vector<float> expectedValues = {
1337 0.3790216, 0.32395172, 0.20871338, 0.33572435, 0.6004995, -0.7740611,
1338 0.40527308, 0.31613684, 0.9839977, 0.25659135, -0.16087033, 0.7099088,
1339 1.1249841, -1.0166382, 0.6469939, 0.30702582, -1.4688776, 0.9382173,
1340 1.8287997, -0.6942077, -0.69817555, -0.7271625, -0.04986412, 0.7030453};
1341 std::string netFilename(GLOW_DATA_PATH
1342 "tests/models/onnxModels/conv1D.onnxtxt");
1343 importConv1DTest(netFilename, inputXValues, inputXShape, inputWValues,
1344 inputWShape, outputShape, expectedValues);
1345}
1346
1347/// Test to ensure error handling for missing bias
1348/// input is handled correctly. Remaining input is
1349/// still sane to make sure it only fails for the
1350/// intended case.
1351TEST_F(OnnxImporterTest, importConvBiasFail) {
1352 ExecutionEngine EE{};
1353 auto &mod = EE.getModule();
1354 Function *F = mod.createFunction("main");
1355
1356 std::string NetFilename(GLOW_DATA_PATH
1357 "tests/models/onnxModels/simpleConvBiasFail.onnxtxt");
1358
1359 // Destroy the loader after the graph is loaded since the following execution
1360 // will not depend on anyting from the loader.
1361 {
1362 Tensor data;
1363 getNCHWData(&data, 1, 1, 3, 3);
1364
1365 EXPECT_DEATH(ONNXModelLoader(NetFilename, {"data"}, {&data.getType()}, *F),
1366 "");
1367 }
1368}
1369
1370/// Helper method to run the ConvTranspose operator test cases.
1371/// \p filename contains the model .onnxtxt.
1372/// \p expectedDims: output Tensor dimensions.
1373/// \p expectedValues : output Tensor values expected.
1374/// The input is N*C*H*W (1*1*2*2), the kernels is {3, 3},
1375/// strides is {1, 1}, group is 1. Pads can vary.
1376static void convTransposeTestHelper(std::string &filename,
1377 llvm::ArrayRef<dim_t> expectedDims,
1378 llvm::ArrayRef<float> expectedValues) {
1379
1380 ExecutionEngine EE{};
1381 auto &mod = EE.getModule();
1382 Function *F = mod.createFunction("main");
1383
1384 std::string NetFilename =
1385 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + filename;
1386
1387 PlaceholderBindings bindings;
1388 Placeholder *graphOutputVar;
1389 // Destroy the loader after the graph is loaded since the following execution
1390 // will not depend on anyting from the loader.
1391 {
1392 Tensor data(ElemKind::FloatTy, {1, 1, 2, 2});
1393 data.getHandle() = {2., 3., 4., 5.};
1394
1395 ONNXModelLoader onnxLD(NetFilename, {"data"}, {&data.getType()}, *F);
1396 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
1397 bindings.allocate(mod.getPlaceholders());
1398 updateInputPlaceholdersByName(bindings, &mod, {"data"}, {&data});
1399 }
1400
1401 // ONNX importer loads a ConvTranspose node and converts it to 4 ops:
1402 // Transpose (input) -> Conv -> Transpose
1403 // Transpose (filter) ->
1404 // A save node is added in the network as well. Therefore there are 5 nodes:
1405 // Transpose (input) -> Conv -> Transpose -> Save
1406 // Transpose (filter) ->
1407 // Note that in case the convolution filter is a constant tensor, the filter
1408 // transpose node will be later optimized out by the optimizer.
1409 EXPECT_EQ(F->getNodes().size(), 5);
1410 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1411 EXPECT_EQ(mod.getConstants().size(), 2);
1412
1413 auto *saveNode = getSaveNodeFromDest(graphOutputVar);
1414 auto *node = saveNode->getInput().getNode();
1415
1416 EXPECT_TRUE(node->getKind() == Kinded::Kind::TransposeNodeKind);
1417 auto *convTrNode = llvm::dyn_cast<TransposeNode>(node)->getInput().getNode();
1418
1419 EXPECT_TRUE(convTrNode->getKind() == Kinded::Kind::ConvTransposeNodeKind);
1420 auto *tInNode =
1421 llvm::dyn_cast<ConvTransposeNode>(convTrNode)->getInput().getNode();
1422 auto *tFilterNode =
1423 llvm::dyn_cast<ConvTransposeNode>(convTrNode)->getFilter().getNode();
1424 EXPECT_TRUE(tInNode->getKind() == Kinded::Kind::TransposeNodeKind);
1425 EXPECT_TRUE(tFilterNode->getKind() == Kinded::Kind::TransposeNodeKind);
1426
1427 EE.compile(CompilationMode::Infer);
1428 EE.run(bindings);
1429
1430 EXPECT_EQ(F->getNodes().size(), 4);
1431 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1432 EXPECT_EQ(mod.getConstants().size(), 2);
1433
1434 auto result = bindings.get(graphOutputVar)->getHandle();
1435 EXPECT_TRUE(result.dims() == expectedDims);
1436 for (dim_t i = 0, e = expectedValues.size(); i < e; i++) {
1437 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
1438 }
1439}
1440
1441/// Test loading ConvTranspose op from a ONNX model, no pads.
1442TEST_F(OnnxImporterTest, importConvTranspose) {
1443 std::string filename("simpleConvTranspose.onnxtxt");
1444 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
1445 std::vector<float> expectedValues = {5, 13, 18, 13, 19, 50, 64, 42,
1446 37, 92, 106, 66, 33, 77, 86, 51};
1447 convTransposeTestHelper(filename, expectedDims, expectedValues);
1448}
1449
1450/// Test loading ConvTranspose op from a ONNX model, symmetric pads.
1451TEST_F(OnnxImporterTest, importConvTransposePads) {
1452 std::string filename("simpleConvTransposePads.onnxtxt");
1453 std::vector<dim_t> expectedDims = {1, 1, 3, 3};
1454 std::vector<float> expectedValues = {14., 19., 14., 51., 65.,
1455 43., 93., 107., 67.};
1456 convTransposeTestHelper(filename, expectedDims, expectedValues);
1457}
1458
1459/// Test loading ConvTranspose op from a ONNX model, auto_pad=VALID
1460TEST_F(OnnxImporterTest, importConvTransposeAutoPadValid) {
1461 std::string filename("simpleConvTransposeAutoPadValid.onnxtxt");
1462 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
1463 std::vector<float> expectedValues = {4, 12, 17, 12, 18, 49, 63, 41,
1464 36, 91, 105, 65, 32, 76, 85, 50};
1465 convTransposeTestHelper(filename, expectedDims, expectedValues);
1466}
1467
1468/// Test loading ConvTranspose op from a ONNX model, auto_pad=SAME_UPPER
1469TEST_F(OnnxImporterTest, importConvTransposeAutoPadSameUpper) {
1470 std::string filename("simpleConvTransposeAutoPadSameUpper.onnxtxt");
1471 std::vector<dim_t> expectedDims = {1, 1, 2, 2};
1472 std::vector<float> expectedValues = {49., 63., 91., 105.};
1473 convTransposeTestHelper(filename, expectedDims, expectedValues);
1474}
1475
1476/// Test loading ConvTranspose op from a ONNX model, auto_pad=SAME_LOWER
1477TEST_F(OnnxImporterTest, importConvTransposeAutoPadSameLower) {
1478 std::string filename("simpleConvTransposeAutoPadSameLower.onnxtxt");
1479 std::vector<dim_t> expectedDims = {1, 1, 2, 2};
1480 std::vector<float> expectedValues = {49., 63., 91., 105.};
1481 convTransposeTestHelper(filename, expectedDims, expectedValues);
1482}
1483
1484/// Test loading ConvTranspose op, explicit output_shape, auto_pad=SAME_UPPER.
1485TEST_F(OnnxImporterTest, importConvTransposeOutputShapeSameUpper) {
1486 std::string filename("simpleConvTransposeOutShapeSameUpper.onnxtxt");
1487 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
1488 std::vector<float> expectedValues = {4, 12, 17, 12, 18, 49, 63, 41,
1489 36, 91, 105, 65, 32, 76, 85, 50};
1490 convTransposeTestHelper(filename, expectedDims, expectedValues);
1491}
1492
1493/// Test loading deconv op, explicit output_shape, auto_pad=SAME_LOWER.
1494TEST_F(OnnxImporterTest, importConvTransposeOutputShapeSameLower) {
1495 std::string filename("simpleConvTransposeOutShapeSameLower.onnxtxt");
1496 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
1497 std::vector<float> expectedValues = {4, 12, 17, 12, 18, 49, 63, 41,
1498 36, 91, 105, 65, 32, 76, 85, 50};
1499 convTransposeTestHelper(filename, expectedDims, expectedValues);
1500}
1501
1502/// Test loading ConvTranspose op, explicit output_shape, auto_pad not set.
1503TEST_F(OnnxImporterTest, importConvTransposeOutputShape) {
1504 std::string filename("simpleConvTransposeOutShape.onnxtxt");
1505 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
1506 std::vector<float> expectedValues = {4, 12, 17, 12, 18, 49, 63, 41,
1507 36, 91, 105, 65, 32, 76, 85, 50};
1508 convTransposeTestHelper(filename, expectedDims, expectedValues);
1509}
1510
1511/// Helper method to run the Range operator test cases.
1512/// \p filename contains the model .onnxtxt.
1513/// \p expectedDims: output Tensor dimensions.
1514/// \p expectedValues : output Tensor values expected.
1515template <typename T>
1516static void rangeTestHelper(std::string &filename,
1517 llvm::ArrayRef<dim_t> expectedDims,
1518 llvm::ArrayRef<T> expectedValues) {
1519 ExecutionEngine EE{};
1520 auto &mod = EE.getModule();
1521 Function *F = mod.createFunction("main");
1522
1523 std::string NetFilename =
1524 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + filename;
1525
1526 PlaceholderBindings bindings;
1527 Placeholder *output;
1528 {
1529 ONNXModelLoader onnxLD(NetFilename, {}, {}, *F);
1530 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
1531 bindings.allocate(mod.getPlaceholders());
1532 updateInputPlaceholdersByName(bindings, &mod, {}, {});
1533 }
1534 auto *res = bindings.get(output);
1535 EE.compile(CompilationMode::Infer);
1536 EE.run(bindings);
1537 auto result = res->getHandle<T>();
1538 EXPECT_TRUE(result.dims() == expectedDims);
1539 for (dim_t i = 0, e = expectedValues.size(); i < e; i++) {
1540 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
1541 }
1542}
1543
1544/// Test loading Range with int32 datatype.
1545TEST(onnx, importRangeInt32) {
1546 std::string filename("RangeInt32.onnxtxt");
1547 std::vector<dim_t> expectedDims = {2};
1548 std::vector<int32_t> expectedValues = {10, 7};
1549 rangeTestHelper<int32_t>(filename, expectedDims, expectedValues);
1550}
1551
1552/// Test loading Range with float datatype.
1553TEST(onnx, importRangeFloat) {
1554 std::string filename("RangeFloat.onnxtxt");
1555 std::vector<dim_t> expectedDims = {5};
1556 std::vector<float> expectedValues = {0.0, 1.0, 2.0, 3.0, 4.0};
1557 rangeTestHelper<float>(filename, expectedDims, expectedValues);
1558}
1559
1560/// Test loading ConvTranspose, implicit kernel, multi-channel input/output,
1561/// asymmetric kernel and pads.
1562TEST(onnx, importDeconvAsymmetric) {
1563
1564 ExecutionEngine EE{};
1565 auto &mod = EE.getModule();
1566 Function *F = mod.createFunction("main");
1567
1568 std::string NetFilename = std::string(
1569 GLOW_DATA_PATH "tests/models/onnxModels/convTransposeAsymmetric.onnxtxt");
1570
1571 PlaceholderBindings bindings;
1572 Placeholder *output;
1573 {
1574 Tensor input(ElemKind::FloatTy, {1, 3, 4, 4});
1575 for (dim_t i = 0; i < 3 * 4 * 4; i++) {
1576 input.getHandle().raw(i) = i;
1577 }
1578 Tensor filter(ElemKind::FloatTy, {3, 2, 3, 2});
1579 for (dim_t i = 0; i < 3 * 2 * 3 * 2; i++) {
1580 filter.getHandle().raw(i) = i * 2;
1581 }
1582 ONNXModelLoader onnxLD(NetFilename, {"X", "W"},
1583 {&input.getType(), &filter.getType()}, *F);
1584 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
1585 bindings.allocate(mod.getPlaceholders());
1586 updateInputPlaceholdersByName(bindings, &mod, {"X", "W"},
1587 {&input, &filter});
1588 }
1589 auto *res = bindings.get(output);
1590 EE.compile(CompilationMode::Infer);
1591 EE.run(bindings);
1592
1593 auto result = res->getHandle();
1594
1595 EXPECT_TRUE(result.dims() == llvm::ArrayRef<dim_t>({1, 2, 5, 3}));
1596
1597 std::vector<float> expected = {
1598 2095.1, 2065.1, 2173.1, 4705.1, 4633.1, 4873.1, 7879.1, 7753.1,
1599 8149.1, 8959.1, 8761.1, 9229.1, 6697.1, 6553.1, 6889.1, 2708.2,
1600 2714.2, 2822.2, 6074.2, 6074.2, 6314.2, 10148.2, 10130.2, 10526.2,
1601 11660.2, 11570.2, 12038.2, 8642.2, 8570.2, 8906.2};
1602
1603 for (dim_t i = 0, e = expected.size(); i < e; i++) {
1604 EXPECT_FLOAT_EQ(result.raw(i), expected[i]);
1605 }
1606}
1607
1608// ConvTranspose test with Group>1
1609TEST(onnx, importDeconvGrouped) {
1610
1611 ExecutionEngine EE{};
1612 auto &mod = EE.getModule();
1613 Function *F = mod.createFunction("main");
1614
1615 std::string NetFilename = std::string(
1616 GLOW_DATA_PATH "tests/models/onnxModels/convTransposeGroup.onnxtxt");
1617
1618 PlaceholderBindings bindings;
1619 Placeholder *output;
1620 {
1621 Tensor input(ElemKind::FloatTy, {1, 2, 3, 3});
1622 for (dim_t i = 0; i < 2 * 3 * 3; i++) {
1623 input.getHandle().raw(i) = i;
1624 }
1625 Tensor filter(ElemKind::FloatTy, {2, 1, 2, 2});
1626 for (dim_t i = 0; i < 2 * 2 * 2; i++) {
1627 filter.getHandle().raw(i) = i * 2;
1628 }
1629 ONNXModelLoader onnxLD(NetFilename, {"X", "W"},
1630 {&input.getType(), &filter.getType()}, *F);
1631 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
1632 bindings.allocate(mod.getPlaceholders());
1633 updateInputPlaceholdersByName(bindings, &mod, {"X", "W"},
1634 {&input, &filter});
1635 }
1636 auto *res = bindings.get(output);
1637 EE.compile(CompilationMode::Infer);
1638 EE.run(bindings);
1639
1640 auto result = res->getHandle();
1641
1642 EXPECT_TRUE(result.dims() == llvm::ArrayRef<dim_t>({1, 2, 6, 6}));
1643
1644 std::vector<float> expected = {
1645 0, 0, 0, 2, 0, 4, 0, 0, 4, 6, 8, 12, 0, 6, 0,
1646 8, 0, 10, 12, 18, 16, 24, 20, 30, 0, 12, 0, 14, 0, 16,
1647 24, 36, 28, 42, 32, 48, 72, 90, 80, 100, 88, 110, 108, 126, 120,
1648 140, 132, 154, 96, 120, 104, 130, 112, 140, 144, 168, 156, 182, 168, 196,
1649 120, 150, 128, 160, 136, 170, 180, 210, 192, 224, 204, 238};
1650
1651 for (dim_t i = 0, e = expected.size(); i < e; i++) {
1652 EXPECT_FLOAT_EQ(result.raw(i), expected[i]);
1653 }
1654}
1655
1656/// Helper method to run the AveragePool operator test cases.
1657/// \p filename contains the model .onnxtxt.
1658/// \p expectedDims: output Tensor dimensions.
1659/// \p expectedValues : output Tensor values expected.
1660/// \p global: GlobalAveragePool if true, AveragePool if false.
1661/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
1662/// strides is {1, 1}, group is 1. Pads can vary in filename.
1663static void averagePoolTestHelper(std::string &filename,
1664 llvm::ArrayRef<dim_t> expectedDims,
1665 llvm::ArrayRef<float> expectedValues) {
1666
1667 ExecutionEngine EE{};
1668 auto &mod = EE.getModule();
1669 Function *F = mod.createFunction("main");
1670
1671 std::string NetFilename =
1672 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + filename;
1673
1674 PlaceholderBindings bindings;
1675 Placeholder *graphOutputVar;
1676 // Destroy the loader after the graph is loaded since the following execution
1677 // will not depend on anyting from the loader.
1678 Tensor data;
1679 getNCHWData(&data, 1, 1, 3, 3);
1680 {
1681 ONNXModelLoader onnxLD(NetFilename, {"x"}, {&data.getType()}, *F);
1682 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
1683 bindings.allocate(mod.getPlaceholders());
1684 updateInputPlaceholdersByName(bindings, &mod, {"x"}, {&data});
1685 }
1686
1687 // ONNX importer loads a AveragePool node and converts it to 4 ops:
1688 // Transpose (input) -> AveragePool -> Transpose -> Save
1689 EXPECT_EQ(F->getNodes().size(), 4);
1690 EXPECT_EQ(mod.getPlaceholders().size(), 2);
1691
1692 auto *saveNode = getSaveNodeFromDest(graphOutputVar);
1693 auto *node = saveNode->getInput().getNode();
1694
1695 EXPECT_TRUE(node->getKind() == Kinded::Kind::TransposeNodeKind);
1696 auto *poolNode = llvm::dyn_cast<TransposeNode>(node)->getInput().getNode();
1697
1698 EXPECT_TRUE(poolNode->getKind() == Kinded::Kind::AvgPoolNodeKind);
1699 auto *tInNode = llvm::dyn_cast<AvgPoolNode>(poolNode)->getInput().getNode();
1700
1701 EXPECT_TRUE(tInNode->getKind() == Kinded::Kind::TransposeNodeKind);
1702
1703 EE.compile(CompilationMode::Infer);
1704 EE.run(bindings);
1705 auto result = bindings.get(graphOutputVar)->getHandle();
1706 EXPECT_TRUE(result.dims() == expectedDims);
1707 for (size_t i = 0, e = expectedValues.size(); i < e; i++) {
1708 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
1709 }
1710
1711 // Constant Folding Test.
1712 FAIL_TEST_IF_ERR(checkConstFoldedOutput(NetFilename, {"x"}, {&data},
1713 {bindings.get(graphOutputVar)}));
1714}
1715
1716/// Test loading AveragePool op from a ONNX model.
1717/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
1718/// strides is {1, 1}, pads is auto_pad VALID (no padding), group is 1.
1719TEST_F(OnnxImporterTest, importAveragePool2DAutoPadValid) {
1720 std::string filename("averagePool2DAutoPadValid.onnxtxt");
1721 std::vector<dim_t> expectedDims = {1, 1, 2, 2};
1722 std::vector<float> expectedValues = {2, 3, 5, 6};
1723 averagePoolTestHelper(filename, expectedDims, expectedValues);
1724}
1725
1726/// Test loading AveragePool op from a ONNX model.
1727/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
1728/// strides is {1, 1}, pads is auto_pad SAME_UPPER, group is 1.
1729TEST_F(OnnxImporterTest, importAveragePool2DAutoPadSameUpper) {
1730 std::string filename("averagePool2DAutoPadSameUpper.onnxtxt");
1731 std::vector<dim_t> expectedDims = {1, 1, 3, 3};
1732 std::vector<float> expectedValues = {2, 3, 1.75, 5, 6, 3.25, 3.25, 3.75, 2};
1733 averagePoolTestHelper(filename, expectedDims, expectedValues);
1734}
1735
1736/// Test loading AveragePool op from a ONNX model.
1737/// The input is N*C*H*W (1*1*3*3), the kernels is {2, 2},
1738/// strides is {1, 1}, pads is auto_pad SAME_LOWER, group is 1.
1739TEST_F(OnnxImporterTest, importAveragePool2DAutoPadSameLower) {
1740 std::string filename("averagePool2DAutoPadSameLower.onnxtxt");
1741 std::vector<dim_t> expectedDims = {1, 1, 3, 3};
1742 std::vector<float> expectedValues = {0, 0.25, 0.75, 0.75, 2, 3, 2.25, 5, 6};
1743 averagePoolTestHelper(filename, expectedDims, expectedValues);
1744}
1745
1746/// Test loading AveragePool op from a ONNX model.
1747/// The input is N*C*H*W (1*1*3*3), the kernels is {3, 3},
1748/// strides is {2, 2}, pads is {1, 1, 1, 1},
1749/// countIncludePads is false.
1750TEST_F(OnnxImporterTest, importAveragePool2DCountExcludePads) {
1751 std::string filename("averagePool2DCountExcludePads.onnxtxt");
1752 std::vector<dim_t> expectedDims = {1, 1, 2, 2};
1753 std::vector<float> expectedValues = {2, 3, 5, 6};
1754 averagePoolTestHelper(filename, expectedDims, expectedValues);
1755}
1756
/// Test that loading AveragePool with a 5D (N, C, D, H, W) input makes the
/// loader die; EXPECT_DEATH with an empty regex accepts any death message.
TEST_F(OnnxImporterTest, importAveragePool3D) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/averagePool3D.onnxtxt");

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    // 5D input; presumably 3D pooling is unsupported by this importer path —
    // TODO confirm against the loader's pooling support.
    Tensor data(ElemKind::FloatTy, {1, 3, 32, 32, 32});
    EXPECT_DEATH(ONNXModelLoader(NetFilename, {"x"}, {&data.getType()}, *F),
                 "");
  }
}
1773
1774static void testReductionOps(std::string modelName,
1775 const std::vector<dim_t> &expectedDims,
1776 const std::vector<float> &expectedValues) {
1777 ExecutionEngine EE{};
1778 auto &mod = EE.getModule();
1779 Function *F = mod.createFunction("main");
1780
1781 // Input.
1782 Tensor x(ElemKind::FloatTy, {2, 2, 2, 2});
1783 x.getHandle() = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
1784
1785 // Load model.
1786 std::string netFilename =
1787 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + modelName;
1788 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
1789 Placeholder *output = EXIT_ON_ERR(onnxLD.getSingleOutput());
1790
1791 // Allocate placeholders.
1792 PlaceholderBindings bindings;
1793 bindings.allocate(mod.getPlaceholders());
1794 updateInputPlaceholdersByName(bindings, &mod, {"x"}, {&x});
1795
1796 auto *res = bindings.get(output);
1797 EE.compile(CompilationMode::Infer);
1798 EE.run(bindings);
1799
1800 // Compare results.
1801 auto result = res->getHandle();
1802 EXPECT_TRUE(result.dims().vec() == expectedDims);
1803 for (dim_t i = 0; i < result.size(); i++) {
1804 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
1805 }
1806
1807 // Constant Folding Test.
1808 FAIL_TEST_IF_ERR(
1809 checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)}));
1810}
1811
/// Test loading ReduceMean op from a ONNX model.
/// Input shape is 4D, one dimension is reduced, and output shape is 3D
/// (keepdims disabled). The helper feeds 1..16; averaging adjacent pairs
/// along the reduced axis gives 1.5, 3.5, ..., 15.5.
TEST_F(OnnxImporterTest, reduceMean4Dto3D) {
  testReductionOps("reduceMean4Dto3D.onnxtxt", {2, 2, 2},
                   {1.5, 3.5, 5.5, 7.5, 9.5, 11.5, 13.5, 15.5});
}
1818
/// Test loading ReduceMean op from a ONNX model.
/// Input shape is 4D, one dimension is reduced, and output shape stays 4D
/// (keepdims enabled — note the trailing 1 in the expected dims).
TEST_F(OnnxImporterTest, reduceMean4Dto4D) {
  testReductionOps("reduceMean4Dto4D.onnxtxt", {2, 2, 2, 1},
                   {1.5, 3.5, 5.5, 7.5, 9.5, 11.5, 13.5, 15.5});
}
1825
/// Test loading ReduceSum op from a ONNX model.
/// Input shape is 4D, one dimension is reduced, and output shape is 4D
/// (keepdims). Each output is the sum of an adjacent input pair: 1+2=3, ...
TEST_F(OnnxImporterTest, reduceSum4D) {
  testReductionOps("reduceSum4D.onnxtxt", {2, 2, 2, 1},
                   {3, 7, 11, 15, 19, 23, 27, 31});
}
1832
/// Test loading ReduceMean op from a ONNX model.
/// Input shape is 4D, two dimensions are reduced, targeting ReduceMean
/// optimization using AvgPool. Output shape is 4D (keepdims); each output is
/// the mean of four consecutive inputs, e.g. (1+2+3+4)/4 = 2.5.
TEST_F(OnnxImporterTest, reduceMean2AvgPoolKeepDims) {
  testReductionOps("reduceMean2AvgPool.onnxtxt", {2, 2, 1, 1},
                   {2.5, 6.5, 10.5, 14.5});
}
1840
1841/// Test loading ReduceSumSquare op from a ONNX model.
1842/// Input shape is 4D, one dimension is reduced, and output shape is 4D.
1843TEST_F(OnnxImporterTest, reduceSumSquare4D) {
1844 ExecutionEngine EE{};
1845 auto &mod = EE.getModule();
1846 Function *F = mod.createFunction("main");
1847
1848 std::string netFilename(GLOW_DATA_PATH
1849 "tests/models/onnxModels/reduceSumSquare4D.onnxtxt");
1850
1851 PlaceholderBindings bindings;
1852 Placeholder *output;
1853 Tensor x(ElemKind::FloatTy, {2, 2, 2, 2});
1854 x.getHandle() = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
1855
1856 {
1857
1858 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
1859 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
1860 bindings.allocate(mod.getPlaceholders());
1861
1862 updateInputPlaceholdersByName(bindings, &mod, {"x"}, {&x});
1863 }
1864
1865 auto *res = bindings.get(output);
1866 EE.compile(CompilationMode::Infer);
1867 EE.run(bindings);
1868 auto result = res->getHandle();
1869 std::vector<dim_t> expectedDims = {2, 2, 2, 1};
1870 std::vector<float> expectedValues = {5, 25, 61, 113, 181, 265, 365, 481};
1871
1872 EXPECT_TRUE(result.dims().vec() == expectedDims);
1873 for (size_t i = 0; i < 8; i++) {
1874 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
1875 }
1876 // Constant Folding Test.
1877 FAIL_TEST_IF_ERR(
1878 checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)}));
1879}
1880
/// Test loading ReduceMean op from a ONNX model.
/// Input shape is 4D, two dimensions are reduced, targeting ReduceMean
/// optimization using AvgPool. Output shape is 2D (keepdims disabled).
TEST_F(OnnxImporterTest, reduceMean2AvgPoolNoKeepDims) {
  testReductionOps("reduceMean2AvgPoolNoKeep.onnxtxt", {2, 2},
                   {2.5, 6.5, 10.5, 14.5});
}
1888
/// Test loading ReduceMax op from a ONNX model.
/// Input shape is 4D, two dimensions are reduced, and the output shape is 4D
/// (keepdims). Each output is the max over four inputs: max(1..4) = 4, ...
TEST_F(OnnxImporterTest, reduceMaxKeepDims) {
  testReductionOps("reduceMax.onnxtxt", {2, 2, 1, 1}, {4, 8, 12, 16});
}
1894
/// Test loading ReduceMax op from a ONNX model.
/// Input shape is 4D, two dimensions are reduced with keepdims disabled, so
/// the output shape is 2D.
TEST_F(OnnxImporterTest, reduceMaxNoKeepDims) {
  testReductionOps("reduceMaxNoKeep.onnxtxt", {2, 2}, {4, 8, 12, 16});
}
1901
/// Test loading ReduceMax op from a ONNX model.
/// All dimensions are reduced (default axes) with keepdims; the output is
/// {1, 1, 1, 1} holding the global max of 1..16, i.e. 16.
TEST_F(OnnxImporterTest, reduceMaxKeepDimsDefaultAxis) {
  testReductionOps("reduceMaxDefaultAxis.onnxtxt", {1, 1, 1, 1}, {16});
}
1907
/// Test loading ReduceMin op from a ONNX model.
/// Input shape is 4D, two dimensions are reduced, and the output shape is 4D
/// (keepdims). Each output is the min over four inputs: min(1..4) = 1, ...
TEST_F(OnnxImporterTest, reduceMinKeepDims) {
  testReductionOps("reduceMin.onnxtxt", {2, 2, 1, 1}, {1, 5, 9, 13});
}
1913
/// Test loading ReduceMin op from a ONNX model.
/// Input shape is 4D, two dimensions are reduced with keepdims disabled, so
/// the output shape is 2D.
TEST_F(OnnxImporterTest, reduceMinNoKeepDims) {
  testReductionOps("reduceMinNoKeep.onnxtxt", {2, 2}, {1, 5, 9, 13});
}
1920
/// Test loading ReduceMin op from a ONNX model.
/// All dimensions are reduced (default axes) with keepdims; the output is
/// {1, 1, 1, 1} holding the global min of 1..16, i.e. 1.
TEST_F(OnnxImporterTest, reduceMinKeepDimsDefaultAxis) {
  testReductionOps("reduceMinDefaultAxis.onnxtxt", {1, 1, 1, 1}, {1});
}
1926
/// Test loading ReduceProd op from a ONNX model.
/// Input shape is 4D, one dimension is reduced, and output shape is 4D
/// (keepdims). Each output is the product of an adjacent pair: 1*2=2, 3*4=12...
TEST_F(OnnxImporterTest, reduceProd4D) {
  testReductionOps("reduceProd.onnxtxt", {2, 2, 2, 1},
                   {2, 12, 30, 56, 90, 132, 182, 240});
}
1933
1934static void testDepthToSpace(std::string &filename,
1935 const std::vector<dim_t> &expectedDims,
1936 const std::vector<float> &expectedValues) {
1937 ExecutionEngine EE{};
1938 auto &mod = EE.getModule();
1939 Function *F = mod.createFunction("main");
1940
1941 std::string netFilename =
1942 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + filename;
1943
1944 PlaceholderBindings bindings;
1945 Placeholder *output;
1946 {
1947 // NCHW
1948 Tensor x(ElemKind::FloatTy, {1, 8, 2, 3});
1949 x.getHandle() = {0., 1., 2., 3., 4., 5., 9., 10., 11., 12.,
1950 13., 14., 18., 19., 20., 21., 22., 23., 27., 28.,
1951 29., 30., 31., 32., 36., 37., 38., 39., 40., 41.,
1952 45., 46., 47., 48., 49., 50., 54., 55., 56., 57.,
1953 58., 59., 63., 64., 65., 66., 67., 68.};
1954
1955 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
1956 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
1957 bindings.allocate(mod.getPlaceholders());
1958 updateInputPlaceholdersByName(bindings, &mod, {"x"}, {&x});
1959 }
1960
1961 auto *res = bindings.get(output);
1962 EE.compile(CompilationMode::Infer);
1963 EE.run(bindings);
1964
1965 auto result = res->getHandle();
1966 EXPECT_TRUE(result.dims().vec() == expectedDims);
1967 for (size_t i = 0; i < result.size(); i++) {
1968 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
1969 }
1970}
1971
1972/// Test loading DepthToSpace with mode=CRD from an ONNX model.
1973TEST_F(OnnxImporterTest, depthToSpaceCRD) {
1974 std::string filename("depthToSpace_crd.onnxtxt");
1975 std::vector<dim_t> expectedDims = {1, 2, 4, 6};
1976 std::vector<float> expectedValues = {
1977 0, 9, 1, 10, 2, 11, 18, 27, 19, 28, 20, 29, 3, 12, 4, 13,
1978 5, 14, 21, 30, 22, 31, 23, 32, 36, 45, 37, 46, 38, 47, 54, 63,
1979 55, 64, 56, 65, 39, 48, 40, 49, 41, 50, 57, 66, 58, 67, 59, 68};
1980 testDepthToSpace(filename, expectedDims, expectedValues);
1981}
1982
1983/// Test loading DepthToSpace with default mode(DCR) from an ONNX model.
1984TEST_F(OnnxImporterTest, depthToSpaceDCR) {
1985 std::string filename("depthToSpace.onnxtxt");
1986 std::vector<dim_t> expectedDims = {1, 2, 4, 6};
1987 std::vector<float> expectedValues = {
1988 0, 18, 1, 19, 2, 20, 36, 54, 37, 55, 38, 56, 3, 21, 4, 22,
1989 5, 23, 39, 57, 40, 58, 41, 59, 9, 27, 10, 28, 11, 29, 45, 63,
1990 46, 64, 47, 65, 12, 30, 13, 31, 14, 32, 48, 66, 49, 67, 50, 68,
1991 };
1992 testDepthToSpace(filename, expectedDims, expectedValues);
1993}
1994
1995/// Test loading SpaceToDepth op from an ONNX model.
1996TEST_F(OnnxImporterTest, spaceToDepth) {
1997 ExecutionEngine EE{};
1998 auto &mod = EE.getModule();
1999 Function *F = mod.createFunction("main");
2000
2001 std::string netFilename(GLOW_DATA_PATH
2002 "tests/models/onnxModels/spaceToDepth.onnxtxt");
2003
2004 PlaceholderBindings bindings;
2005 Placeholder *output;
2006 {
2007 Tensor x(ElemKind::FloatTy, {1, 2, 4, 4});
2008 x.zero();
2009
2010 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
2011 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2012 }
2013
2014 auto *save = getSaveNodeFromDest(output);
2015 TransposeNode *TRN =
2016 llvm::dyn_cast<TransposeNode>(save->getInput().getNode());
2017 ASSERT_TRUE(TRN);
2018 SpaceToDepthNode *STDN =
2019 llvm::dyn_cast<SpaceToDepthNode>(TRN->getInput().getNode());
2020 ASSERT_TRUE(STDN);
2021 unsigned blockSize = STDN->getBlockSize();
2022 EXPECT_EQ(blockSize, 2);
2023}
2024
2025/// Test loading clip op from an ONNX model.
2026/// Test with arg min = 20.0 max = 60.0
2027TEST_F(OnnxImporterTest, importClip) {
2028 ExecutionEngine EE{};
2029 auto &mod = EE.getModule();
2030 Function *F = mod.createFunction("main");
2031
2032 std::string netFilename(GLOW_DATA_PATH
2033 "tests/models/onnxModels/clip.onnxtxt");
2034
2035 PlaceholderBindings bindings;
2036 Placeholder *output;
2037 Tensor x(ElemKind::FloatTy, {3, 3});
2038 x.getHandle() = {1, 2, 3, 40, 5, 6, 7, 8, 90};
2039
2040 {
2041 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
2042 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2043 bindings.allocate(mod.getPlaceholders());
2044
2045 updateInputPlaceholdersByName(bindings, &mod, {"x"}, {&x});
2046 }
2047
2048 auto *res = bindings.get(output);
2049 EE.compile(CompilationMode::Infer);
2050 EE.run(bindings);
2051
2052 auto result = res->getHandle();
2053 std::vector<dim_t> expectedDims = {3, 3};
2054 std::vector<float> expectedValues = {20, 20, 20, 40, 20, 20, 20, 20, 60};
2055
2056 EXPECT_TRUE(result.dims().vec() == expectedDims);
2057 for (size_t i = 0; i < 3 * 3; i++) {
2058 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
2059 }
2060
2061 // Constant Folding Test.
2062 FAIL_TEST_IF_ERR(
2063 checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)}));
2064}
2065
2066/// Test loading MatMul op from an ONNX model with dimension equal to 3
2067TEST_F(OnnxImporterTest, importMatMul) {
2068 ExecutionEngine EE{};
2069 auto &mod = EE.getModule();
2070 Function *F = mod.createFunction("main");
2071 std::string netFilename(GLOW_DATA_PATH
2072 "tests/models/onnxModels/matmul.onnxtxt");
2073
2074 PlaceholderBindings bindings;
2075 Placeholder *output;
2076 Tensor inputs_0(ElemKind::FloatTy, {20, 40, 7});
2077 Tensor inputs_1(ElemKind::FloatTy, {20, 7, 40});
2078 auto data_0 = inputs_0.getHandle();
2079 auto data_1 = inputs_1.getHandle();
2080 // Fill inputs with random positive values.
2081 data_0.randomize(0.0, 5.0, mod.getPRNG());
2082 data_1.randomize(1.0, 2.0, mod.getPRNG());
2083 {
2084 ONNXModelLoader onnxLD(netFilename, {"inputs_0", "inputs_1"},
2085 {&inputs_0.getType(), &inputs_1.getType()}, *F);
2086 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2087 bindings.allocate(mod.getPlaceholders());
2088 updateInputPlaceholdersByName(bindings, &mod, {"inputs_0", "inputs_1"},
2089 {&inputs_0, &inputs_1});
2090 }
2091 auto *res = bindings.get(output);
2092 EE.compile(CompilationMode::Infer);
2093 EE.run(bindings);
2094
2095 auto result = res->getHandle();
2096 std::vector<dim_t> expectedDims = {20, 40, 40};
2097 EXPECT_EQ(result.dims().vec(), expectedDims);
2098}
2099
/// Test loading BatchMatMul op from an ONNX model.
TEST_F(OnnxImporterTest, importBatchMatMul) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/batch_matmul.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  // A batch of 20 operand pairs.
  Tensor inputs_0(ElemKind::FloatTy, {20, 40, 7});
  Tensor inputs_1(ElemKind::FloatTy, {20, 7, 40});
  auto data_0 = inputs_0.getHandle();
  auto data_1 = inputs_1.getHandle();
  // Fill inputs with random positive values.
  data_0.randomize(0.0, 5.0, mod.getPRNG());
  data_1.randomize(1.0, 2.0, mod.getPRNG());
  {
    ONNXModelLoader onnxLD(netFilename, {"inputs_0", "inputs_1"},
                           {&inputs_0.getType(), &inputs_1.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"inputs_0", "inputs_1"},
                                  {&inputs_0, &inputs_1});
  }
  auto *res = bindings.get(output);
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  // Only the output shape is validated; the model inserts transposes (see the
  // node count below), so the result is {20, 7, 7} rather than {20, 40, 40}.
  // NOTE(review): the exact wiring lives in batch_matmul.onnxtxt.
  auto result = res->getHandle();
  std::vector<dim_t> expectedDims = {20, 7, 7};
  EXPECT_EQ(result.dims().vec(), expectedDims);

  // High level check on the content of the graph.
  // We have 2 transpose, 20 * (matmul, 2 slices, 2 reshapes), 1 concat, 1
  // reshape, 1 save.
  EXPECT_EQ(F->getNodes().size(), 2 + 20 * 5 + 3);
  // We have 2 inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);
  // Check that the graph has the expected shape,
  // starting from the output.
  // Batched matmul with broadcasted RHS are lowered
  // to a regular matmul, where LHS is reshaped from
  // a 3D tensor to a flattened matrix.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *reshapeResult =
      llvm::dyn_cast<ReshapeNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(reshapeResult);
  auto *concat =
      llvm::dyn_cast<ConcatNode>(reshapeResult->getInput().getNode());
  ASSERT_TRUE(concat);
  // Each of the 20 concat inputs is a per-batch-element matmul whose two
  // operands are each a reshape of a slice of the corresponding input.
  for (size_t i = 0; i < 20; ++i) {
    auto *matmulI =
        llvm::dyn_cast<MatMulNode>(concat->getNthInput(i).getNode());
    ASSERT_TRUE(matmulI);
    for (size_t j = 0; j < 2; ++j) {
      auto *reshape0 =
          llvm::dyn_cast<ReshapeNode>(matmulI->getNthInput(j).getNode());
      ASSERT_TRUE(reshape0);
      auto *slice0 = llvm::dyn_cast<SliceNode>(reshape0->getInput().getNode());
      ASSERT_TRUE(slice0);
    }
  }
  // Constant Folding Test.
  FAIL_TEST_IF_ERR(checkConstFoldedOutput(netFilename, {"inputs_0", "inputs_1"},
                                          {&inputs_0, &inputs_1},
                                          {bindings.get(output)}));
}
2168
/// Test loading BatchBoxCox op from an ONNX model.
TEST_F(OnnxImporterTest, importBatchBoxCox) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/batchBoxCox.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;

  // Make input tensors.
  const dim_t kRows = 3;
  const dim_t kCols = 3;
  Tensor data(ElemKind::FloatTy, {kRows, kCols});
  Tensor lambda1(ElemKind::FloatTy, {kCols});
  Tensor lambda2(ElemKind::FloatTy, {kCols});
  auto dataH = data.getHandle();
  auto lambda1H = lambda1.getHandle();
  auto lambda2H = lambda2.getHandle();

  // Fill inputs with random positive values.
  // NOTE: all three draws consume the same mod.getPRNG() stream, so their
  // order is significant for reproducing the same values.
  dataH.randomize(0.0, 5.0, mod.getPRNG());
  lambda1H.randomize(1.0, 2.0, mod.getPRNG());
  lambda2H.randomize(1.0, 2.0, mod.getPRNG());

  // Zero out every other element of lambda1 to exercise the lambda1 == 0
  // (log) branch of the transform as well as the pow branch.
  for (dim_t i = 0; i < kCols; i += 2) {
    lambda1H.at({i}) = 0;
  }

  {
    ONNXModelLoader onnxLD(
        netFilename, {"data", "lambda1", "lambda2"},
        {&data.getType(), &lambda1.getType(), &lambda2.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());

    updateInputPlaceholdersByName(bindings, &mod,
                                  {"data", "lambda1", "lambda2"},
                                  {&data, &lambda1, &lambda2});
  }

  auto *res = bindings.get(output);
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  auto result = res->getHandle();

  // Output should have the same dims as the inputs.
  EXPECT_TRUE(result.dims().vec() == data.dims().vec());

  // Compute elementwise Box-Cox transform and compare with corresponding
  // element of result. Reference: y = log(max(d + l2, 1e-6)) when l1 == 0,
  // otherwise y = (max(d + l2, 1e-6)^l1 - 1) / l1.
  for (dim_t i = 0; i < kRows; ++i) {
    for (dim_t j = 0; j < kCols; ++j) {
      float d = dataH.at({i, j});
      float l1 = lambda1H.at({j});
      float l2 = lambda2H.at({j});

      // Clip argument to log and pow at 1e-6 to avoid saturation.
      float tmp = std::max(d + l2, 1e-6f);
      float y = 0;

      if (l1 == 0) {
        y = std::log(tmp);
      } else {
        y = (std::pow(tmp, l1) - 1) / l1;
      }

      EXPECT_FLOAT_EQ(y, result.at({i, j}));
    }
  }

  // Constant Folding Test.
  FAIL_TEST_IF_ERR(checkConstFoldedOutput(
      netFilename, {"data", "lambda1", "lambda2"}, {&data, &lambda1, &lambda2},
      {bindings.get(output)}));
}
2249
2250/// Test loading DotProduct op from an ONNX model.
2251TEST_F(OnnxImporterTest, importDotProduct) {
2252 ExecutionEngine EE{};
2253 auto &mod = EE.getModule();
2254 Function *F = mod.createFunction("main");
2255
2256 std::string netFilename(GLOW_DATA_PATH
2257 "tests/models/onnxModels/dot_product.onnxtxt");
2258
2259 Placeholder *output;
2260 {
2261 Tensor x(ElemKind::FloatTy, {3, 3});
2262 Tensor y(ElemKind::FloatTy, {3, 3});
2263
2264 ONNXModelLoader onnxLD(netFilename, {"x", "y"},
2265 {&x.getType(), &y.getType()}, *F);
2266 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2267 }
2268
2269 // Just verify the structure.
2270 // SaveNode + MulNode + BatchedReduceAddNode.
2271 ASSERT_EQ(3, F->getNodes().size());
2272 auto *saveNode = getSaveNodeFromDest(output);
2273 auto *saveInput = saveNode->getInput().getNode();
2274 ASSERT_TRUE(llvm::isa<BatchedReduceAddNode>(saveInput));
2275
2276 auto *batchedReduceAdd = llvm::cast<BatchedReduceAddNode>(saveInput);
2277 ASSERT_TRUE(llvm::isa<MulNode>(batchedReduceAdd->getBatch()));
2278}
2279
2280/// Test loading Sum with more than 2 inputs
2281TEST_F(OnnxImporterTest, importSumN) {
2282 ExecutionEngine EE{};
2283 auto &mod = EE.getModule();
2284 Function *F = mod.createFunction("main");
2285 std::string netFilename(GLOW_DATA_PATH
2286 "tests/models/onnxModels/sumN.onnxtxt");
2287
2288 PlaceholderBindings bindings;
2289 Placeholder *output;
2290 Tensor i0(ElemKind::FloatTy, {3});
2291 i0.getHandle() = {1, 2, 3};
2292 Tensor i1(ElemKind::FloatTy, {3});
2293 i1.getHandle() = {4, 5, 6};
2294 Tensor i2(ElemKind::FloatTy, {3});
2295 i2.getHandle() = {7, 8, 9};
2296 {
2297
2298 ONNXModelLoader onnxLD(netFilename, {"i0", "i1", "i2"},
2299 {&i0.getType(), &i1.getType(), &i2.getType()}, *F);
2300 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2301
2302 bindings.allocate(mod.getPlaceholders());
2303 updateInputPlaceholdersByName(bindings, &mod, {"i0", "i1", "i2"},
2304 {&i0, &i1, &i2});
2305 }
2306
2307 auto *res = bindings.get(output);
2308 EE.compile(CompilationMode::Infer);
2309 EE.run(bindings);
2310
2311 auto result = res->getHandle();
2312 std::vector<dim_t> expectedDims = {3};
2313 std::vector<float> expectedValues = {12, 15, 18};
2314
2315 EXPECT_EQ(result.dims().vec(), expectedDims);
2316 for (size_t i = 0; i < 3; i++) {
2317 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
2318 }
2319
2320 // Verify the structure
2321 // Reshape x 3 -> Concat -> batchedReduceAdd -> Save
2322 ASSERT_EQ(6, F->getNodes().size());
2323 auto *saveNode = getSaveNodeFromDest(output);
2324 auto *batchedReduceAdd =
2325 llvm::dyn_cast<BatchedReduceAddNode>(saveNode->getInput().getNode());
2326 ASSERT_TRUE(batchedReduceAdd);
2327 auto *concat =
2328 llvm::dyn_cast<ConcatNode>(batchedReduceAdd->getBatch().getNode());
2329 ASSERT_TRUE(concat);
2330 for (size_t i = 0; i < 3; ++i) {
2331 auto *reshape =
2332 llvm::dyn_cast<ReshapeNode>(concat->getNthInput(i).getNode());
2333 ASSERT_TRUE(reshape);
2334 }
2335
2336 // Constant Folding Test.
2337 FAIL_TEST_IF_ERR(checkConstFoldedOutput(netFilename, {"i0", "i1", "i2"},
2338 {&i0, &i1, &i2},
2339 {bindings.get(output)}));
2340}
2341
2342/// Test loading Sum with one input and one output
2343TEST_F(OnnxImporterTest, importSum1) {
2344 ExecutionEngine EE{};
2345 auto &mod = EE.getModule();
2346 Function *F = mod.createFunction("main");
2347 std::string netFilename(GLOW_DATA_PATH
2348 "tests/models/onnxModels/sum1.onnxtxt");
2349
2350 PlaceholderBindings bindings;
2351 Placeholder *output;
2352 Tensor x(ElemKind::FloatTy, {3});
2353 x.getHandle() = {1, 2, 3};
2354
2355 {
2356 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
2357 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2358
2359 bindings.allocate(mod.getPlaceholders());
2360 updateInputPlaceholdersByName(bindings, &mod, {"x"}, {&x});
2361 }
2362
2363 auto *res = bindings.get(output);
2364 EE.compile(CompilationMode::Infer);
2365 EE.run(bindings);
2366
2367 auto result = res->getHandle();
2368 std::vector<dim_t> expectedDims = {3};
2369 std::vector<float> expectedValues = {1, 2, 3};
2370
2371 EXPECT_EQ(result.dims().vec(), expectedDims);
2372 for (size_t i = 0; i < 3; i++) {
2373 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
2374 }
2375
2376 // Verify structure: input -> Save -> output
2377 ASSERT_EQ(mod.getPlaceholders().size(), 2);
2378 ASSERT_EQ(F->getNodes().size(), 1);
2379 auto *save = getSaveNodeFromDest(output);
2380 ASSERT_TRUE(llvm::isa<Placeholder>(save->getInput().getNode()));
2381
2382 // Constant Folding Test.
2383 FAIL_TEST_IF_ERR(
2384 checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)}));
2385}
2386
2387/// Test loading LengthsToRanges from an ONNX model.
2388TEST_F(OnnxImporterTest, importLengthsToRanges) {
2389 ExecutionEngine EE;
2390 auto &mod = EE.getModule();
2391 auto *F = mod.createFunction("main");
2392 std::string netFilename(GLOW_DATA_PATH
2393 "tests/models/onnxModels/lengths_to_ranges.onnxtxt");
2394 Placeholder *output;
2395 {
2396 Tensor lengths(ElemKind::Int32ITy, {4});
2397 ONNXModelLoader onnxLD(netFilename, {"lengths"}, {&lengths.getType()}, *F);
2398 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2399 }
2400 // Verify structure: PH -> LengthsToRanges -> Save -> PH.
2401 ASSERT_EQ(mod.getPlaceholders().size(), 2);
2402 ASSERT_EQ(F->getNodes().size(), 2);
2403 auto *save = getSaveNodeFromDest(output);
2404 auto *LTR = llvm::dyn_cast<LengthsToRangesNode>(save->getInput().getNode());
2405 ASSERT_TRUE(LTR);
2406 ASSERT_TRUE(llvm::isa<Placeholder>(LTR->getLengths()));
2407}
2408
2409/// Test loading ReplaceNaN op from an ONNX model.
2410/// Test with arg value = 1.0.
2411TEST_F(OnnxImporterTest, importReplaceNaN) {
2412 ExecutionEngine EE{};
2413 auto &mod = EE.getModule();
2414 Function *F = mod.createFunction("main");
2415
2416 std::string netFilename(GLOW_DATA_PATH
2417 "tests/models/onnxModels/replaceNaN.onnxtxt");
2418
2419 PlaceholderBindings bindings;
2420 Placeholder *output;
2421 Tensor x(ElemKind::FloatTy, {3, 3});
2422
2423 {
2424 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
2425 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2426 bindings.allocate(mod.getPlaceholders());
2427 updateInputPlaceholdersByName(bindings, &mod, {"x"}, {&x});
2428 }
2429
2430 // Verify structure: Input -> ReplaceNaN -> Save.
2431 EXPECT_EQ(F->getNodes().size(), 2);
2432 auto *saveNode = getSaveNodeFromDest(output);
2433 auto *replaceNaNNode =
2434 llvm::dyn_cast<ReplaceNaNNode>(saveNode->getInput().getNode());
2435 EXPECT_EQ(replaceNaNNode->getValue(), 1.0f);
2436 auto *inputNode =
2437 llvm::dyn_cast<Placeholder>(replaceNaNNode->getInput().getNode());
2438 ASSERT_EQ(inputNode, mod.getPlaceholderByNameSlow("x"));
2439
2440 // We have one input and one output.
2441 EXPECT_EQ(mod.getPlaceholders().size(), 2);
2442}
2443
/// Test loading SparseToDense op from an ONNX model.
TEST_F(OnnxImporterTest, importSparseToDense) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/sparseToDense.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;

  // Create inputs.
  constexpr dim_t kNumIndices = 5;
  constexpr dim_t kMaxIndex = 20;
  constexpr dim_t kRows = 10;
  constexpr dim_t kCols = 5;
  Tensor indices(ElemKind::Int64ITy, {kNumIndices});
  Tensor values(ElemKind::FloatTy, {kNumIndices, kRows, kCols});
  // dataToInferDim provides the shape the dense output should take (see the
  // dims check on the save placeholder below).
  Tensor dataToInferDim(ElemKind::FloatTy, {kMaxIndex, kRows, kCols});

  // Load model.
  {
    ONNXModelLoader onnxLD(
        netFilename, {"indices", "values", "dataToInferDim"},
        {&indices.getType(), &values.getType(), &dataToInferDim.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
  }

  // Verify structure: Inputs -> Splat + Reshape -> ScatterData -> Save.
  ASSERT_EQ(mod.getPlaceholders().size(), 4);
  ASSERT_EQ(F->getNodes().size(), 4);

  auto *save = getSaveNodeFromDest(output);
  auto *out = save->getPlaceholder();
  // The dense result must have the same shape as dataToInferDim.
  EXPECT_TRUE(out->dims().vec() == dataToInferDim.dims().vec());

  auto *STD = llvm::dyn_cast<ScatterDataNode>(save->getInput().getNode());
  ASSERT_TRUE(STD);
  // The importer reshapes the indices before feeding them to ScatterData.
  auto *reshapeNode = llvm::dyn_cast<ReshapeNode>(STD->getIndices().getNode());
  ASSERT_TRUE(reshapeNode);
  auto *idx = llvm::dyn_cast<Placeholder>(reshapeNode->getInput().getNode());
  EXPECT_EQ(idx, mod.getPlaceholderByNameSlow("indices"));
  auto *vals = llvm::dyn_cast<Placeholder>(STD->getSlices().getNode());
  EXPECT_EQ(vals, mod.getPlaceholderByNameSlow("values"));
}
2490
2491/// Test loading SparseLengthsSum from an ONNX model.
2492TEST_F(OnnxImporterTest, importSparseLengthsSum) {
2493 ExecutionEngine EE;
2494 auto &mod = EE.getModule();
2495 auto *F = mod.createFunction("main");
2496 std::string netFilename(GLOW_DATA_PATH
2497 "tests/models/onnxModels/sparseLengthsSum.onnxtxt");
2498 Placeholder *output;
2499 {
2500 Tensor data(ElemKind::FloatTy, {2, 1});
2501 Tensor indices(ElemKind::Int64ITy, {2});
2502 Tensor lengths(ElemKind::Int32ITy, {2});
2503 ONNXModelLoader onnxLD(
2504 netFilename, {"data", "indices", "lengths"},
2505 {&data.getType(), &indices.getType(), &lengths.getType()}, *F);
2506 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2507 }
2508 // Verify structure: PH, PH -> SparseLengthsSum -> Save -> PH.
2509 // PH -> Splat /
2510 ASSERT_EQ(mod.getPlaceholders().size(), 4);
2511 ASSERT_EQ(F->getNodes().size(), 2);
2512 auto *save = getSaveNodeFromDest(output);
2513 auto *LS = llvm::dyn_cast<SparseLengthsSumNode>(save->getInput().getNode());
2514 ASSERT_TRUE(LS);
2515 ASSERT_TRUE(llvm::isa<Placeholder>(LS->getData()));
2516 ASSERT_TRUE(llvm::isa<Placeholder>(LS->getIndices()));
2517 ASSERT_TRUE(llvm::isa<Placeholder>(LS->getLengths()));
2518}
2519
2520/// Test loading LengthsSum from an ONNX model.
2521TEST_F(OnnxImporterTest, importLengthsSum) {
2522 ExecutionEngine EE;
2523 auto &mod = EE.getModule();
2524 auto *F = mod.createFunction("main");
2525 std::string netFilename(GLOW_DATA_PATH
2526 "tests/models/onnxModels/lengths_sum.onnxtxt");
2527 Placeholder *output;
2528 {
2529 Tensor data(ElemKind::FloatTy, {10, 2, 3});
2530 Tensor lengths(ElemKind::Int32ITy, {5});
2531 ONNXModelLoader onnxLD(netFilename, {"data", "lengths"},
2532 {&data.getType(), &lengths.getType()}, *F);
2533 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2534 }
2535 // Verify structure: PH, PH -> LengthsSum -> Save -> PH.
2536 ASSERT_EQ(mod.getPlaceholders().size(), 3);
2537 ASSERT_EQ(F->getNodes().size(), 2);
2538 auto *save = getSaveNodeFromDest(output);
2539 auto *LS = llvm::dyn_cast<LengthsSumNode>(save->getInput().getNode());
2540 ASSERT_TRUE(LS);
2541 ASSERT_TRUE(llvm::isa<Placeholder>(LS->getData()));
2542 ASSERT_TRUE(llvm::isa<Placeholder>(LS->getLengths()));
2543}
2544
2545/// Test loading CumSum from an ONNX model.
2546TEST_F(OnnxImporterTest, importCumSum) {
2547 ExecutionEngine EE;
2548 auto &mod = EE.getModule();
2549 auto *F = mod.createFunction("main");
2550 std::string netFilename(GLOW_DATA_PATH
2551 "tests/models/onnxModels/cumsum.onnxtxt");
2552 Placeholder *output;
2553 {
2554 Tensor lengths(ElemKind::FloatTy, {10});
2555 lengths.getHandle() = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
2556 ONNXModelLoader onnxLD(netFilename, {"lengths"}, {&lengths.getType()}, *F);
2557 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2558 }
2559 // Verify structure: PH -> CumSum -> Save -> PH.
2560 ASSERT_EQ(mod.getPlaceholders().size(), 2);
2561 ASSERT_EQ(F->getNodes().size(), 2);
2562 auto *save = getSaveNodeFromDest(output);
2563 auto *CS = llvm::dyn_cast<CumSumNode>(save->getInput().getNode());
2564 ASSERT_TRUE(CS);
2565 ASSERT_TRUE(llvm::isa<Placeholder>(CS->getInput()));
2566 ASSERT_FALSE(CS->getExclusive());
2567 ASSERT_TRUE(CS->getReverse());
2568}
2569
/// Test loading a FCTransposed node: I * W + B, where I is need to be flatten.
TEST_F(OnnxImporterTest, FCTransposedWithFlatten) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/FCTransposed.onnxtxt");

  Placeholder *output;

  {
    // 3D input: the importer must flatten it before the FC.
    Tensor data(ElemKind::FloatTy, {2, 1, 3});
    data.getHandle() = {1, 2, 3, 4, 5, 6};
    ONNXModelLoader onnxLD(netFilename, {"data"}, {&data.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
  }

  // High level check on the content of the graph. We have 1 reshape, 1 FC,
  // and 1 save.
  EXPECT_EQ(F->getNodes().size(), 3);
  auto *saveNode = getSaveNodeFromDest(output);
  auto *fcNode =
      llvm::dyn_cast<FullyConnectedNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(fcNode);
  // The FC input must be the Reshape that flattens the 3D data.
  auto *reshape = llvm::dyn_cast<ReshapeNode>(fcNode->getInput());
  ASSERT_TRUE(reshape);
}
2598
2599/// Test loading Constant from an ONNX model.
2600TEST_F(OnnxImporterTest, constant) {
2601 ExecutionEngine EE;
2602 auto &mod = EE.getModule();
2603 auto *F = mod.createFunction("main");
2604 std::string netFilename(GLOW_DATA_PATH
2605 "tests/models/onnxModels/constant.onnxtxt");
2606 Placeholder *output;
2607 {
2608 ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
2609 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2610 EXPECT_NE(output, nullptr);
2611 }
2612 // Constant -> Save -> PH
2613 ASSERT_EQ(mod.getPlaceholders().size(), 1);
2614 ASSERT_EQ(F->getNodes().size(), 1);
2615}
2616
/// Helper for ConstantOfShape tests: loads model \p fileName (which has no
/// graph inputs), runs it, and checks that every element of the single output
/// equals \p ref, interpreting elements as \p ElemType.
template <class ElemType>
static void testConstantOfShape(std::string fileName, ElemType ref) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  auto *F = mod.createFunction("main");
  PlaceholderBindings bindings;

  std::string netFilename =
      std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName;
  Placeholder *output;
  {
    ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
    EXPECT_NE(output, nullptr);
  }
  // ConstantOfShape -> Save -> PH
  ASSERT_EQ(mod.getPlaceholders().size(), 1);
  ASSERT_EQ(F->getNodes().size(), 2);

  EE.compile(CompilationMode::Infer);
  bindings.allocate(mod.getPlaceholders());
  EE.run(bindings);

  // Every element of the output must be the fill value.
  auto result = bindings.get(output)->getHandle<ElemType>();
  for (size_t i = 0; i < result.getType().size(); i++) {
    ElemType val = result.raw(i);
    EXPECT_EQ(val, ref);
  }
}
2647
/// Helper for ConstantOfShape failure tests: loading model \p fileName is
/// expected to abort the process with a message containing "losses"
/// (checked via ASSERT_DEATH).
template <class ElemType>
static void testConstantOfShapeFailure(std::string fileName) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  auto *F = mod.createFunction("main");
  std::string netFilename =
      std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName;
  ASSERT_DEATH(ONNXModelLoader(netFilename, {}, {}, *F), "losses");
}
2658
/// ConstantOfShape producing float elements with fill value 1.0.
TEST_F(OnnxImporterTest, importConstantOfShapeFloat) {
  testConstantOfShape<float>("constantOfShape.onnxtxt", 1.0F);
}
2662
/// ConstantOfShape producing int32 elements with fill value 65535.
TEST_F(OnnxImporterTest, importConstantOfShapeInt32) {
  testConstantOfShape<int32_t>("constantOfShapeInt32.onnxtxt", 65535);
}
2666
/// ConstantOfShape producing int64 elements with fill value 16777216.
TEST_F(OnnxImporterTest, importConstantOfShapeInt64) {
  testConstantOfShape<int64_t>("constantOfShapeInt64.onnxtxt", 16777216LL);
}
2670
/// Loading must die when the int64 fill value cannot be held losslessly.
TEST_F(OnnxImporterTest, importConstantOfShapeInt64LossFailure) {
  testConstantOfShapeFailure<int64_t>("constantOfShapeInt64Fail.onnxtxt");
}
2674
/// Loading must die when the int32 fill value cannot be held losslessly.
TEST_F(OnnxImporterTest, importConstantOfShapeInt32LossFailure) {
  testConstantOfShapeFailure<int32_t>("constantOfShapeInt32Fail.onnxtxt");
}
2678
2679/// Test loading ExpandDims from an ONNX model.
2680TEST_F(OnnxImporterTest, expandDims) {
2681 ExecutionEngine EE;
2682 auto &mod = EE.getModule();
2683 auto *F = mod.createFunction("main");
2684 std::string netFilename(GLOW_DATA_PATH
2685 "tests/models/onnxModels/expandDims.onnxtxt");
2686 Placeholder *output;
2687 {
2688 Tensor x(ElemKind::FloatTy, {2, 2});
2689 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
2690 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2691 }
2692
2693 // Verify structure: PH -> Reshape -> Save -> PH.
2694 ASSERT_EQ(mod.getPlaceholders().size(), 2);
2695 ASSERT_EQ(F->getNodes().size(), 2);
2696 auto *save = getSaveNodeFromDest(output);
2697 auto *reshape = llvm::dyn_cast<ReshapeNode>(save->getInput().getNode());
2698 ASSERT_TRUE(reshape);
2699 EXPECT_TRUE(reshape->getDims().equals({1, 2, 2, 1}));
2700}
2701
/// Helper method to run the gather operator test cases.
/// \p fileName contains the model .onnxtxt.
/// \p dataShape: data Tensor dimensions.
/// \p indicesShape: indices Tensor dimensions.
/// \p expectedDims: dimensions expected on the result of the loaded
/// Gather/GatherND node (template parameter \p OpType).
template <class OpType>
static void gatherTestHelper(llvm::StringRef fileName,
                             llvm::ArrayRef<dim_t> dataShape,
                             llvm::ArrayRef<dim_t> indicesShape,
                             llvm::ArrayRef<dim_t> expectedDims) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");
  std::string netFilename =
      std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName.str();
  Placeholder *output;
  Tensor data(ElemKind::FloatTy, dataShape);
  Tensor indices(ElemKind::Int32ITy, indicesShape);

  {
    ONNXModelLoader onnxLD(netFilename, {"data", "indices"},
                           {&data.getType(), &indices.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
  }

  // Verify structure: PH/PH -> Gather/GatherND -> Save -> PH.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *node = saveNode->getInput().getNode();
  auto *nodeGather = llvm::dyn_cast<OpType>(node);
  ASSERT_TRUE(nodeGather);
  // Only the output shape is checked; values are not executed here.
  EXPECT_TRUE(nodeGather->getResult().dims().equals({expectedDims}));
}
2734
2735/// Test loading gather op from a ONNX model.
2736TEST_F(OnnxImporterTest, importGather) {
2737 std::string filename("gather.onnxtxt");
2738 std::vector<dim_t> dataShape = {3, 2};
2739 std::vector<dim_t> indicesShape = {2, 4};
2740 std::vector<dim_t> expectedDims = {2, 4, 2};
2741 gatherTestHelper<GatherNode>(filename, dataShape, indicesShape, expectedDims);
2742}
2743
2744/// Test loading gatherND op from a ONNX model.
2745TEST_F(OnnxImporterTest, importGatherND) {
2746 std::string filename("gatherND.onnxtxt");
2747 std::vector<dim_t> dataShape = {2, 2, 2};
2748 std::vector<dim_t> indicesShape = {2, 2};
2749 std::vector<dim_t> expectedDims = {2, 2};
2750 gatherTestHelper<GatherNDNode>(filename, dataShape, indicesShape,
2751 expectedDims);
2752}
2753
2754/// Test loading ScatterND from an ONNX model.
2755// Simplified test
2756TEST_F(OnnxImporterTest, scatterND) {
2757 ExecutionEngine EE;
2758 auto &mod = EE.getModule();
2759 std::string netFilename(GLOW_DATA_PATH
2760 "tests/models/onnxModels/scatterND.onnxtxt");
2761 auto *F = mod.createFunction("main");
2762 Placeholder *output;
2763 Tensor data(ElemKind::FloatTy, {8});
2764 Tensor indices(ElemKind::Int64ITy, {4, 1});
2765 Tensor updates(ElemKind::FloatTy, {4});
2766
2767 ONNXModelLoader onnxLD(
2768 netFilename, {"data", "indices", "updates"},
2769 {&data.getType(), &indices.getType(), &updates.getType()}, *F);
2770 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2771
2772 // Verify structure: PH/PH/PH -> ScatterND -> Save -> PH.
2773 ASSERT_EQ(mod.getPlaceholders().size(), 4);
2774 ASSERT_EQ(F->getNodes().size(), 2);
2775 auto *save = getSaveNodeFromDest(output);
2776 auto *scatter = llvm::dyn_cast<ScatterDataNode>(save->getInput().getNode());
2777 ASSERT_TRUE(scatter);
2778 EXPECT_TRUE(scatter->getResult().dims().equals({8}));
2779}
2780
2781/// Test loading ScatterND from an ONNX model.
2782// multi-dim test
2783TEST_F(OnnxImporterTest, mscatterND) {
2784 ExecutionEngine EE;
2785 auto &mod = EE.getModule();
2786 std::string netFilename(GLOW_DATA_PATH
2787 "tests/models/onnxModels/mscatterND.onnxtxt");
2788 auto *F = mod.createFunction("main");
2789 Placeholder *output;
2790 Tensor data(ElemKind::FloatTy, {4, 4, 4});
2791 Tensor indices(ElemKind::Int64ITy, {2, 1});
2792 Tensor updates(ElemKind::FloatTy, {2, 4, 4});
2793
2794 ONNXModelLoader onnxLD(
2795 netFilename, {"data", "indices", "updates"},
2796 {&data.getType(), &indices.getType(), &updates.getType()}, *F);
2797 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
2798
2799 // Verify structure: PH/PH/PH -> ScatterND -> Save -> PH.
2800 ASSERT_EQ(mod.getPlaceholders().size(), 4);
2801 ASSERT_EQ(F->getNodes().size(), 2);
2802 auto *save = getSaveNodeFromDest(output);
2803 auto *scatter = llvm::dyn_cast<ScatterDataNode>(save->getInput().getNode());
2804 ASSERT_TRUE(scatter);
2805 EXPECT_TRUE(scatter->getResult().dims().equals({4, 4, 4}));
2806}
2807
/// Test loading GatherRanges from an ONNX model.
TEST_F(OnnxImporterTest, gatherRanges) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/gatherranges.onnxtxt");
  auto *F = mod.createFunction("main");
  Placeholder *output;
  Tensor data(ElemKind::FloatTy, {6});
  Tensor ranges(ElemKind::Int32ITy, {2, 2, 2});

  {
    ONNXModelLoader onnxLD(netFilename, {"data", "ranges"},
                           {&data.getType(), &ranges.getType()}, *F);
    // GatherRanges has two outputs; this test checks the "output" one by
    // name rather than using getSingleOutput().
    output = EXIT_ON_ERR(onnxLD.getOutputByName("output"));
  }

  // Verify structure: PH/PH -> GatherRanges -> Save -> PH/PH.
  ASSERT_EQ(mod.getPlaceholders().size(), 4);
  ASSERT_EQ(F->getNodes().size(), 3);
  auto *save = getSaveNodeFromDest(output);
  auto *gatherRanges =
      llvm::dyn_cast<GatherRangesNode>(save->getInput().getNode());
  ASSERT_TRUE(gatherRanges);
  EXPECT_TRUE(gatherRanges->getOutput().dims().equals({5}));
  EXPECT_TRUE(gatherRanges->getLengths().dims().equals({2}));
}
2835
/// Test loading Gather ops with constant folding from an ONNX model.
TEST_F(OnnxImporterTest, gatherOpConstantFoldingAndReshape) {
  // This test verifies that Gather gets constant-folded, so that the argument
  // of the reshape becomes constant.
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  std::string netFilename(
      GLOW_DATA_PATH "tests/models/onnxModels/gatherConstantFolding.onnxtxt");
  PlaceholderBindings bindings;
  auto *F = mod.createFunction("main");
  Placeholder *output;
  Tensor data(ElemKind::FloatTy, {1, 2, 4, 3});
  // The fixture's SetUp disabled load-time constant folding; turn it on for
  // this test only, and restore it below after loading/compiling.
  setConstantFoldLoaderOpsFlag(true);
  {
    ONNXModelLoader onnxLD(netFilename, {"input"}, {&data.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
    EXPECT_EQ(mod.getPlaceholders().size(), 2);
    bindings.allocate(mod.getPlaceholders());
  }
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);
  setConstantFoldLoaderOpsFlag(false);

  // The reshape must have produced the expected (transposed) output shape.
  auto result = bindings.get(output)->getHandle();
  std::vector<dim_t> expectedDims = {1, 4, 3, 2};
  EXPECT_TRUE(result.dims().vec() == expectedDims);
}
2863
/// Helper to test loading an ONNX Slice operator from model \p fileName.
/// Feeds an NCHW tensor of shape \p inputShape through input \p inputName and
/// checks that the result has shape \p outputShape and contains the input
/// region beginning at \p starts (the ends are implied by starts +
/// outputShape). If \p expectLoadError is true, only verifies that loading
/// the model fails.
static void importSliceTest(std::string fileName, const char *inputName,
                            llvm::ArrayRef<dim_t> inputShape,
                            llvm::ArrayRef<dim_t> starts,
                            llvm::ArrayRef<dim_t> outputShape,
                            bool expectLoadError = false) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetFilename =
      std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName;
  PlaceholderBindings bindings;
  Placeholder *graphOutputVar;
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  Tensor data;
  getNCHWData(&data, inputShape[0], inputShape[1], inputShape[2],
              inputShape[3]);
  {
    if (expectLoadError) {
      Error err = Error::empty();
      ONNXModelLoader(NetFilename, {inputName}, {&data.getType()}, *F, &err);
      EXPECT_TRUE(ERR_TO_BOOL(std::move(err)));
      return;
    }
    ONNXModelLoader onnxLD(NetFilename, {inputName}, {&data.getType()}, *F);
    graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {inputName}, {&data});
  }

  // ONNX importer loads an Slice operator and adds to the IR:
  // - a Slice node

  // Check the graph structure.
  auto *saveNode = getSaveNodeFromDest(graphOutputVar);
  auto *node = saveNode->getInput().getNode();
  auto *sliceNode = llvm::dyn_cast<SliceNode>(node);
  EXPECT_NE(nullptr, sliceNode);

  // Compile&run the graph, and check the output.
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);
  auto result = bindings.get(graphOutputVar)->getHandle();
  EXPECT_TRUE(result.dims().vec() == outputShape.vec());
  // getNCHWData fills the input with consecutive values in row-major order,
  // so the expected output value at each output position is the flat index
  // of the corresponding input element.
  dim_t wSliceSize = inputShape[3];
  dim_t hSliceSize = inputShape[2] * wSliceSize;
  dim_t cSliceSize = inputShape[1] * hSliceSize;
  dim_t indexOutput = 0;
  for (dim_t n = 0; n < outputShape[0]; n++) {
    for (dim_t c = 0; c < outputShape[1]; c++) {
      for (dim_t h = 0; h < outputShape[2]; h++) {
        for (dim_t w = 0; w < outputShape[3]; w++) {
          dim_t indexInput = (starts[0] + n) * cSliceSize +
                             (starts[1] + c) * hSliceSize +
                             (starts[2] + h) * wSliceSize + (starts[3] + w);
          EXPECT_FLOAT_EQ(result.raw(indexOutput++), indexInput);
        }
      }
    }
  }

  // Constant Folding Test.
  FAIL_TEST_IF_ERR(checkConstFoldedOutput(NetFilename, {inputName}, {&data},
                                          {bindings.get(graphOutputVar)}));
}
2930
/// Slice with starts/ends provided as inputs and no axes attribute.
TEST_F(OnnxImporterTest, importSliceDynamicNoAxes) {
  importSliceTest("sliceDynamic.onnxtxt", "data", {2, 3, 3, 3} /* input */,
                  {0, 1, 1, 1} /* starts */, /* ends: {2, 2, 3, 3} */
                  {2, 1, 2, 2} /* output */);
}
2936
/// Slice with an axes attribute covering every dimension.
TEST_F(OnnxImporterTest, importSliceAxesFull) {
  importSliceTest("sliceAxesFull.onnxtxt", "data", {2, 3, 3, 3} /* input */,
                  {0, 1, 1, 2} /* starts */, /* ends: {1, 2, 3, 3} */
                  {1, 1, 2, 1} /* output */);
}
2942
/// Slice whose axes attribute lists the dimensions in arbitrary order.
TEST_F(OnnxImporterTest, importSliceAxesAnyOrder) {
  importSliceTest("sliceAxesAnyOrder.onnxtxt", "data", {2, 3, 3, 3} /* input */,
                  {1, 2, 0, 2} /* starts */, /* ends: {2, 3, 1, 3} */
                  {1, 1, 1, 1} /* output */);
}
2948
/// Slice whose axes attribute repeats an axis, overwriting earlier entries.
TEST_F(OnnxImporterTest, importSliceAxesOverwrite) {
  importSliceTest("sliceAxesOverwrite.onnxtxt", "data",
                  {2, 3, 3, 3} /* input */,
                  {0, 1, 1, 2} /* starts */, /* ends: {1, 2, 3, 3} */
                  {1, 1, 2, 1} /* output */);
}
2955
/// Slice whose axes attribute covers only a subset of the dimensions.
TEST_F(OnnxImporterTest, importSliceAxesPartial) {
  importSliceTest("sliceAxesPartial.onnxtxt", "data", {2, 3, 3, 3} /* input */,
                  {0, 1, 1, 0} /* starts */, /* ends: {2, 2, 3, 3} */
                  {2, 1, 2, 3} /* output */);
}
2961
/// Slice without an axes attribute: starts/ends apply to all dimensions.
TEST_F(OnnxImporterTest, importSliceNoAxes) {
  importSliceTest("sliceNoAxes.onnxtxt", "data", {2, 3, 3, 3} /* input */,
                  {0, 1, 1, 1} /* starts */, /* ends: {2, 2, 3, 3} */
                  {2, 1, 2, 2} /* output */);
}
2967
/// Slice with invalid axes must fail to load (expectLoadError = true).
TEST_F(OnnxImporterTest, importSliceInvalidAxes) {
  importSliceTest("sliceInvalidAxes.onnxtxt", "data", {2, 3, 3, 3} /* input */,
                  {0, 1, 1, 1} /* starts */, /* ends: {2, 2, 3, 3} */
                  {2, 1, 2, 2} /* output */, true);
}
2973
/// Slice with a (supported) steps input.
TEST_F(OnnxImporterTest, importSliceWithStep) {
  importSliceTest("sliceWithStep.onnxtxt", "data", {2, 3, 3, 3} /* input */,
                  {0, 1, 1, 1} /* starts */, /* ends: {2, 2, 3, 3} */
                  {2, 1, 2, 2} /* output */);
}
2979
/// Slice with an unsupported step value must fail to load.
TEST_F(OnnxImporterTest, importSliceWithUnsupportedStep) {
  importSliceTest("sliceWithUnsupportedStep.onnxtxt", "data",
                  {2, 3, 3, 3} /* input */,
                  {0, 1, 1, 1} /* starts */, /* ends: {2, 2, 3, 3} */
                  {2, 1, 2, 2} /* output */, true);
}
2986
/// Helper to test loading an ONNX Cast operator from model \p fileName.
/// Feeds an NCHW tensor of shape \p inputShape through input \p inputName and
/// checks that the Cast is imported as a ConvertTo node whose result element
/// type is \p outputKind. The graph is not executed.
static void importCast(llvm::StringRef fileName, llvm::StringRef inputName,
                       llvm::ArrayRef<dim_t> inputShape, ElemKind outputKind) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetFilename =
      std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName.str();
  PlaceholderBindings bindings;
  Placeholder *graphOutputVar;
  {
    Tensor data;
    getNCHWData(&data, inputShape[0], inputShape[1], inputShape[2],
                inputShape[3]);
    ONNXModelLoader onnxLD(NetFilename, {inputName.str().c_str()},
                           {&data.getType()}, *F);
    graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {inputName}, {&data});
  }

  // ONNX importer loads a Cast operator and adds to the IR:
  // - a ConvertTo node

  // Check the graph structure.
  auto *saveNode = getSaveNodeFromDest(graphOutputVar);
  auto *node = saveNode->getInput().getNode();
  auto *castNode = llvm::dyn_cast<ConvertToNode>(node);
  ASSERT_NE(nullptr, castNode);

  // Check node output type.
  ASSERT_EQ(castNode->getResult().getType()->getElementType(), outputKind);
}
3020
/// Cast to float32.
TEST_F(OnnxImporterTest, importCastToFloat) {
  importCast("castToFloat.onnxtxt", "data", {1, 2, 2, 2}, ElemKind::FloatTy);
}
/// Cast to float16.
TEST_F(OnnxImporterTest, importCastToFloat16) {
  importCast("castToFloat16.onnxtxt", "data", {1, 2, 2, 2},
             ElemKind::Float16Ty);
}
/// Cast to int32.
TEST_F(OnnxImporterTest, importCastToInt32) {
  importCast("castToInt32.onnxtxt", "data", {1, 2, 2, 2}, ElemKind::Int32ITy);
}
/// Cast to int64.
TEST_F(OnnxImporterTest, importCastToInt64) {
  importCast("castToInt64.onnxtxt", "data", {1, 2, 2, 2}, ElemKind::Int64ITy);
}
3034TEST(onnx, importCastToBool) {
3035 importCast("castToBool.onnxtxt", "data", {1, 2, 2, 2}, ElemKind::BoolTy);
3036}
3037
/// Test a float -> int32 -> int64 cast chain: the float->int cast must be
/// kept (it truncates), while int32<->int64 round trips may be optimized.
TEST_F(OnnxImporterTest, cast_32_64) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/castInt-32-64.onnxtxt");
  PlaceholderBindings bindings;
  Placeholder *graphOutputVar;
  std::vector<float> init(1 * 2 * 4 * 3);
  std::vector<float> expectedOut(1 * 2 * 4 * 3);
  for (size_t i = 0; i < init.size(); i++) {
    const float value = i * 12.345678f;
    init[i] = value;
    // The expected result is the float input truncated to int32.
    expectedOut[i] = int32_t(value);
  }
  {
    Tensor data(ElemKind::FloatTy, {1, 2, 4, 3});
    data.getHandle() = init;
    ONNXModelLoader onnxLD(netFilename, {"input"}, {&data.getType()}, *F);
    graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&data});
  }

  EE.compile(CompilationMode::Infer);
  EE.run(bindings);
  // Make sure that the optimizer did not eliminate float->int casts. They are
  // not NOOP. Conversions int32 -> int64 -> int32 are always NOOP, so they can
  // be optimized away.
  EXPECT_EQ(F->getNodes().size(), 3);
  auto result = bindings.get(graphOutputVar)->getHandle();
  std::vector<dim_t> expectedDims = {1, 2, 4, 3};

  EXPECT_TRUE(result.dims().vec() == expectedDims);
  for (size_t i = 0; i < expectedOut.size(); i++) {
    EXPECT_EQ(result.raw(i), expectedOut[i]);
  }
}
3077
3078static void importPad(std::string fileName, const char *inputName,
3079 llvm::ArrayRef<dim_t> inputShape,
3080 llvm::ArrayRef<sdim_t> starts,
3081 llvm::ArrayRef<sdim_t> ends, PaddingMode mode,
3082 float value, bool testOutput,
3083 bool expectLoadError = false) {
3084 ExecutionEngine EE{};
3085 auto &mod = EE.getModule();
3086 Function *F = mod.createFunction("main");
3087
3088 std::string NetFilename =
3089 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + fileName;
3090 PlaceholderBindings bindings;
3091 Placeholder *graphOutputVar;
3092 // Destroy the loader after the graph is loaded since the following execution
3093 // will not depend on anyting from the loader.
3094 {
3095 Tensor data;
3096 getNCHWData(&data, inputShape[0], inputShape[1], inputShape[2],
3097 inputShape[3]);
3098 if (expectLoadError) {
3099 Error err = Error::empty();
3100 ONNXModelLoader(NetFilename, {inputName}, {&data.getType()}, *F, &err);
3101 EXPECT_TRUE(ERR_TO_BOOL(std::move(err)));
3102 return;
3103 }
3104 ONNXModelLoader onnxLD(NetFilename, {inputName}, {&data.getType()}, *F);
3105 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
3106 bindings.allocate(mod.getPlaceholders());
3107 updateInputPlaceholdersByName(bindings, &mod, {inputName}, {&data});
3108 }
3109
3110 // ONNX importer loads a Pad operator and adds to the IR:
3111 // - a Pad node
3112
3113 // Check the graph structure.
3114 auto *saveNode = getSaveNodeFromDest(graphOutputVar);
3115 auto *node = saveNode->getInput().getNode();
3116 auto *padNode = llvm::dyn_cast<PadNode>(node);
3117 EXPECT_NE(nullptr, padNode);
3118
3119 // Check Pad node properties.
3120 assert(padNode->getMode() == mode);
3121 if (mode == PaddingMode::CONSTANT) {
3122 EXPECT_EQ(value, padNode->getValue());
3123 }
3124 // Check the Pad node output shape.
3125 std::vector<dim_t> expectedOutputShape(inputShape.size());
3126 for (unsigned int i = 0; i < inputShape.size(); i++) {
3127 expectedOutputShape[i] =
3128 size_t(ssize_t(inputShape[i]) + starts[i] + ends[i]);
3129 }
3130 EXPECT_TRUE(padNode->getResult().dims().vec() == expectedOutputShape);
3131
3132 // Currently, only constant with positive pads is supported at lowering.
3133 // We just consider this test case.
3134 if (testOutput && mode == PaddingMode::CONSTANT) {
3135 // Compile&run the graph, and check the output.
3136 EE.compile(CompilationMode::Infer);
3137 EE.run(bindings);
3138 auto result = bindings.get(graphOutputVar)->getHandle();
3139 EXPECT_TRUE(result.dims().vec() == expectedOutputShape);
3140 size_t indexOutput = 0;
3141 size_t indexinput = 0;
3142 for (size_t n = 0; n < expectedOutputShape[0]; n++) {
3143 for (size_t c = 0; c < expectedOutputShape[1]; c++) {
3144 for (size_t h = 0; h < expectedOutputShape[2]; h++) {
3145 for (size_t w = 0; w < expectedOutputShape[3]; w++) {
3146 float expectedValue = value;
3147 if ((n >= size_t(starts[0])) &&
3148 (n < (expectedOutputShape[0] - size_t(ends[0]))) &&
3149 (c >= size_t(starts[1])) &&
3150 (c < (expectedOutputShape[1] - size_t(ends[1]))) &&
3151 (h >= size_t(starts[2])) &&
3152 (h < (expectedOutputShape[2] - size_t(ends[2]))) &&
3153 (w >= size_t(starts[3])) &&
3154 (w < (expectedOutputShape[3] - size_t(ends[3])))) {
3155 // This is the way 'getNCHWData' initializes data.
3156 expectedValue = indexinput++;
3157 }
3158 EXPECT_FLOAT_EQ(result.raw(indexOutput++), expectedValue);
3159 }
3160 }
3161 }
3162 }
3163 }
3164}
3165
/// Pad with default mode (constant) and default value (0); negative pads
/// shrink the corresponding dimension.
TEST_F(OnnxImporterTest, importPadDefault) {
  importPad("padDefault.onnxtxt", "data", {4, 6, 5, 7} /* input */,
            {1, 2, -2, 0} /* starts */, {0, -2, 1, 2} /* ends */,
            PaddingMode::CONSTANT, 0.f, false);
}
3171
/// Pad in opset v11 where "pads" is passed through the 2nd input instead of
/// an attribute.
TEST_F(OnnxImporterTest, importPadDefaultInputPads) {
  // This test Pad in opset v11 where "pads" is passed through the 2nd input.
  importPad("padDefaultInputPad.onnxtxt", "data", {4, 6, 5, 7} /* input */,
            {1, 2, -2, 0} /* starts */, {0, -2, 1, 2} /* ends */,
            PaddingMode::CONSTANT, 0.f, false);
}
3178
/// Constant-mode Pad with an explicit non-zero pad value.
TEST_F(OnnxImporterTest, importPadConstant) {
  importPad("padConstant.onnxtxt", "data", {4, 6, 5, 7} /* input */,
            {1, 2, -2, 0} /* starts */, {0, -2, 1, 2} /* ends */,
            PaddingMode::CONSTANT, 2.55f, false);
}
3184
/// Pad in opset v11: "pads" via the 2nd input and "value" via the 3rd input.
TEST_F(OnnxImporterTest, importPadConstantInput) {
  // This tests Pad in opset v11 where "pads" is passed through the 2nd input
  // and "value" through the 3rd input.
  importPad("padConstantInput.onnxtxt", "data", {4, 6, 5, 7} /* input */,
            {1, 2, -2, 0} /* starts */, {0, -2, 1, 2} /* ends */,
            PaddingMode::CONSTANT, 2.55f, false);
}
3192
/// Reflect mode is unsupported: loading must fail.
TEST_F(OnnxImporterTest, importPadReflect) {
  // Note: PaddingMode::REFLECT is not yet supported, so we assert death when
  // loading the model.
  importPad("padReflect.onnxtxt", "data", {4, 6, 5, 7} /* input */,
            {1, 2, -2, 0} /* starts */, {0, -2, 1, 2} /* ends */,
            PaddingMode::REFLECT, 0.f /* any */, false,
            /* expectLoadError */ true);
}
3201
/// Edge mode is unsupported: loading must fail.
TEST_F(OnnxImporterTest, importPadEdge) {
  // Note: PaddingMode::EDGE is not yet supported, so we assert death when
  // loading the model.
  importPad("padEdge.onnxtxt", "data", {4, 6, 5, 7} /* input */,
            {1, 2, -2, 0} /* starts */, {0, -2, 1, 2} /* ends */,
            PaddingMode::EDGE, 0.f /* any */, false,
            /* expectLoadError */ true);
}
3210
/// Constant-mode Pad with only positive pads: also executes the graph and
/// verifies the output values (testOutput = true).
TEST_F(OnnxImporterTest, importPadConstantPositive) {
  importPad("padConstantPositive.onnxtxt", "data", {4, 6, 5, 7} /* input */,
            {1, 2, 3, 4} /* starts */, {0, 3, 1, 2} /* ends */,
            PaddingMode::CONSTANT, 2.55f, true);
}
3216
/// Test importing InstanceNormalization: the loaded graph should save the
/// result of an InstanceNormalizationNode.
TEST_F(OnnxImporterTest, instNorm) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/instNorm.onnxtxt");
  auto *F = mod.createFunction("main");
  Placeholder *output;
  Tensor inputTensor(ElemKind::FloatTy, {1, 3, 10, 10});
  {
    ONNXModelLoader onnxLD(netFilename, {"input"}, {&inputTensor.getType()},
                           *F);
    output = EXIT_ON_ERR(onnxLD.getOutputByName("output"));
    // The loader should expose exactly one input placeholder whose type
    // matches the type we passed in.
    auto inputs = onnxLD.getInputVarsMapping();
    EXPECT_EQ(inputs.size(), 1);
    EXPECT_TRUE(inputTensor.getType().isEqual(inputs["input"]->getType()));
  }

  // Check the graph structure.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *inNode =
      llvm::dyn_cast<InstanceNormalizationNode>(saveNode->getInput().getNode());
  EXPECT_NE(nullptr, inNode);
}
3240
3241/// Test loading BatchNorm with all optional outputs declared, but not used in
3242/// the model. Glow supports only the first mandatory output, but declaring
3243/// optional outputs while not using them in the model should not make the
3244/// import fail.
3245TEST_F(OnnxImporterTest, batchNormPR2304) {
3246 ExecutionEngine EE;
3247 auto &mod = EE.getModule();
3248 std::string netFilename(GLOW_DATA_PATH
3249 "tests/models/onnxModels/batchNormPR2304.onnxtxt");
3250 auto *F = mod.createFunction("main");
3251 Placeholder *output;
3252 Tensor inputTensor(ElemKind::FloatTy, {1, 2, 10, 10});
3253 {
3254 ONNXModelLoader onnxLD(netFilename, {"input"}, {&inputTensor.getType()},
3255 *F);
3256 output = EXIT_ON_ERR(onnxLD.getOutputByName("output"));
3257 }
3258
3259 // Check the graph structure.
3260 auto *saveNode = getSaveNodeFromDest(output);
3261 auto *trNode = llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
3262 EXPECT_NE(nullptr, trNode);
3263 auto *bnNode =
3264 llvm::dyn_cast<BatchNormalizationNode>(trNode->getInput().getNode());
3265 EXPECT_NE(nullptr, bnNode);
3266}
3267
3268/// Test constructor for auto loading inputs case.
TEST_F(OnnxImporterTest, autoLoadInputs) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/batchNormPR2304.onnxtxt");
  auto *F = mod.createFunction("main");
  // Reference tensor used only to compare against the auto-created input type.
  Tensor inputTensor(ElemKind::FloatTy, {1, 2, 10, 10});
  llvm::StringRef inputName = "input";
  // Empty name/type lists: the loader must derive the input placeholders from
  // the model file itself.
  ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
  auto inputs = onnxLD.getInputVarsMapping();
  EXPECT_EQ(inputs.size(), 1);
  EXPECT_TRUE(inputTensor.getType().isEqual(inputs[inputName]->getType()));
}
3282
/// Test importing the ONNX Shape operator and running the resulting graph;
/// also checks that constant folding yields the same output.
TEST_F(OnnxImporterTest, shape) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/shape.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  Tensor x(ElemKind::FloatTy, {2, 2, 2, 2});
  x.getHandle() = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};

  {
    ONNXModelLoader onnxLD(netFilename, {"input"}, {&x.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&x});
  }

  auto *res = bindings.get(output);
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  // Golden output: a single-element int64 tensor holding 4 (what exactly the
  // model derives from the input's shape is defined by shape.onnxtxt).
  auto result = res->getHandle<int64_t>();
  std::vector<dim_t> expectedDims = {1};
  std::vector<int64_t> expectedValues = {4};

  EXPECT_TRUE(result.dims().vec() == expectedDims);
  for (size_t i = 0; i < expectedValues.size(); i++) {
    EXPECT_EQ(result.raw(i), expectedValues[i]);
  }

  // Constant Folding Test.
  FAIL_TEST_IF_ERR(checkConstFoldedOutput(netFilename, {"input"}, {&x},
                                          {bindings.get(output)}));
}
3320
/// Test importing the ONNX Tile operator: the {1, 2, 2, 1} input is tiled to
/// a {1, 4, 4, 3} output (per-dimension repeats of 1, 2, 2 and 3).
TEST_F(OnnxImporterTest, tile) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/tile.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  {
    Tensor x(ElemKind::FloatTy, {1, 2, 2, 1});
    x.getHandle() = {1., 2., 3., 4.};

    ONNXModelLoader onnxLD(netFilename, {"input"}, {&x.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&x});
  }

  auto *res = bindings.get(output);
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  auto result = res->getHandle();
  std::vector<dim_t> expectedDims = {1, 4, 4, 3};
  std::vector<float> expectedValues = {
      1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0,
      3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
      1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0,
      3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
  };

  EXPECT_TRUE(result.dims().vec() == expectedDims);
  for (size_t i = 0; i < expectedValues.size(); i++) {
    EXPECT_EQ(result.raw(i), expectedValues[i]);
  }
}
3359
3360static void importPowTest(const std::string &netFilename, Tensor &x, Tensor &y,
3361 std::vector<dim_t> &expectedDims,
3362 std::vector<float> &expectedValues) {
3363 ExecutionEngine EE{};
3364 auto &mod = EE.getModule();
3365 Function *F = mod.createFunction("main");
3366
3367 PlaceholderBindings bindings;
3368 Placeholder *output;
3369
3370 ONNXModelLoader onnxLD(netFilename, {"base", "exp"},
3371 {&x.getType(), &y.getType()}, *F);
3372 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
3373 bindings.allocate(mod.getPlaceholders());
3374 updateInputPlaceholdersByName(bindings, &mod, {"base"}, {&x});
3375 updateInputPlaceholdersByName(bindings, &mod, {"exp"}, {&y});
3376
3377 auto *outputT = bindings.get(output);
3378
3379 EE.compile(CompilationMode::Infer);
3380 EE.run(bindings);
3381
3382 auto outputH = outputT->getHandle();
3383
3384 EXPECT_TRUE(outputH.dims().vec() == expectedDims);
3385 for (size_t i = 0; i < expectedValues.size(); i++) {
3386 EXPECT_EQ(outputH.raw(i), expectedValues[i]);
3387 }
3388}
3389
3390TEST_F(OnnxImporterTest, pow_scalar_broadcast) {
3391 Tensor x(ElemKind::FloatTy, {2, 3});
3392 x.getHandle() = {1, 2, 3, 4, 5, 6};
3393
3394 Tensor y(ElemKind::FloatTy, {1});
3395 y.getHandle() = {
3396 3,
3397 };
3398
3399 std::string netFilename(
3400 GLOW_DATA_PATH "tests/models/onnxModels/pow_scalar_broadcast.onnxtxt");
3401
3402 std::vector<dim_t> expectedDims = {2, 3};
3403 std::vector<float> expectedValues = {
3404 1., 8., 27., 64., 125, 216.,
3405 };
3406
3407 importPowTest(netFilename, x, y, expectedDims, expectedValues);
3408}
3409
3410TEST_F(OnnxImporterTest, pow_vector_broadcast) {
3411 Tensor x(ElemKind::FloatTy, {2, 3});
3412 x.getHandle() = {1, 2, 3, 4, 5, 6};
3413
3414 Tensor y(ElemKind::FloatTy, {3});
3415 y.getHandle() = {
3416 1,
3417 2,
3418 3,
3419 };
3420
3421 std::string netFilename(
3422 GLOW_DATA_PATH "tests/models/onnxModels/pow_array_broadcast.onnxtxt");
3423
3424 std::vector<dim_t> expectedDims = {2, 3};
3425 std::vector<float> expectedValues = {
3426 1., 4., 27., 4., 25, 216.,
3427 };
3428
3429 importPowTest(netFilename, x, y, expectedDims, expectedValues);
3430}
3431
3432TEST_F(OnnxImporterTest, pow_element_wise) {
3433 Tensor x(ElemKind::FloatTy, {3});
3434 x.getHandle() = {1, 2, 3};
3435
3436 Tensor y(ElemKind::FloatTy, {3});
3437 y.getHandle() = {4, 5, 6};
3438
3439 std::string netFilename(GLOW_DATA_PATH
3440 "tests/models/onnxModels/pow_element_wise.onnxtxt");
3441
3442 std::vector<dim_t> expectedDims = {3};
3443 std::vector<float> expectedValues = {
3444 1.,
3445 32.,
3446 729.,
3447 };
3448
3449 importPowTest(netFilename, x, y, expectedDims, expectedValues);
3450}
3451
/// Test importing ONNX TopK: for each row of the {1, 3, 4} input, the top
/// k=2 values and their indices are returned.
TEST_F(OnnxImporterTest, topK) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/TopK.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  Placeholder *index;
  Tensor x(ElemKind::FloatTy, {1, 3, 4});
  x.getHandle() = {1., 2., 3., 4., 8., 7., 7., 7., 11., 12., 11., 10.};

  {
    ONNXModelLoader onnxLD(netFilename, {"scores"}, {&x.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getOutputByName("topscores"));
    index = EXIT_ON_ERR(onnxLD.getOutputByName("topindices"));
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"scores"}, {&x});
  }

  auto *outputT = bindings.get(output);
  auto *indexT = bindings.get(index);

  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  auto outputH = outputT->getHandle();
  auto indexH = indexT->getHandle<int64_t>();
  std::vector<dim_t> expectedDims = {1, 3, 2};
  std::vector<float> expectedValues = {
      4., 3., 8., 7., 12, 11.,
  };
  // Note the rows with ties ({8,7,7,7} and {11,12,11,10}): the expected
  // indices show ties resolving to the lowest index.
  std::vector<int64_t> expectedIndices = {3, 2, 0, 1, 1, 0};

  EXPECT_TRUE(outputH.dims().vec() == expectedDims);
  for (size_t i = 0; i < expectedValues.size(); i++) {
    EXPECT_EQ(outputH.raw(i), expectedValues[i]);
  }

  EXPECT_TRUE(indexH.dims().vec() == expectedDims);
  for (size_t i = 0; i < expectedIndices.size(); i++) {
    EXPECT_EQ(indexH.raw(i), expectedIndices[i]);
  }

  // Constant Folding Test.
  FAIL_TEST_IF_ERR(
      checkConstFoldedOutput(netFilename, {"scores"}, {&x}, {outputT, indexT}));
}
3502
3503void testArgMinMax(llvm::StringRef filename, bool isMin,
3504 const std::vector<dim_t> &expectedDims) {
3505 ExecutionEngine EE;
3506 auto &mod = EE.getModule();
3507 Function *F = mod.createFunction("main");
3508
3509 std::string netFilename = std::string(GLOW_DATA_PATH) + filename.str();
3510
3511 PlaceholderBindings bindings;
3512 Placeholder *PH;
3513 std::vector<dim_t> inDims = {2, 3, 4, 5};
3514 {
3515 Tensor inT(ElemKind::FloatTy, inDims);
3516
3517 ONNXModelLoader onnxLD(netFilename, {"input"}, {&inT.getType()}, *F);
3518 PH = EXIT_ON_ERR(onnxLD.getOutputByName("scores"));
3519 bindings.allocate(mod.getPlaceholders());
3520 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&inT});
3521 }
3522
3523 EE.compile(CompilationMode::Infer);
3524 EE.run(bindings);
3525
3526 auto output = bindings.get(PH)->getHandle<int64_t>();
3527 EXPECT_TRUE(output.dims().vec() == expectedDims);
3528
3529 auto *save = getSaveNodeFromDest(PH);
3530 if (isMin) {
3531 EXPECT_TRUE(llvm::isa<ArgMinNode>(save->getInput()));
3532 } else {
3533 EXPECT_TRUE(llvm::isa<ArgMaxNode>(save->getInput()));
3534 }
3535}
3536
3537TEST_F(OnnxImporterTest, argMaxKeepDim) {
3538 testArgMinMax("tests/models/onnxModels/ArgMaxKeepDim.onnxtxt", false,
3539 {2, 3, 1, 5});
3540}
3541
3542TEST_F(OnnxImporterTest, argMaxNoKeepDim) {
3543 testArgMinMax("tests/models/onnxModels/ArgMaxNoKeepDim.onnxtxt", false,
3544 {2, 4, 5});
3545}
3546
3547TEST_F(OnnxImporterTest, argMaxDefault) {
3548 testArgMinMax("tests/models/onnxModels/ArgMaxDefault.onnxtxt", false,
3549 {1, 3, 4, 5});
3550}
3551
3552TEST_F(OnnxImporterTest, argMinKeepDim) {
3553 testArgMinMax("tests/models/onnxModels/ArgMinKeepDim.onnxtxt", true,
3554 {2, 3, 1, 5});
3555}
3556
3557TEST_F(OnnxImporterTest, argMinNoKeepDim) {
3558 testArgMinMax("tests/models/onnxModels/ArgMinNoKeepDim.onnxtxt", true,
3559 {2, 4, 5});
3560}
3561
3562TEST_F(OnnxImporterTest, argMinDefault) {
3563 testArgMinMax("tests/models/onnxModels/ArgMinDefault.onnxtxt", true,
3564 {1, 3, 4, 5});
3565}
3566
/// Test importing MaxPool with the optional second (Argmax indices) output
/// and verify both outputs numerically against golden data.
TEST_F(OnnxImporterTest, importMaxPoolWithArgmax) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/maxPoolWithArgmax.onnxtxt");
  auto *F = mod.createFunction("main");
  PlaceholderBindings bindings;
  Placeholder *resultPH, *indicesPH;
  Tensor inputTensor(ElemKind::FloatTy, {1, 3, 4, 4});

  // Execute the following scenario for MaxPool with Argmax output:
  // Input:
  //  [[[[ 0. 47. 35. 23.]
  //     [11. 58. 46. 34.]
  //     [22. 10. 57. 45.]
  //     [33. 21.  9. 56.]]
  //
  //    [[44. 32. 20.  8.]
  //     [55. 43. 31. 19.]
  //     [ 7. 54. 42. 30.]
  //     [18.  6. 53. 41.]]
  //
  //    [[29. 17.  5. 52.]
  //     [40. 28. 16.  4.]
  //     [51. 39. 27. 15.]
  //     [ 3. 50. 38. 26.]]]]
  //
  // Result:
  //  [[[[58. 46.]
  //     [33. 57.]]
  //
  //    [[55. 31.]
  //     [54. 53.]]
  //
  //    [[40. 52.]
  //     [51. 38.]]]]
  //
  // Argmax:
  //  [[[[15 18]
  //     [36 30]]
  //
  //    [[13 19]
  //     [28 43]]
  //
  //    [[14 11]
  //     [26 44]]]]
  inputTensor.getHandle() = {
      0.0,  47.0, 35.0, 23.0, 11.0, 58.0, 46.0, 34.0, 22.0, 10.0, 57.0, 45.0,
      33.0, 21.0, 9.0,  56.0, 44.0, 32.0, 20.0, 8.0,  55.0, 43.0, 31.0, 19.0,
      7.0,  54.0, 42.0, 30.0, 18.0, 6.0,  53.0, 41.0, 29.0, 17.0, 5.0,  52.0,
      40.0, 28.0, 16.0, 4.0,  51.0, 39.0, 27.0, 15.0, 3.0,  50.0, 38.0, 26.0};

  {
    ONNXModelLoader onnxLD(netFilename, {"input"}, {&inputTensor.getType()},
                           *F);
    resultPH = EXIT_ON_ERR(onnxLD.getOutputByName("result"));
    indicesPH = EXIT_ON_ERR(onnxLD.getOutputByName("indices"));
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&inputTensor});
  }

  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  // Both outputs share the pooled shape; indices are int64.
  auto result = bindings.get(resultPH)->getHandle();
  auto indices = bindings.get(indicesPH)->getHandle<int64_t>();
  std::vector<dim_t> expectedDims = {1, 3, 2, 2};

  EXPECT_TRUE(result.dims().vec() == expectedDims);
  EXPECT_TRUE(indices.dims().vec() == expectedDims);

  std::vector<float> expectedResult = {58.0, 46.0, 33.0, 57.0, 55.0, 31.0,
                                       54.0, 53.0, 40.0, 52.0, 51.0, 38.0};
  std::vector<int64_t> expectedIndices = {15, 18, 36, 30, 13, 19,
                                          28, 43, 14, 11, 26, 44};

  for (size_t i = 0; i < expectedResult.size(); i++) {
    EXPECT_EQ(result.raw(i), expectedResult[i]);
    EXPECT_EQ(indices.raw(i), expectedIndices[i]);
  }
}
3648
/// Test importing the ONNX Mean operator: the element-wise average of three
/// equally-shaped {2, 3, 2} tensors.
TEST_F(OnnxImporterTest, importMean) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/Mean.onnxtxt");
  auto *F = mod.createFunction("main");
  PlaceholderBindings bindings;
  Placeholder *resultPH;
  Tensor T0(ElemKind::FloatTy, {2, 3, 2});
  Tensor T1(ElemKind::FloatTy, {2, 3, 2});
  Tensor T2(ElemKind::FloatTy, {2, 3, 2});
  T0.getHandle() = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  T1.getHandle() = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
  T2.getHandle() = {2.5, 1, 2.5, 1, 2.5, 1, 2.5, 1, 2.5, 1, 0, 1};
  {
    ONNXModelLoader onnxLD(netFilename, {"T0", "T1", "T2"},
                           {&T0.getType(), &T1.getType(), &T2.getType()}, *F);
    resultPH = EXIT_ON_ERR(onnxLD.getOutputByName("Y"));
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"T0", "T1", "T2"},
                                  {&T0, &T1, &T2});
  }
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);
  auto result = bindings.get(resultPH)->getHandle();
  std::vector<dim_t> expectedDims = {2, 3, 2};
  EXPECT_TRUE(result.dims().vec() == expectedDims);
  // Each element is (T0[i] + T1[i] + T2[i]) / 3.
  std::vector<float> expectedResult = {4.5, 4, 4.5, 4, 4.5,      4,
                                       4.5, 4, 4.5, 4, 11.0 / 3, 4};
  for (size_t i = 0; i < expectedResult.size(); i++) {
    EXPECT_EQ(result.raw(i), expectedResult[i]);
  }
}
3682
/// Test importing ONNX Mean with multidirectional broadcasting: inputs of
/// shapes {1, 2, 1}, {3} and {1, 2, 3} are averaged into a {1, 2, 3} result.
TEST_F(OnnxImporterTest, importMeanBroadcast) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/Mean_broadcast.onnxtxt");
  auto *F = mod.createFunction("main");
  PlaceholderBindings bindings;
  Placeholder *resultPH;
  Tensor T0(ElemKind::FloatTy, {1, 2, 1});
  Tensor T1(ElemKind::FloatTy, {3});
  Tensor T2(ElemKind::FloatTy, {1, 2, 3});
  T0.getHandle() = {0, 1};
  T1.getHandle() = {11, 10, 9};
  T2.getHandle() = {5, 4, 3, 2, 1, 0};

  {
    ONNXModelLoader onnxLD(netFilename, {"T0", "T1", "T2"},
                           {&T0.getType(), &T1.getType(), &T2.getType()}, *F);
    resultPH = EXIT_ON_ERR(onnxLD.getOutputByName("Y"));
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"T0", "T1", "T2"},
                                  {&T0, &T1, &T2});
  }
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);
  auto result = bindings.get(resultPH)->getHandle();
  std::vector<dim_t> expectedDims = {1, 2, 3};
  EXPECT_TRUE(result.dims().vec() == expectedDims);
  // Each element is the mean of the three broadcast operands.
  std::vector<float> expectedResult = {16.0 / 3, 14.0 / 3, 4.0,
                                       14.0 / 3, 4.0,      10.0 / 3};
  for (size_t i = 0; i < expectedResult.size(); i++) {
    EXPECT_EQ(result.raw(i), expectedResult[i]);
  }
}
3717
3718TEST_F(OnnxImporterTest, importWhere) {
3719 ExecutionEngine EE{};
3720 auto &mod = EE.getModule();
3721 Function *F = mod.createFunction("main");
3722
3723 std::string netFilename(GLOW_DATA_PATH
3724 "tests/models/onnxModels/Where.onnxtxt");
3725
3726 Placeholder *out = nullptr;
3727 {
3728 Tensor condition(ElemKind::BoolTy, {1, 1, 4});
3729 Tensor X(ElemKind::FloatTy, {1, 4, 1});
3730 Tensor Y(ElemKind::FloatTy, {4, 1, 1});
3731
3732 condition.zero();
3733 X.zero();
3734 Y.zero();
3735
3736 ONNXModelLoader onnxLD(netFilename, {"Condition", "X", "Y"},
3737 {&condition.getType(), &X.getType(), &Y.getType()},
3738 *F);
3739 out = EXIT_ON_ERR(onnxLD.getOutputByName("Out"));
3740 }
3741
3742 auto *save = getSaveNodeFromDest(out);
3743
3744 SelectNode *WHR = llvm::dyn_cast<SelectNode>(save->getInput().getNode());
3745
3746 ASSERT_TRUE(WHR);
3747 EXPECT_EQ(WHR->getResult().dims()[0], 4);
3748 EXPECT_EQ(WHR->getResult().dims()[1], 4);
3749 EXPECT_EQ(WHR->getResult().dims()[2], 4);
3750}
3751
/// Test importing ONNX Less: it should be loaded as a CmpLT node whose result
/// shape is the broadcast of the two input shapes ({1,4,1} vs {4,1,1} ->
/// {4,4,1}).
TEST_F(OnnxImporterTest, importLess) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/Less.onnxtxt");

  Placeholder *out = nullptr;
  {
    // Input contents don't matter — only graph structure is checked.
    Tensor X(ElemKind::FloatTy, {1, 4, 1});
    Tensor Y(ElemKind::FloatTy, {4, 1, 1});
    X.zero();
    Y.zero();

    ONNXModelLoader onnxLD(netFilename, {"X", "Y"},
                           {&X.getType(), &Y.getType()}, *F);
    out = EXIT_ON_ERR(onnxLD.getOutputByName("Out"));
  }

  auto *save = getSaveNodeFromDest(out);

  CmpLTNode *CMPLT = llvm::dyn_cast<CmpLTNode>(save->getInput().getNode());

  ASSERT_TRUE(CMPLT);
  ASSERT_EQ(CMPLT->getResult().dims().size(), 3);
  EXPECT_EQ(CMPLT->getResult().dims()[0], 4);
  EXPECT_EQ(CMPLT->getResult().dims()[1], 4);
  EXPECT_EQ(CMPLT->getResult().dims()[2], 1);
}
3782
/// Test importing ONNX LessOrEqual: it should be loaded as a CmpLTE node
/// whose result shape is the broadcast of the two input shapes.
TEST_F(OnnxImporterTest, importLessEqual) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/CmpLTE.onnxtxt");

  Placeholder *out = nullptr;
  {
    // Input contents don't matter — only graph structure is checked.
    Tensor X(ElemKind::FloatTy, {1, 4, 1});
    Tensor Y(ElemKind::FloatTy, {4, 1, 1});
    X.zero();
    Y.zero();

    ONNXModelLoader onnxLD(netFilename, {"X", "Y"},
                           {&X.getType(), &Y.getType()}, *F);
    out = EXIT_ON_ERR(onnxLD.getOutputByName("Out"));
  }

  auto *save = getSaveNodeFromDest(out);

  CmpLTENode *CMPLTE = llvm::dyn_cast<CmpLTENode>(save->getInput().getNode());

  ASSERT_TRUE(CMPLTE);
  ASSERT_EQ(CMPLTE->getResult().dims().size(), 3);
  EXPECT_EQ(CMPLTE->getResult().dims()[0], 4);
  EXPECT_EQ(CMPLTE->getResult().dims()[1], 4);
  EXPECT_EQ(CMPLTE->getResult().dims()[2], 1);
}
3813
/// Test importing ONNX Equal: it should be loaded as a CmpEQ node whose
/// result shape is the broadcast of the two input shapes.
TEST_F(OnnxImporterTest, importEqual) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/Equal.onnxtxt");

  Placeholder *out = nullptr;
  {
    // Input contents don't matter — only graph structure is checked.
    Tensor X(ElemKind::FloatTy, {1, 4, 1});
    Tensor Y(ElemKind::FloatTy, {4, 1, 1});
    X.zero();
    Y.zero();

    ONNXModelLoader onnxLD(netFilename, {"X", "Y"},
                           {&X.getType(), &Y.getType()}, *F);
    out = EXIT_ON_ERR(onnxLD.getOutputByName("Out"));
  }

  auto *save = getSaveNodeFromDest(out);

  CmpEQNode *CMPEQ = llvm::dyn_cast<CmpEQNode>(save->getInput().getNode());

  ASSERT_TRUE(CMPEQ);
  ASSERT_EQ(CMPEQ->getResult().dims().size(), 3);
  EXPECT_EQ(CMPEQ->getResult().dims()[0], 4);
  EXPECT_EQ(CMPEQ->getResult().dims()[1], 4);
  EXPECT_EQ(CMPEQ->getResult().dims()[2], 1);
}
3844
/// Loads the binary logical-op model in \p netFilename, feeds bool inputs
/// \p LHS (shape \p LHSShape) and \p RHS (shape \p RHSShape), runs it, and
/// checks the output against \p outputShape / \p expectedValues.
static void importLogical(const std::string &netFilename,
                          llvm::ArrayRef<bool> LHS, llvm::ArrayRef<bool> RHS,
                          llvm::ArrayRef<dim_t> LHSShape,
                          llvm::ArrayRef<dim_t> RHSShape,
                          llvm::ArrayRef<dim_t> outputShape,
                          llvm::ArrayRef<bool> expectedValues) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  // Load the .onnxtxt model.
  Type LHSType(ElemKind::BoolTy, LHSShape);
  Type RHSType(ElemKind::BoolTy, RHSShape);
  ONNXModelLoader onnxLD(netFilename, {"LHS", "RHS"}, {&LHSType, &RHSType}, *F);

  // Get placeholder bindings
  PlaceholderBindings bindings;
  Placeholder *graphOutputVar;
  graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
  auto *LHSPH = mod.getPlaceholderByNameSlow("LHS");
  auto *LHSTensor = bindings.allocate(LHSPH);
  LHSTensor->getHandle<bool>() = LHS;
  auto *RHSPH = mod.getPlaceholderByNameSlow("RHS");
  auto *RHSTensor = bindings.allocate(RHSPH);
  RHSTensor->getHandle<bool>() = RHS;

  // Compile and run graph
  EE.compile(CompilationMode::Infer);
  // Allocate any remaining (e.g. output) placeholders before running.
  bindings.allocate(mod.getPlaceholders());
  EE.run(bindings);
  auto result = bindings.get(graphOutputVar)->getHandle<bool>();

  // Validate results
  ASSERT_TRUE(result.dims() == (llvm::ArrayRef<dim_t>)outputShape);
  for (size_t i = 0; i < result.getType().size(); i++) {
    EXPECT_EQ(result.raw(i), (bool)expectedValues[i]);
  }
}
3883
3884/// Test "and" operation of dimensions 4
3885TEST_F(OnnxImporterTest, importLogicAnd) {
3886 llvm::SmallVector<bool, 12> LHS = {true, true, false, false, true, true,
3887 false, false, false, false, true, true};
3888 llvm::SmallVector<bool, 12> RHS = {true, true, false, true, false, true,
3889 false, true, true, true, true, true};
3890 std::vector<dim_t> LHSShape = {1, 2, 3, 2};
3891 std::vector<dim_t> RHSShape = {1, 2, 3, 2};
3892 std::vector<dim_t> outputShape = {1, 2, 3, 2};
3893 llvm::SmallVector<bool, 12> expectedValues = {true, true, false, false,
3894 false, true, false, false,
3895 false, false, true, true};
3896 std::string netFilename(GLOW_DATA_PATH
3897 "tests/models/onnxModels/logicalAnd.onnxtxt");
3898 importLogical(netFilename, LHS, RHS, LHSShape, RHSShape, outputShape,
3899 expectedValues);
3900}
3901
3902/// Test "broadcast and" of dimensions 4 and 2
3903TEST_F(OnnxImporterTest, importLogicBcastAnd) {
3904 llvm::SmallVector<bool, 12> LHS = {true, true, false, false, true, true,
3905 false, false, false, false, true, true};
3906 llvm::SmallVector<bool, 6> RHS = {false, true, true, true, true, false};
3907 std::vector<dim_t> LHSShape = {1, 2, 3, 2};
3908 std::vector<dim_t> RHSShape = {3, 2};
3909 std::vector<dim_t> outputShape = {1, 2, 3, 2};
3910 llvm::SmallVector<bool, 12> expectedValues = {false, true, false, false,
3911 true, false, false, false,
3912 false, false, true, false};
3913 std::string netFilename(GLOW_DATA_PATH
3914 "tests/models/onnxModels/logicalAndBcast.onnxtxt");
3915 importLogical(netFilename, LHS, RHS, LHSShape, RHSShape, outputShape,
3916 expectedValues);
3917}
3918
3919/// Test "or" operation of dimensions 4
3920TEST_F(OnnxImporterTest, importLogicOr) {
3921 llvm::SmallVector<bool, 12> LHS = {true, true, false, false, true, true,
3922 false, false, false, false, true, true};
3923 llvm::SmallVector<bool, 12> RHS = {true, true, false, true, false, true,
3924 false, true, true, true, true, true};
3925 std::vector<dim_t> LHSShape = {1, 2, 3, 2};
3926 std::vector<dim_t> RHSShape = {1, 2, 3, 2};
3927 std::vector<dim_t> outputShape = {1, 2, 3, 2};
3928 llvm::SmallVector<bool, 12> expectedValues = {
3929 true, true, false, true, true, true, false, true, true, true, true, true};
3930 std::string netFilename(GLOW_DATA_PATH
3931 "tests/models/onnxModels/logicalOr.onnxtxt");
3932 importLogical(netFilename, LHS, RHS, LHSShape, RHSShape, outputShape,
3933 expectedValues);
3934}
3935
3936/// Test "broadcast or" of dimensions 4 and 2
3937TEST_F(OnnxImporterTest, importLogicBcastOr) {
3938 llvm::SmallVector<bool, 12> LHS = {true, true, false, false, true, true,
3939 false, false, false, false, true, true};
3940 llvm::SmallVector<bool, 6> RHS = {false, true, true, true, true, false};
3941 std::vector<dim_t> LHSShape = {1, 2, 3, 2};
3942 std::vector<dim_t> RHSShape = {3, 2};
3943 std::vector<dim_t> outputShape = {1, 2, 3, 2};
3944 llvm::SmallVector<bool, 12> expectedValues = {
3945 true, true, true, true, true, true, false, true, true, true, true, true};
3946 std::string netFilename(GLOW_DATA_PATH
3947 "tests/models/onnxModels/logicalOrBcast.onnxtxt");
3948 importLogical(netFilename, LHS, RHS, LHSShape, RHSShape, outputShape,
3949 expectedValues);
3950}
3951
3952/// Test "xor" operation of dimensions 4
3953TEST_F(OnnxImporterTest, importLogicXor) {
3954 llvm::SmallVector<bool, 12> LHS = {true, true, false, false, true, true,
3955 false, false, false, false, true, true};
3956 llvm::SmallVector<bool, 12> RHS = {true, true, false, true, false, true,
3957 false, true, true, true, true, true};
3958 std::vector<dim_t> LHSShape = {1, 2, 3, 2};
3959 std::vector<dim_t> RHSShape = {1, 2, 3, 2};
3960 std::vector<dim_t> outputShape = {1, 2, 3, 2};
3961 llvm::SmallVector<bool, 12> expectedValues = {false, false, false, true,
3962 true, false, false, true,
3963 true, true, false, false};
3964 std::string netFilename(GLOW_DATA_PATH
3965 "tests/models/onnxModels/logicalXor.onnxtxt");
3966 importLogical(netFilename, LHS, RHS, LHSShape, RHSShape, outputShape,
3967 expectedValues);
3968}
3969
3970/// Test "broadcast xor" of dimensions 4 and 2
3971TEST_F(OnnxImporterTest, importLogicBcastXor) {
3972 llvm::SmallVector<bool, 12> LHS = {true, true, false, false, true, true,
3973 false, false, false, false, true, true};
3974 llvm::SmallVector<bool, 6> RHS = {false, true, true, true, true, false};
3975 std::vector<dim_t> LHSShape = {1, 2, 3, 2};
3976 std::vector<dim_t> RHSShape = {3, 2};
3977 std::vector<dim_t> outputShape = {1, 2, 3, 2};
3978 llvm::SmallVector<bool, 12> expectedValues = {true, false, true, true,
3979 false, true, false, true,
3980 true, true, false, true};
3981 std::string netFilename(GLOW_DATA_PATH
3982 "tests/models/onnxModels/logicalXorBcast.onnxtxt");
3983 importLogical(netFilename, LHS, RHS, LHSShape, RHSShape, outputShape,
3984 expectedValues);
3985}
3986
3987/// Test not operation
TEST_F(OnnxImporterTest, importNot) {
  // Element-wise logical NOT of a {1, 2, 3, 2} bool tensor; the output has
  // the same shape with every element inverted.
  llvm::SmallVector<bool, 12> X = {true,  true,  false, false, true, true,
                                   false, false, false, false, true, true};
  std::vector<dim_t> XShape = {1, 2, 3, 2};
  std::vector<dim_t> YShape = {1, 2, 3, 2};
  llvm::SmallVector<bool, 12> expectedValues = {false, false, true,  true,
                                                false, false, true,  true,
                                                true,  true,  false, false};
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/logicalNot.onnxtxt");

  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");
  PlaceholderBindings bindings;
  Placeholder *graphOutputVar;

  // Load the .onnxtxt model.
  Type XType(ElemKind::BoolTy, XShape);
  ONNXModelLoader onnxLD(netFilename, {"X"}, {&XType}, *F);
  graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
  auto *XPH = mod.getPlaceholderByNameSlow("X");
  auto *XTensor = bindings.allocate(XPH);
  XTensor->getHandle<bool>() = X;

  // Compile and run the graph
  EE.compile(CompilationMode::Infer);
  // Allocate any remaining (e.g. output) placeholders before running.
  bindings.allocate(mod.getPlaceholders());
  EE.run(bindings);

  // Validate results
  auto result = bindings.get(graphOutputVar)->getHandle<bool>();
  ASSERT_TRUE(result.dims() == (llvm::ArrayRef<dim_t>)YShape);
  for (size_t i = 0; i < result.getType().size(); i++) {
    EXPECT_EQ(result.raw(i), (bool)expectedValues[i]);
  }
}
4025
/// Test loading NonZero from an ONNX model.
4027static void testNonZero(llvm::StringRef name,
4028 const std::vector<dim_t> &expectedDims,
4029 const std::vector<int64_t> &expVals) {
4030 ExecutionEngine EE{};
4031 auto &mod = EE.getModule();
4032 Function *F = mod.createFunction("main");
4033
4034 PlaceholderBindings bindings;
4035 Placeholder *out = nullptr;
4036
4037 std::string netFilename(GLOW_DATA_PATH
4038 "tests/models/onnxModels/NonZero.onnxtxt");
4039 {
4040 ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
4041 out = EXIT_ON_ERR(onnxLD.getOutputByName(name));
4042 EXPECT_NE(out, nullptr);
4043 }
4044
4045 // Constant -> NonZero -> PH (x2 for 3 models inside the file)
4046 ASSERT_EQ(mod.getPlaceholders().size(), 3);
4047 ASSERT_EQ(F->getNodes().size(), 3);
4048
4049 EE.compile(CompilationMode::Infer);
4050 bindings.allocate(mod.getPlaceholders());
4051 EE.run(bindings);
4052
4053 auto result = bindings.get(out)->getHandle<int64_t>();
4054
4055 EXPECT_TRUE(result.dims().vec() == expectedDims);
4056 for (size_t i = 0; i < expVals.size(); i++) {
4057 EXPECT_EQ(result.raw(i), expVals[i]);
4058 }
4059}
4060
4061/// Test loading NonZero using constant int32_t tensor initializer.
TEST_F(OnnxImporterTest, importNonZeroI32) {
  // Golden indices for the "out_i32" output: a {5, 29} int64 tensor (per the
  // ONNX NonZero contract, one row per input dimension and one column per
  // nonzero element of the int32 initializer).
  std::vector<int64_t> expVals = {
      0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
      3, 3, 3, 3, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1,
      0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1,
      2, 0, 1, 1, 0, 1, 1, 2, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,
      0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0};
  testNonZero("out_i32", {5, 29}, expVals);
}
4072
4073/// Test loading NonZero using constant float tensor initializer.
4074TEST_F(OnnxImporterTest, importNonZeroF) {
4075 std::vector<int64_t> expVals = {0, 1, 3, 4, 6, 8, 10,
4076 12, 14, 16, 18, 19, 21, 22};
4077 testNonZero("out_f", {1, 14}, expVals);
4078}
4079
/// Test loading NonZero using constant int64_t tensor initializer.
4081TEST_F(OnnxImporterTest, importNonZeroI64) {
4082 std::vector<int64_t> expVals = {0, 1, 3, 4, 6, 8, 10,
4083 12, 14, 16, 18, 19, 21, 22};
4084 testNonZero("out_i64", {1, 14}, expVals);
4085}
4086
4087/// Test loading NMS using initializer nodes op from an ONNX model.
TEST_F(OnnxImporterTest, importNMSInitializer) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/NonMaxSuppression.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  {
    // Input contents don't matter — only the loaded graph structure is
    // checked, so both tensors are zeroed.
    Tensor boxes(ElemKind::FloatTy, {8, 4});
    boxes.zero();

    Tensor scores(ElemKind::FloatTy, {8});
    scores.zero();

    ONNXModelLoader onnxLD(netFilename, {"boxes", "scores"},
                           {&boxes.getType(), &scores.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getOutputByName("indices"));
  }

  // The output must be fed by a NonMaxSuppression node with 3 output rows
  // and center_point_box == 0 (corner-style boxes).
  auto *save = getSaveNodeFromDest(output);
  NonMaxSuppressionNode *NMS =
      llvm::dyn_cast<NonMaxSuppressionNode>(save->getInput().getNode());
  ASSERT_TRUE(NMS);
  EXPECT_EQ(NMS->dims(0)[0], 3);
  EXPECT_EQ(NMS->getCenterPointBox(), 0);
}
4117
4118/// Test loading NMS using optional parameters from an ONNX model.
TEST_F(OnnxImporterTest, importNMSInitOptionalParams) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(
      GLOW_DATA_PATH
      "tests/models/onnxModels/NonMaxSuppressionOptionalParams.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  {
    // Input contents don't matter — only the loaded graph structure is
    // checked, so both tensors are zeroed.
    Tensor boxes(ElemKind::FloatTy, {8, 4});
    boxes.zero();

    Tensor scores(ElemKind::FloatTy, {8});
    scores.zero();

    ONNXModelLoader onnxLD(netFilename, {"boxes", "scores"},
                           {&boxes.getType(), &scores.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getOutputByName("indices"));
  }

  // Check the NMS node as well as the values of its optional parameters as
  // loaded from the model.
  auto *save = getSaveNodeFromDest(output);
  NonMaxSuppressionNode *NMS =
      llvm::dyn_cast<NonMaxSuppressionNode>(save->getInput().getNode());
  ASSERT_TRUE(NMS);
  EXPECT_EQ(NMS->dims(0)[0], 3);
  EXPECT_EQ(NMS->getCenterPointBox(), 0);
  EXPECT_EQ(NMS->getMaxOutputBoxesPerClass(), 3);
  EXPECT_EQ(NMS->getIouThreshold(), 0);
  EXPECT_EQ(NMS->getScoreThreshold(), 0);
}
4152
/// Test loading NMS using Constant Tensors op from an ONNX model.
TEST_F(OnnxImporterTest, importNMSConstTensor) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(
      GLOW_DATA_PATH "tests/models/onnxModels/NonMaxSuppressionSSD.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  {
    // Only the tensor types matter for loading; values are unused.
    Tensor boxes(ElemKind::FloatTy, {8, 4});
    boxes.zero();

    Tensor scores(ElemKind::FloatTy, {8});
    scores.zero();

    ONNXModelLoader onnxLD(netFilename, {"boxes", "scores"},
                           {&boxes.getType(), &scores.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getOutputByName("indices"));
  }

  // Verify the NMS node; this model uses centerPointBox = 1.
  auto *save = getSaveNodeFromDest(output);
  NonMaxSuppressionNode *NMS =
      llvm::dyn_cast<NonMaxSuppressionNode>(save->getInput().getNode());
  ASSERT_TRUE(NMS);
  EXPECT_EQ(NMS->dims(0)[0], 3);
  EXPECT_EQ(NMS->getCenterPointBox(), 1);
}
4183
/// Test loading ONNX NMS using Constant Tensors op from an ONNX model.
TEST_F(OnnxImporterTest, importNMSONNXConstTensor) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string netFilename(
      GLOW_DATA_PATH
      "tests/models/onnxModels/NonMaxSuppressionSSD_ONNX.onnxtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  {
    // ONNX-style NMS takes batched 3D boxes/scores; values are unused here.
    Tensor boxes(ElemKind::FloatTy, {1, 8, 4});
    boxes.zero();

    Tensor scores(ElemKind::FloatTy, {1, 1, 8});
    scores.zero();

    ONNXModelLoader onnxLD(netFilename, {"boxes", "scores"},
                           {&boxes.getType(), &scores.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getOutputByName("indices"));
  }

  // Verify the NMS node's output shape and centerPointBox = 1.
  auto *save = getSaveNodeFromDest(output);
  NonMaxSuppressionNode *NMS =
      llvm::dyn_cast<NonMaxSuppressionNode>(save->getInput().getNode());
  ASSERT_TRUE(NMS);
  EXPECT_EQ(NMS->dims(0)[0], 3);
  EXPECT_EQ(NMS->dims(0)[1], 3);
  EXPECT_EQ(NMS->getCenterPointBox(), 1);
}
4216
/// Test loading and inference of ONNX ROIAlign of onnx example
TEST(onnx, ROIAlign_onnx) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/ROIAlign_onnx.onnxtxt");
  PlaceholderBindings bindings;
  Placeholder *output;
  // Single 10x10 single-channel feature map, three 4-coordinate ROIs, and a
  // batch index per ROI (all ROIs refer to batch image 0).
  Tensor featureMap(ElemKind::FloatTy, {1, 1, 10, 10});
  Tensor boxes(ElemKind::FloatTy, {3, 4});
  Tensor batchedIndices(ElemKind::Int64ITy, {
                                                3,
                                            });

  featureMap.getHandle() = {
      0.2764, 0.7150, 0.1958, 0.3416, 0.4638, 0.0259, 0.2963, 0.6518, 0.4856,
      0.7250, 0.9637, 0.0895, 0.2919, 0.6753, 0.0234, 0.6132, 0.8085, 0.5324,
      0.8992, 0.4467, 0.3265, 0.8479, 0.9698, 0.2471, 0.9336, 0.1878, 0.4766,
      0.4308, 0.3400, 0.2162, 0.0206, 0.1720, 0.2155, 0.4394, 0.0653, 0.3406,
      0.7724, 0.3921, 0.2541, 0.5799, 0.4062, 0.2194, 0.4473, 0.4687, 0.7109,
      0.9327, 0.9815, 0.6320, 0.1728, 0.6119, 0.3097, 0.1283, 0.4984, 0.5068,
      0.4279, 0.0173, 0.4388, 0.0430, 0.4671, 0.7119, 0.1011, 0.8477, 0.4726,
      0.1777, 0.9923, 0.4042, 0.1869, 0.7795, 0.9946, 0.9689, 0.1366, 0.3671,
      0.7011, 0.6234, 0.9867, 0.5585, 0.6985, 0.5609, 0.8788, 0.9928, 0.5697,
      0.8511, 0.6711, 0.9406, 0.8751, 0.7496, 0.1650, 0.1049, 0.1559, 0.2514,
      0.7012, 0.4056, 0.7879, 0.3461, 0.0415, 0.2998, 0.5094, 0.3727, 0.5482,
      0.0502};

  boxes.getHandle() = {0, 0, 9, 9, 0, 5, 4, 9, 5, 5, 9, 9};

  batchedIndices.getHandle<int64_t>() = {0, 0, 0};
  // Reference result: one 5x5 aligned output per ROI (values from the ONNX
  // example).
  std::vector<float> expectedResult = {
      0.4664, 0.4466, 0.3405, 0.5688, 0.6068, 0.3714, 0.4296, 0.3835, 0.5562,
      0.351,  0.2768, 0.4883, 0.5222, 0.5528, 0.4171, 0.4713, 0.4844, 0.6904,
      0.492,  0.8774, 0.6239, 0.7125, 0.6289, 0.3355, 0.3495,

      0.3022, 0.4305, 0.4696, 0.3978, 0.5423, 0.3656, 0.705,  0.5165, 0.3172,
      0.7015, 0.2912, 0.5059, 0.6476, 0.6235, 0.8299, 0.5916, 0.7389, 0.7048,
      0.8372, 0.8893, 0.6227, 0.6153, 0.7097, 0.6154, 0.4585,

      0.2384, 0.3379, 0.3717, 0.61,   0.7601, 0.3767, 0.3785, 0.7147, 0.9243,
      0.9727, 0.5749, 0.5826, 0.5709, 0.7619, 0.877,  0.5355, 0.2566, 0.2141,
      0.2796, 0.36,   0.4365, 0.3504, 0.2887, 0.3661, 0.2349,
  };

  ONNXModelLoader onnxLD(
      netFilename, {"featureMap", "boxes", "batchIndices"},
      {&featureMap.getType(), &boxes.getType(), &batchedIndices.getType()}, *F);

  bindings.allocate(mod.getPlaceholders());
  updateInputPlaceholdersByName(bindings, &mod,
                                {"featureMap", "boxes", "batchIndices"},
                                {&featureMap, &boxes, &batchedIndices});
  output = EXIT_ON_ERR(onnxLD.getOutputByName("result"));
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);
  // Compare against the reference with a loose tolerance.
  auto resultH = bindings.get(output)->getHandle<float>();
  std::vector<dim_t> outputShape = {3, 1, 5, 5};
  float delta = 1e-03;
  ASSERT_TRUE(resultH.dims() == (llvm::ArrayRef<dim_t>)outputShape);
  for (size_t i = 0; i < resultH.getType().size(); i++) {
    EXPECT_NEAR(resultH.raw(i), expectedResult[i], delta);
  }
}
4282
4283/// Test loading and inference of ONNX MatMul operator with
4284/// 4D inputs.
4285TEST(onnx, MatMul4D) {
4286 ExecutionEngine EE{};
4287 auto &mod = EE.getModule();
4288 Function *F = mod.createFunction("main");
4289 std::string netFilename(GLOW_DATA_PATH
4290 "tests/models/onnxModels/MatMul4D.onnxtxt");
4291 PlaceholderBindings bindings;
4292 Placeholder *output;
4293 Placeholder *refOutput;
4294
4295 ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
4296 output = EXIT_ON_ERR(onnxLD.getOutputByName("Y"));
4297 refOutput = EXIT_ON_ERR(onnxLD.getOutputByName("Yref"));
4298
4299 EE.compile(CompilationMode::Infer);
4300 bindings.allocate(mod.getPlaceholders());
4301 EE.run(bindings);
4302 auto resultH = bindings.get(output)->getHandle();
4303 auto refYH = bindings.get(refOutput)->getHandle();
4304 std::vector<dim_t> outputShape = {1, 2, 3, 3};
4305 float delta = 1e-03;
4306 ASSERT_TRUE(resultH.dims() == (llvm::ArrayRef<dim_t>)outputShape);
4307 for (size_t i = 0; i < resultH.getType().size(); i++) {
4308 EXPECT_NEAR(resultH.raw(i), refYH.raw(i), delta);
4309 }
4310}
4311
/// Test that a model with a symbolic dimension ("dim_param") loads correctly
/// when the input type is given explicitly by the caller.
TEST_F(OnnxImporterTest, importDimParamExplicit) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/dimParam.onnxtxt");
  auto *F = mod.createFunction("main");

  // Import ONNX model with explicit input information.
  {
    Tensor inputTensor(ElemKind::FloatTy, {1, 2});
    // Bind the model's undefined symbol to 1 for the duration of the load.
    setOnnxDefineSymbol({"ONNXUndefinedSymbol,1"});
    ONNXModelLoader onnxLD(netFilename, {"input"}, {&inputTensor.getType()},
                           *F);
    setOnnxDefineSymbol({});
  }

  // Validate placeholder sizes.
  Placeholder *inputPH, *outputPH;
  inputPH = mod.getPlaceholderByNameSlow("input");
  outputPH = mod.getPlaceholderByNameSlow("output");
  EXPECT_TRUE(inputPH);
  EXPECT_TRUE(outputPH);
  EXPECT_EQ(inputPH->dims()[0], 1);
  EXPECT_EQ(inputPH->dims()[1], 2);
  EXPECT_EQ(outputPH->dims()[0], 1);
  EXPECT_EQ(outputPH->dims()[1], 2);
}
4339
/// Test that a model with a symbolic dimension ("dim_param") loads correctly
/// when input types are taken from the model itself (no explicit inputs).
TEST_F(OnnxImporterTest, importDimParamImplicit) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/dimParam.onnxtxt");
  auto *F = mod.createFunction("main");

  // Import ONNX model with implicit input information.
  {
    // Bind the model's undefined symbol to 1 for the duration of the load.
    setOnnxDefineSymbol({"ONNXUndefinedSymbol,1"});
    ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
    setOnnxDefineSymbol({});
  }

  // Validate placeholder sizes.
  Placeholder *inputPH, *outputPH;
  inputPH = mod.getPlaceholderByNameSlow("input");
  outputPH = mod.getPlaceholderByNameSlow("output");
  EXPECT_TRUE(inputPH);
  EXPECT_TRUE(outputPH);
  EXPECT_EQ(inputPH->dims()[0], 1);
  EXPECT_EQ(inputPH->dims()[1], 2);
  EXPECT_EQ(outputPH->dims()[0], 1);
  EXPECT_EQ(outputPH->dims()[1], 2);
}
4365
4366static void importUnary(const std::string &netFilename,
4367 llvm::ArrayRef<float> input,
4368 llvm::ArrayRef<dim_t> inputShape,
4369 llvm::ArrayRef<dim_t> outputShape,
4370 llvm::ArrayRef<float> expectedValues) {
4371
4372 float delta = 1e-08;
4373 ExecutionEngine EE{};
4374 auto &mod = EE.getModule();
4375 Function *F = mod.createFunction("main");
4376 PlaceholderBindings bindings;
4377 Placeholder *graphOutputVar;
4378 // Load the .onnxtxt model
4379 Type inputType(ElemKind::FloatTy, inputShape);
4380 ONNXModelLoader onnxLD(netFilename, {"input"}, {&inputType}, *F);
4381 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
4382 auto inputPH = mod.getPlaceholderByNameSlow("input");
4383 auto *inputTensor = bindings.allocate(inputPH);
4384 inputTensor->getHandle<float>() = input;
4385 EE.compile(CompilationMode::Infer);
4386 bindings.allocate(mod.getPlaceholders());
4387 EE.run(bindings);
4388 auto result = bindings.get(graphOutputVar)->getHandle<float>();
4389 ASSERT_TRUE(result.dims() == (llvm::ArrayRef<dim_t>)outputShape);
4390 for (size_t i = 0; i < result.getType().size(); i++) {
4391 EXPECT_NEAR(result.raw(i), (float)expectedValues[i], delta);
4392 }
4393}
4394
4395TEST(onnx, importSign) {
4396 std::vector<float> input = {-1, -2, 0, -2, 1, 2, 1, 2, -10, 0, 0, -2};
4397 std::vector<dim_t> inputShape = {1, 2, 3, 2};
4398 std::vector<dim_t> outputShape = {1, 2, 3, 2};
4399 std::vector<float> expectedValues = {-1, -1, 0, -1, 1, 1, 1, 1, -1, 0, 0, -1};
4400 std::string netFilename(GLOW_DATA_PATH
4401 "tests/models/onnxModels/sign.onnxtxt");
4402 importUnary(netFilename, input, inputShape, outputShape, expectedValues);
4403}
4404
4405static void
4406testLoop(std::string &filename, const std::vector<dim_t> &expected_v_finalDims,
4407 const std::vector<dim_t> &expected_scan_output_finalDims,
4408 const std::vector<float> &expected_v_finalValues,
4409 const std::vector<float> &expectedscan_output_finalValues) {
4410 ExecutionEngine EE;
4411 auto &mod = EE.getModule();
4412 auto *F = mod.createFunction("main");
4413
4414 std::string netFilename =
4415 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + filename;
4416
4417 PlaceholderBindings bindings;
4418 Placeholder *v_final;
4419 Placeholder *scan_output_final;
4420
4421 Tensor init_i(ElemKind::FloatTy, {1});
4422 init_i.getHandle() = {0};
4423 Tensor inc(ElemKind::FloatTy, {1});
4424 inc.getHandle() = {1};
4425
4426 {
4427 ONNXModelLoader onnxLD(netFilename, {"init_i", "inc"},
4428 {&init_i.getType(), &inc.getType()}, *F);
4429
4430 v_final = EXIT_ON_ERR(onnxLD.getOutputByName("v_final"));
4431 scan_output_final =
4432 EXIT_ON_ERR(onnxLD.getOutputByName("scan_output_final"));
4433
4434 bindings.allocate(mod.getPlaceholders());
4435 updateInputPlaceholdersByName(bindings, &mod, {"init_i", "inc"},
4436 {&init_i, &inc});
4437 }
4438
4439 auto *v_finalT = bindings.get(v_final);
4440 auto *scan_output_finalT = bindings.get(scan_output_final);
4441
4442 EE.compile(CompilationMode::Infer);
4443 EE.run(bindings);
4444
4445 auto v_finalH = v_finalT->getHandle();
4446 auto scan_output_finalH = scan_output_finalT->getHandle();
4447
4448 EXPECT_EQ(v_finalH.dims().vec(), expected_v_finalDims);
4449 EXPECT_EQ(scan_output_finalH.dims().vec(), expected_scan_output_finalDims);
4450 for (size_t i = 0; i < expected_v_finalValues.size(); i++) {
4451 EXPECT_FLOAT_EQ(v_finalH.raw(i), expected_v_finalValues[i]);
4452 }
4453 for (size_t i = 0; i < expectedscan_output_finalValues.size(); i++) {
4454 EXPECT_FLOAT_EQ(scan_output_finalH.raw(i),
4455 expectedscan_output_finalValues[i]);
4456 }
4457}
4458
TEST_F(OnnxImporterTest, importLoopStatic) {
  // In this loop, cond is not changed in the loop body.
  //
  // input (trip_count, cond)
  //
  // int max_trip_count = 10;
  // cond = true;
  // init_i = 0;
  // for (i=0; i< max_trip_count && cond; ++i){
  //   scan_output[i] = init_i;
  //   init_i = init_i + inc;
  // }
  std::string filename("loop_static.onnxtxt");
  std::vector<dim_t> expected_v_finalDims = {1};
  std::vector<dim_t> expected_scan_output_finalDims = {10, 1};
  std::vector<float> expected_v_finalValues = {10.};
  std::vector<float> expectedscan_output_finalValues = {0., 1., 2., 3., 4.,
                                                        5., 6., 7., 8., 9.};
  testLoop(filename, expected_v_finalDims, expected_scan_output_finalDims,
           expected_v_finalValues, expectedscan_output_finalValues);
}
4480
TEST_F(OnnxImporterTest, importLoopNoIteration) {
  // The loop should be zero iteration.
  //
  // input (trip_count, 0)
  //
  // int max_trip_count = 10;
  // cond = false;
  // init_i = 0;
  // for (i=0; i < max_trip_count && cond; ++i) {
  //   scan_output[i] = init_i;
  //   init_i = init_i + inc;
  // }
  std::string filename("loop_no_iteration.onnxtxt");
  std::vector<dim_t> expected_v_finalDims = {1};
  std::vector<dim_t> expected_scan_output_finalDims = {1, 1};
  std::vector<float> expected_v_finalValues = {0.};
  std::vector<float> expectedscan_output_finalValues = {0.};
  testLoop(filename, expected_v_finalDims, expected_scan_output_finalDims,
           expected_v_finalValues, expectedscan_output_finalValues);
}
4501
TEST(onnx, importLoopCond) {
  // In this loop, cond is updated in the loop body, but it should be folded
  // into a Constant during loading time.
  // The loop should exit by cond.
  //
  // input(trip_count, cond) :
  //
  // int max_trip_count = 9223372036854775807;
  // int reduce_i = 20;
  // for (i=0; i < max_trip_count && cond; ++i) {
  //   scan_output[i] = reduce_i;
  //   reduce_i = reduce_i - 1;
  //   cond = (bool)(reduce_i - 1);
  // }
  // The loop runs 20 iterations; reduce_i counts down from 20 to 1.
  std::string filename("loop_cond.onnxtxt");
  std::vector<dim_t> expected_v_finalDims = {1};
  std::vector<dim_t> expected_scan_output_finalDims = {20, 1};
  std::vector<float> expected_v_finalValues = {0.};
  std::vector<float> expectedscan_output_finalValues = {
      20., 19., 18., 17., 16., 15., 14., 13., 12., 11.,
      10., 9.,  8.,  7.,  6.,  5.,  4.,  3.,  2.,  1.};
  testLoop(filename, expected_v_finalDims, expected_scan_output_finalDims,
           expected_v_finalValues, expectedscan_output_finalValues);
}
4526
4527TEST(onnx, importLoopTripCount) {
4528 // The loop should exit by trip_count.
4529 //
4530 // input(trip_count, cond) :
4531 //
4532 // int max_trip_count = 20;
4533 // int reduce_i = 20;
4534 // for (i=0; i < max_trip_count && cond; ++i) {
4535 // scan_output[i] = reduce_i;
4536 // reduce_i = reduce_i - 1;
4537 // cond = (bool)(reduce_i - 1);
4538 // }
4539 std::string filename("loop_tripcount.onnxtxt");
4540 std::vector<dim_t> expected_v_finalDims = {1};
4541 std::vector<dim_t> expected_scan_output_finalDims = {20, 1};
4542 std::vector<float> expected_v_finalValues = {0.0};
4543 std::vector<float> expectedscan_output_finalValues = {
4544 20., 19., 18., 17., 16., 15., 14., 13., 12., 11.,
4545 10., 9., 8., 7., 6., 5., 4., 3., 2., 1.};
4546 testLoop(filename, expected_v_finalDims, expected_scan_output_finalDims,
4547 expected_v_finalValues, expectedscan_output_finalValues);
4548}
4549
TEST(onnx, importLoopEmptyTripCount) {
  // The loop should ignore trip-count, so exit by cond.
  //
  // input ("", 1)
  //
  // int reduce_i = 10;
  // bool cond = true;
  // for (int i = 0; cond; ++i) {
  //   scan_output[i] = reduce_i;
  //   reduce_i = reduce_i - 1;
  //   cond = (bool)reduce_i;
  // }
  // The loop runs 10 iterations; reduce_i counts down from 10 to 1.
  std::string filename("loop_empty_tripcount.onnxtxt");
  std::vector<dim_t> expected_v_finalDims = {1};
  std::vector<dim_t> expected_scan_output_finalDims = {10, 1};
  std::vector<float> expected_v_finalValues = {0.};
  std::vector<float> expectedscan_output_finalValues = {10., 9., 8., 7., 6.,
                                                        5.,  4., 3., 2., 1.};
  testLoop(filename, expected_v_finalDims, expected_scan_output_finalDims,
           expected_v_finalValues, expectedscan_output_finalValues);
}
4571
4572TEST(onnx, importLoopEmptyCond) {
4573 // The loop should ignore cond, so exit by trip_count.
4574 //
4575 // input(trip_count, "") :
4576 //
4577 // int max_trip_count = 7;
4578 // int reduce_i = 5;
4579 // for (i=0; i < max_trip_count; ++i) {
4580 // scan_output[i] = reduce_i;
4581 // reduce_i = reduce_i - 1;
4582 // cond = (bool)(reduce_i - 1); // ignored
4583 // }
4584 std::string filename("loop_emptycond.onnxtxt");
4585 std::vector<dim_t> expected_v_finalDims = {1};
4586 std::vector<dim_t> expected_scan_output_finalDims = {7, 1};
4587 std::vector<float> expected_v_finalValues = {-2.0};
4588 std::vector<float> expectedscan_output_finalValues = {5., 4., 3., 2.,
4589 1., 0., -1.};
4590 testLoop(filename, expected_v_finalDims, expected_scan_output_finalDims,
4591 expected_v_finalValues, expectedscan_output_finalValues);
4592}
4593
TEST(onnx, importLoopWithoutN) {
  // The loop should exit by trip_count.
  //
  // input(trip_count, cond) :
  // bool cond = true;
  // int max_trip_count = 10;
  // for (i=0; i < max_trip_count && cond; ++i) {
  //   scan_output[i] = i;
  // }
  // The scan output simply records the iteration index each trip.
  std::string filename("loop_withoutN.onnxtxt");
  std::vector<dim_t> expected_v_finalDims = {1};
  std::vector<dim_t> expected_scan_output_finalDims = {10, 1};
  std::vector<float> expected_v_finalValues = {0.0};
  std::vector<float> expectedscan_output_finalValues = {0., 1., 2., 3., 4.,
                                                        5., 6., 7., 8., 9.};
  testLoop(filename, expected_v_finalDims, expected_scan_output_finalDims,
           expected_v_finalValues, expectedscan_output_finalValues);
}
4612
4613/// Test loading RNN from a ONNX model. The ONNX model already computes
4614/// the error compared to a PyTorch reference implementation.
4615static void importRNN(std::string fileName) {
4616 ExecutionEngine EE;
4617 auto &mod = EE.getModule();
4618 Function *F = mod.createFunction("main");
4619
4620 PlaceholderBindings bindings;
4621 {
4622 ONNXModelLoader onnxLD(fileName, {}, {}, *F);
4623 bindings.allocate(mod.getPlaceholders());
4624 }
4625
4626 // Compile and run.
4627 EE.compile(CompilationMode::Infer);
4628 EE.run(bindings);
4629
4630 // Verify RNN error.
4631 Placeholder *Y_err_ph = mod.getPlaceholderByNameSlow("Y_err");
4632 EXPECT_TRUE(Y_err_ph);
4633 auto err = bindings.get(Y_err_ph)->getHandle();
4634 for (size_t idx = 0; idx < Y_err_ph->getType()->size(); idx++) {
4635 EXPECT_TRUE(std::abs(err.raw(idx)) < 1e-6);
4636 }
4637}
4638
// RNN model variants: direction, bias, and initial-state coverage.
TEST_F(OnnxImporterTest, importRNNForward) {
  importRNN(GLOW_DATA_PATH "tests/models/onnxModels/rnnForward.onnxtxt");
}

TEST_F(OnnxImporterTest, importRNNReverse) {
  importRNN(GLOW_DATA_PATH "tests/models/onnxModels/rnnReverse.onnxtxt");
}

TEST_F(OnnxImporterTest, importRNNBidirectional) {
  importRNN(GLOW_DATA_PATH "tests/models/onnxModels/rnnBidirectional.onnxtxt");
}

TEST_F(OnnxImporterTest, importRNNForwardNoBias) {
  importRNN(GLOW_DATA_PATH "tests/models/onnxModels/rnnForwardNoBias.onnxtxt");
}

TEST_F(OnnxImporterTest, importRNNForwardNoState) {
  importRNN(GLOW_DATA_PATH "tests/models/onnxModels/rnnForwardNoState.onnxtxt");
}
4658
4659/// Test loading GRU from a ONNX model. The ONNX model already computes
4660/// the error compared to a PyTorch reference implementation.
4661static void importGRU(std::string fileName) {
4662 ExecutionEngine EE;
4663 auto &mod = EE.getModule();
4664 Function *F = mod.createFunction("main");
4665
4666 PlaceholderBindings bindings;
4667 {
4668 ONNXModelLoader onnxLD(fileName, {}, {}, *F);
4669 bindings.allocate(mod.getPlaceholders());
4670 }
4671
4672 // Compile and run.
4673 EE.compile(CompilationMode::Infer);
4674 EE.run(bindings);
4675
4676 // Verify GRU error.
4677 Placeholder *Y_err_ph = mod.getPlaceholderByNameSlow("Y_err");
4678 EXPECT_TRUE(Y_err_ph);
4679 auto err = bindings.get(Y_err_ph)->getHandle();
4680 for (size_t idx = 0; idx < Y_err_ph->getType()->size(); idx++) {
4681 EXPECT_TRUE(std::abs(err.raw(idx)) < 1e-6);
4682 }
4683}
4684
// GRU model variants: direction, bias, state, and linear_before_reset.
TEST_F(OnnxImporterTest, importGRUForward) {
  importGRU(GLOW_DATA_PATH "tests/models/onnxModels/gruForward.onnxtxt");
}

TEST_F(OnnxImporterTest, importGRUReverse) {
  importGRU(GLOW_DATA_PATH "tests/models/onnxModels/gruReverse.onnxtxt");
}

TEST_F(OnnxImporterTest, importGRUBidirectional) {
  importGRU(GLOW_DATA_PATH "tests/models/onnxModels/gruBidirectional.onnxtxt");
}

TEST_F(OnnxImporterTest, importGRUForwardNoBias) {
  importGRU(GLOW_DATA_PATH "tests/models/onnxModels/gruForwardNoBias.onnxtxt");
}

TEST_F(OnnxImporterTest, importGRUForwardNoState) {
  importGRU(GLOW_DATA_PATH "tests/models/onnxModels/gruForwardNoState.onnxtxt");
}

TEST_F(OnnxImporterTest, importGRUForwardLinearBeforeReset) {
  importGRU(GLOW_DATA_PATH
            "tests/models/onnxModels/gruForwardLinearBeforeReset.onnxtxt");
}
4709
4710/// Test loading LSTM from a ONNX model. The ONNX model already computes
4711/// the error compared to a PyTorch reference implementation.
4712static void importLSTM(std::string fileName) {
4713 ExecutionEngine EE;
4714 auto &mod = EE.getModule();
4715 Function *F = mod.createFunction("main");
4716
4717 PlaceholderBindings bindings;
4718 {
4719 ONNXModelLoader onnxLD(fileName, {}, {}, *F);
4720 bindings.allocate(mod.getPlaceholders());
4721 }
4722
4723 // Compile and run.
4724 EE.compile(CompilationMode::Infer);
4725 EE.run(bindings);
4726
4727 // Verify LSTM error.
4728 Placeholder *Y_err_ph = mod.getPlaceholderByNameSlow("Y_err");
4729 EXPECT_TRUE(Y_err_ph);
4730 auto err = bindings.get(Y_err_ph)->getHandle();
4731 for (size_t idx = 0; idx < Y_err_ph->getType()->size(); idx++) {
4732 EXPECT_TRUE(std::abs(err.raw(idx)) < 1e-6);
4733 }
4734}
4735
// LSTM model variants: direction, bias, state, peephole, input_forget.
TEST_F(OnnxImporterTest, importLSTMForward) {
  importLSTM(GLOW_DATA_PATH "tests/models/onnxModels/lstmForward.onnxtxt");
}

TEST_F(OnnxImporterTest, importLSTMReverse) {
  importLSTM(GLOW_DATA_PATH "tests/models/onnxModels/lstmReverse.onnxtxt");
}

TEST_F(OnnxImporterTest, importLSTMBidirectional) {
  importLSTM(GLOW_DATA_PATH
             "tests/models/onnxModels/lstmBidirectional.onnxtxt");
}

TEST_F(OnnxImporterTest, importLSTMForwardNoBias) {
  importLSTM(GLOW_DATA_PATH
             "tests/models/onnxModels/lstmForwardNoBias.onnxtxt");
}

TEST_F(OnnxImporterTest, importLSTMForwardNoState) {
  importLSTM(GLOW_DATA_PATH
             "tests/models/onnxModels/lstmForwardNoState.onnxtxt");
}

TEST_F(OnnxImporterTest, importLSTMForwardWithPeephole) {
  importLSTM(GLOW_DATA_PATH
             "tests/models/onnxModels/lstmForwardWithPeephole.onnxtxt");
}

TEST_F(OnnxImporterTest, importLSTMForwardInputForget) {
  importLSTM(GLOW_DATA_PATH
             "tests/models/onnxModels/lstmForwardInputForget.onnxtxt");
}
4768
4769/// Test loading Flip from a ONNX model. The ONNX model already computes
4770/// the error.
4771static void importFlip(std::string fileName) {
4772 ExecutionEngine EE;
4773 auto &mod = EE.getModule();
4774 Function *F = mod.createFunction("main");
4775
4776 PlaceholderBindings bindings;
4777 {
4778 ONNXModelLoader onnxLD(fileName, {}, {}, *F);
4779 bindings.allocate(mod.getPlaceholders());
4780 }
4781
4782 // Compile and run.
4783 EE.compile(CompilationMode::Infer);
4784 EE.run(bindings);
4785
4786 // Verify error.
4787 Placeholder *Y_err_ph = mod.getPlaceholderByNameSlow("Y_err");
4788 EXPECT_TRUE(Y_err_ph);
4789 auto err = bindings.get(Y_err_ph)->getHandle();
4790 for (size_t idx = 0; idx < Y_err_ph->getType()->size(); idx++) {
4791 EXPECT_EQ(err.raw(idx), 0);
4792 }
4793}
4794
// Flip variants: with an explicit axis attribute and without one.
TEST_F(OnnxImporterTest, importFlipWithAxis) {
  importFlip(GLOW_DATA_PATH "tests/models/onnxModels/flipWithAxis.onnxtxt");
}

TEST_F(OnnxImporterTest, importFlipNoAxis) {
  importFlip(GLOW_DATA_PATH "tests/models/onnxModels/flipNoAxis.onnxtxt");
}
4802
/// Test loading FRWQSparseLengthsWeightedSum from an ONNX model.
TEST_F(OnnxImporterTest, importFRWQSLWS) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  auto *F = mod.createFunction("main");
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/fusedSLWS.onnxtxt");
  Placeholder *output;
  {
    // Only the tensor types matter for loading; values are unused.
    Tensor weights(ElemKind::FloatTy, {8});
    Tensor indices(ElemKind::Int64ITy, {8});
    Tensor lengths(ElemKind::Int32ITy, {5});
    ONNXModelLoader onnxLD(
        netFilename, {"weights", "indices", "lengths"},
        {&weights.getType(), &indices.getType(), &lengths.getType()}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
  }

  // Verify structure: {Constant, PH, PH, PH} -> FRWQSLWS -> Save -> PH.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);
  // FRWQSLWS, Save nodes
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(output);
  auto *FRWQSLWS =
      llvm::dyn_cast<FusedRowwiseQuantizedSparseLengthsWeightedSumNode>(
          save->getInput().getNode());
  ASSERT_TRUE(FRWQSLWS);
  // The data input must be a fused-rowwise-quantized Constant.
  auto *data = llvm::dyn_cast<Constant>(FRWQSLWS->getData());
  ASSERT_TRUE(data);
  EXPECT_EQ(data->dims().vec(), std::vector<dim_t>({3, 10}));
  EXPECT_EQ(data->getType()->getElementType(), ElemKind::UInt8FusedQTy);
  // weights/indices/lengths stay as float/int64/int32 Placeholders.
  auto *weights = llvm::dyn_cast<Placeholder>(FRWQSLWS->getWeights());
  ASSERT_TRUE(weights);
  EXPECT_EQ(weights->dims().vec(), std::vector<dim_t>({8}));
  EXPECT_EQ(weights->getType()->getElementType(), ElemKind::FloatTy);
  auto *indices = llvm::dyn_cast<Placeholder>(FRWQSLWS->getIndices());
  ASSERT_TRUE(indices);
  EXPECT_EQ(indices->dims().vec(), std::vector<dim_t>({8}));
  EXPECT_EQ(indices->getType()->getElementType(), ElemKind::Int64ITy);
  auto *lengths = llvm::dyn_cast<Placeholder>(FRWQSLWS->getLengths());
  ASSERT_TRUE(lengths);
  EXPECT_EQ(lengths->dims().vec(), std::vector<dim_t>({5}));
  EXPECT_EQ(lengths->getType()->getElementType(), ElemKind::Int32ITy);
}
4847
4848/// Test loading AudioSpectrogram from an ONNX model. The ONNX model already
4849/// computes the error compared to a TensorFlow reference implementation.
4850static void importAudioSpectrogram(std::string fileName) {
4851 ExecutionEngine EE;
4852 auto &mod = EE.getModule();
4853 Function *F = mod.createFunction("main");
4854
4855 PlaceholderBindings bindings;
4856 {
4857 ONNXModelLoader onnxLD(fileName, {}, {}, *F);
4858 bindings.allocate(mod.getPlaceholders());
4859 }
4860
4861 // Compile and run.
4862 EE.compile(CompilationMode::Infer);
4863 EE.run(bindings);
4864
4865 // Verify error.
4866 Placeholder *errPH = mod.getPlaceholderByNameSlow("spectrogram_err");
4867 EXPECT_TRUE(errPH);
4868 auto errH = bindings.get(errPH)->getHandle();
4869 auto fftLen = (errPH->getType()->dims()[1] - 1) * 2;
4870 for (size_t idx = 0; idx < errPH->getType()->size(); idx++) {
4871 float errVal = std::abs(errH.raw(idx)) / (float)(fftLen);
4872 EXPECT_TRUE(errVal < 1e-5);
4873 }
4874}
4875
// AudioSpectrogram variants: window count and squared/non-squared magnitude.
TEST_F(OnnxImporterTest, importAudioSpectrogramOneWindow) {
  importAudioSpectrogram(
      GLOW_DATA_PATH
      "tests/models/onnxModels/audioSpectrogramOneWindow.onnxtxt");
}

TEST_F(OnnxImporterTest, importAudioSpectrogramTwoWindow) {
  importAudioSpectrogram(
      GLOW_DATA_PATH
      "tests/models/onnxModels/audioSpectrogramTwoWindow.onnxtxt");
}

TEST_F(OnnxImporterTest, importAudioSpectrogramNonSquared) {
  importAudioSpectrogram(
      GLOW_DATA_PATH
      "tests/models/onnxModels/audioSpectrogramNonSquared.onnxtxt");
}
4893
4894/// Test loading MFCC from an ONNX model. The ONNX model already computes
4895/// the error compared to a TensorFlow reference implementation.
4896static void importMFCC(std::string fileName) {
4897 ExecutionEngine EE;
4898 auto &mod = EE.getModule();
4899 Function *F = mod.createFunction("main");
4900
4901 PlaceholderBindings bindings;
4902 {
4903 ONNXModelLoader onnxLD(fileName, {}, {}, *F);
4904 bindings.allocate(mod.getPlaceholders());
4905 }
4906
4907 // Compile and run.
4908 EE.compile(CompilationMode::Infer);
4909 EE.run(bindings);
4910
4911 // Verify error.
4912 Placeholder *errPH = mod.getPlaceholderByNameSlow("coefficients_err");
4913 EXPECT_TRUE(errPH);
4914 auto errH = bindings.get(errPH)->getHandle();
4915 for (size_t idx = 0; idx < errPH->getType()->size(); idx++) {
4916 EXPECT_TRUE(std::abs(errH.raw(idx)) < 1e-5);
4917 }
4918}
4919
// MFCC variants: one analysis window and two.
TEST_F(OnnxImporterTest, importMFCCOneWindow) {
  importMFCC(GLOW_DATA_PATH "tests/models/onnxModels/mfccOneWindow.onnxtxt");
}

TEST_F(OnnxImporterTest, importMFCCTwoWindow) {
  importMFCC(GLOW_DATA_PATH "tests/models/onnxModels/mfccTwoWindow.onnxtxt");
}
4927
/// Test loading a custom ONNX Glow quantized TopK.
TEST_F(OnnxImporterTest, CustomGlowTopKQuantized) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  auto *F = mod.createFunction("main");
  std::string netFilename(
      GLOW_DATA_PATH
      "tests/models/onnxModels/glow_custom_op_topk_quantized.onnxtxt");
  Placeholder *valuesPH, *indicesPH;
  {
    ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
    valuesPH = EXIT_ON_ERR(onnxLD.getOutputByName("save_values"));
    indicesPH = EXIT_ON_ERR(onnxLD.getOutputByName("save_indices"));
  }

  // Verify structure: PH -> TopK -> Save -> PH.
  //                          |
  //                          v
  //                         Save -> PH
  EXPECT_EQ(mod.getPlaceholders().size(), 3);
  // TopK and two Save nodes (one per TopK output).
  EXPECT_EQ(F->getNodes().size(), 3);

  // The values output keeps the input's quantization parameters.
  auto *values = getSaveNodeFromDest(valuesPH);
  ASSERT_TRUE(values);
  EXPECT_EQ(values->getInput().getType()->getElementType(), ElemKind::Int8QTy);
  EXPECT_EQ(values->getInput().getType()->getScale(), 1.2f);
  EXPECT_EQ(values->getInput().getType()->getOffset(), 5);
  EXPECT_EQ(values->getInput().dims().vec(), std::vector<dim_t>({3, 1, 3}));

  // The indices output is a plain int64 tensor of the same shape.
  auto *indices = getSaveNodeFromDest(indicesPH);
  ASSERT_TRUE(indices);
  EXPECT_EQ(indices->getInput().getType()->getElementType(),
            ElemKind::Int64ITy);
  EXPECT_EQ(indices->getInput().dims().vec(), std::vector<dim_t>({3, 1, 3}));

  // Both Saves must hang off the same TopK node.
  EXPECT_EQ(indices->getInput().getNode(), values->getInput().getNode());

  auto *TKN = llvm::dyn_cast<TopKNode>(indices->getInput());
  ASSERT_TRUE(TKN);
  EXPECT_EQ(TKN->getK(), 3);

  // The TopK input is a quantized placeholder with the expected type.
  auto *input = llvm::dyn_cast<Placeholder>(TKN->getInput());
  ASSERT_TRUE(input);
  EXPECT_EQ(input->dims().vec(), std::vector<dim_t>({3, 1, 5}));
  EXPECT_EQ(input->getType()->getElementType(), ElemKind::Int8QTy);
  EXPECT_EQ(input->getType()->getScale(), 1.2f);
  EXPECT_EQ(input->getType()->getOffset(), 5);
}
4977
/// Test loading a custom ONNX Glow ChannelwiseQuantizedGroupConvolution.
TEST_F(OnnxImporterTest, CustomGlowChannelwiseQuantizedGroupConvolution) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  auto *F = mod.createFunction("main");
  std::string netFilename(
      GLOW_DATA_PATH "tests/models/onnxModels/"
      "glow_custom_op_channelwise_quantized_group_conv.onnxtxt");
  Placeholder *outputPH;
  // Scope the loader so it is destroyed before the graph is inspected.
  {
    ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
    outputPH = EXIT_ON_ERR(onnxLD.getSingleOutput());
  }

  // Verify structure:
  // {(PH -> Quantize), Constant, Constant, Constant, Constant} ->
  // ChannelwiseQuantizedConvolution -> Save -> PH.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
  EXPECT_EQ(mod.getConstants().size(), 6);
  // ChannelwiseQuantizedConvolution, Save, Quantize, Dequantize
  EXPECT_EQ(F->getNodes().size(), 4);

  auto *save = getSaveNodeFromDest(outputPH);
  ASSERT_TRUE(save);

  // The saved output comes from a Dequantize of an int8 {scale=1.0, offset=0}
  // result of shape {1, 1, 3, 4}.
  auto *DQN = llvm::dyn_cast<DequantizeNode>(save->getInput());
  ASSERT_TRUE(DQN);
  EXPECT_EQ(DQN->getInput().getType()->getElementType(), ElemKind::Int8QTy);
  EXPECT_EQ(DQN->getInput().getType()->getScale(), 1.0f);
  EXPECT_EQ(DQN->getInput().getType()->getOffset(), 0);
  EXPECT_EQ(DQN->getInput().dims().vec(), std::vector<dim_t>({1, 1, 3, 4}));

  // Check that the convolution parameters were loaded correctly.
  auto *CN =
      llvm::dyn_cast<ChannelwiseQuantizedConvolutionNode>(DQN->getInput());
  ASSERT_TRUE(CN);
  EXPECT_EQ(CN->getKernels().vec(), std::vector<unsigned_t>({2, 1}));
  EXPECT_EQ(CN->getStrides().vec(), std::vector<unsigned_t>({1, 1}));
  EXPECT_EQ(CN->getPads().vec(), std::vector<unsigned_t>({0, 0, 0, 0}));
  EXPECT_EQ(CN->getGroup(), 2);
  EXPECT_EQ(CN->getDilation().vec(), std::vector<unsigned_t>({1, 1}));

  // The float input placeholder is quantized to int8 before the convolution.
  auto *QN = llvm::dyn_cast<QuantizeNode>(CN->getInput());
  ASSERT_TRUE(QN);
  EXPECT_EQ(QN->getResult().getType()->getElementType(), ElemKind::Int8QTy);
  EXPECT_EQ(QN->getResult().getType()->getScale(), 1.0f);
  EXPECT_EQ(QN->getResult().getType()->getOffset(), 0);
  EXPECT_EQ(QN->getResult().dims().vec(), std::vector<dim_t>({1, 2, 3, 2}));
  EXPECT_TRUE(llvm::isa<Placeholder>(QN->getInput()));

  // Filter and bias are quantized Constants.
  auto *filter = llvm::dyn_cast<Constant>(CN->getFilter());
  ASSERT_TRUE(filter);
  EXPECT_EQ(filter->getOutput().getType()->getElementType(), ElemKind::Int8QTy);
  EXPECT_EQ(filter->getOutput().dims().vec(), std::vector<dim_t>({4, 2, 1, 1}));

  auto *bias = llvm::dyn_cast<Constant>(CN->getBias());
  ASSERT_TRUE(bias);
  EXPECT_EQ(bias->getOutput().getType()->getElementType(), ElemKind::Int32QTy);
  EXPECT_EQ(bias->getOutput().dims().vec(), std::vector<dim_t>({4}));

  // Per-channel scales/offsets for the filter and bias; one entry per each of
  // the 4 output channels.
  auto *filterScales = llvm::dyn_cast<Constant>(CN->getFilterScales());
  ASSERT_TRUE(filterScales);
  EXPECT_EQ(filterScales->getOutput().getType()->getElementType(),
            ElemKind::FloatTy);
  EXPECT_EQ(filterScales->getOutput().dims().vec(), std::vector<dim_t>({4}));

  auto *filterOffsets = llvm::dyn_cast<Constant>(CN->getFilterOffsets());
  ASSERT_TRUE(filterOffsets);
  EXPECT_EQ(filterOffsets->getOutput().getType()->getElementType(),
            ElemKind::Int32ITy);
  EXPECT_EQ(filterOffsets->getOutput().dims().vec(), std::vector<dim_t>({4}));

  auto *biasScales = llvm::dyn_cast<Constant>(CN->getBiasScales());
  ASSERT_TRUE(biasScales);
  EXPECT_EQ(biasScales->getOutput().getType()->getElementType(),
            ElemKind::FloatTy);
  EXPECT_EQ(biasScales->getOutput().dims().vec(), std::vector<dim_t>({4}));

  auto *biasOffsets = llvm::dyn_cast<Constant>(CN->getBiasOffsets());
  ASSERT_TRUE(biasOffsets);
  EXPECT_EQ(biasOffsets->getOutput().getType()->getElementType(),
            ElemKind::Int32ITy);
  EXPECT_EQ(biasOffsets->getOutput().dims().vec(), std::vector<dim_t>({4}));
}
5061
5062/// Upsample Test Helper
5063static void importUpsampleTest(std::string &netFilename) {
5064 ExecutionEngine EE;
5065 auto &mod = EE.getModule();
5066 auto *F = mod.createFunction("main");
5067 PlaceholderBindings bindings;
5068 Placeholder *resultPH;
5069 Tensor inputTensor(ElemKind::FloatTy, {1, 1, 2, 2});
5070
5071 inputTensor.getHandle() = {1, 2, 3, 4};
5072
5073 ONNXModelLoader onnxLD(netFilename, {"input"}, {&inputTensor.getType()}, *F);
5074 resultPH = EXIT_ON_ERR(onnxLD.getOutputByName("Y"));
5075 bindings.allocate(mod.getPlaceholders());
5076 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&inputTensor});
5077
5078 EE.compile(CompilationMode::Infer);
5079 EE.run(bindings);
5080
5081 auto result = bindings.get(resultPH)->getHandle();
5082 std::vector<dim_t> expectedDims = {1, 1, 4, 6};
5083
5084 EXPECT_TRUE(result.dims().vec() == expectedDims);
5085
5086 std::vector<float> expectedResult = {1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2,
5087 3, 3, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4};
5088
5089 for (dim_t i = 0; i < expectedResult.size(); i++) {
5090 EXPECT_EQ(result.raw(i), expectedResult[i]);
5091 }
5092}
5093
5094TEST_F(OnnxImporterTest, importUpsampleOpset7) {
5095 std::string netFilename(GLOW_DATA_PATH
5096 "tests/models/onnxModels/upsampleOpset7.onnxtxt");
5097 importUpsampleTest(netFilename);
5098}
5099
5100TEST_F(OnnxImporterTest, importUpsampleOpset9) {
5101 std::string netFilename(GLOW_DATA_PATH
5102 "tests/models/onnxModels/upsampleOpset9.onnxtxt");
5103 importUpsampleTest(netFilename);
5104}
5105
5106static void testIf(std::string filename, float inputVal, float outputVal) {
5107 ExecutionEngine EE{};
5108 auto &mod = EE.getModule();
5109 Function *F = mod.createFunction("main");
5110
5111 std::string netFilename = std::string(GLOW_DATA_PATH) + filename;
5112
5113 PlaceholderBindings bindings;
5114 Placeholder *output;
5115 {
5116 Tensor x(ElemKind::FloatTy, {1});
5117 x.getHandle() = {inputVal};
5118
5119 ONNXModelLoader onnxLD(netFilename, {"input"}, {&x.getType()}, *F);
5120 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
5121 bindings.allocate(mod.getPlaceholders());
5122 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&x});
5123 }
5124
5125 auto *res = bindings.get(output);
5126 EE.compile(CompilationMode::Infer);
5127 EE.run(bindings);
5128
5129 auto result = res->getHandle();
5130
5131 std::vector<float> expectedValues = {outputVal};
5132 for (size_t i = 0; i < expectedValues.size(); i++) {
5133 EXPECT_EQ(result.raw(i), expectedValues[i]);
5134 }
5135}
5136
5137TEST(onnx, testIfConstantTrue) {
5138 testIf("tests/models/onnxModels/if_true.onnxtxt", 3.0f, 6.0f);
5139}
5140
5141TEST(onnx, testIfConstantFalse) {
5142 testIf("tests/models/onnxModels/if_false.onnxtxt", 3.0f, 9.0f);
5143}
5144
5145/// ResizeNearest Test Helper
5146static void importResizeNearest(std::string filename) {
5147 ExecutionEngine EE;
5148 auto &mod = EE.getModule();
5149 Function *F = mod.createFunction("main");
5150
5151 std::string netFilename(filename);
5152
5153 PlaceholderBindings bindings;
5154 Placeholder *output;
5155 Tensor in(ElemKind::FloatTy, {2, 2, 2, 2});
5156 in.getHandle() = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
5157 {
5158 ONNXModelLoader onnxLD(netFilename, {"in"}, {&in.getType()}, *F);
5159 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
5160
5161 bindings.allocate(mod.getPlaceholders());
5162 updateInputPlaceholdersByName(bindings, &mod, {"in"}, {&in});
5163 }
5164
5165 auto *res = bindings.get(output);
5166 EE.compile(CompilationMode::Infer);
5167 EE.run(bindings);
5168 ASSERT_EQ(2, F->getNodes().size());
5169
5170 auto *saveNode = getSaveNodeFromDest(output);
5171 auto *RN = llvm::dyn_cast<ResizeNearestNode>(saveNode->getInput());
5172 ASSERT_TRUE(RN);
5173
5174 auto result = res->getHandle();
5175 std::vector<dim_t> expectedDims = {2, 2, 4, 4};
5176 EXPECT_EQ(result.dims().vec(), expectedDims);
5177
5178 std::vector<float> expectedValues = {
5179 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0,
5180 4.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 6.0, 6.0, 5.0, 5.0,
5181 6.0, 6.0, 7.0, 7.0, 8.0, 8.0, 7.0, 7.0, 8.0, 8.0, 9.0,
5182 9.0, 10.0, 10.0, 9.0, 9.0, 10.0, 10.0, 11.0, 11.0, 12.0, 12.0,
5183 11.0, 11.0, 12.0, 12.0, 13.0, 13.0, 14.0, 14.0, 13.0, 13.0, 14.0,
5184 14.0, 15.0, 15.0, 16.0, 16.0, 15.0, 15.0, 16.0, 16.0};
5185
5186 for (dim_t i = 0; i < 64; i++) {
5187 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
5188 }
5189
5190 // Constant Folding Test.
5191 FAIL_TEST_IF_ERR(checkConstFoldedOutput(netFilename, {"in"}, {&in},
5192 {bindings.get(output)}));
5193}
5194
5195/// Test ONNX Resize mode=nearest.
5196TEST(onnx, importResizeNearest) {
5197 std::string netFilename(GLOW_DATA_PATH
5198 "tests/models/onnxModels/resizeNearest.onnxtxt");
5199 importResizeNearest(netFilename);
5200}
5201
5202/// Test ONNX Resize V11 mode=nearest that is compatible with V10 spec
5203TEST(onnx, importResizeNearestV11compat) {
5204 std::string netFilename(
5205 GLOW_DATA_PATH "tests/models/onnxModels/resizeNearestV11compat.onnxtxt");
5206 importResizeNearest(netFilename);
5207}
5208
5209/// Test ONNX Resize V11 mode=nearest that is compatible with V10 spec
5210/// except that scales are inferred from sizes input.
5211TEST(onnx, importResizeNearestV11compat_sizes) {
5212 std::string netFilename(
5213 GLOW_DATA_PATH
5214 "tests/models/onnxModels/resizeNearestV11compat_sizes.onnxtxt");
5215 importResizeNearest(netFilename);
5216}
5217
5218static void importResizeBilinear(std::string filename) {
5219 ExecutionEngine EE;
5220 auto &mod = EE.getModule();
5221 Function *F = mod.createFunction("main");
5222 std::string netFilename(filename);
5223
5224 PlaceholderBindings bindings;
5225 Placeholder *output;
5226 Tensor in(ElemKind::FloatTy, {1, 1, 2, 2});
5227 in.getHandle() = {1, 2, 3, 4};
5228 {
5229 ONNXModelLoader onnxLD(netFilename, {"in"}, {&in.getType()}, *F);
5230 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
5231
5232 bindings.allocate(mod.getPlaceholders());
5233 updateInputPlaceholdersByName(bindings, &mod, {"in"}, {&in});
5234 }
5235
5236 auto *res = bindings.get(output);
5237 EE.compile(CompilationMode::Infer);
5238 EE.run(bindings);
5239 ASSERT_EQ(4, F->getNodes().size());
5240
5241 auto *saveNode = getSaveNodeFromDest(output);
5242 auto *TR = llvm::dyn_cast<ReshapeNode>(saveNode->getInput().getNode());
5243 ASSERT_TRUE(TR);
5244 auto *RN = llvm::dyn_cast<ResizeBilinearNode>(TR->getInput());
5245 ASSERT_TRUE(RN);
5246
5247 auto result = res->getHandle();
5248 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
5249 EXPECT_EQ(result.dims().vec(), expectedDims);
5250
5251 std::vector<float> expectedValues = {1.0, 1.5, 2.0, 2.0, 2.0, 2.5, 3.0, 3.0,
5252 3.0, 3.5, 4.0, 4.0, 3.0, 3.5, 4.0, 4.0};
5253
5254 for (dim_t i = 0; i < 16; i++) {
5255 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
5256 }
5257
5258 // Constant Folding Test.
5259 FAIL_TEST_IF_ERR(checkConstFoldedOutput(netFilename, {"in"}, {&in},
5260 {bindings.get(output)}));
5261}
5262
5263TEST_F(OnnxImporterTest, importBoolFromInt) {
5264 ExecutionEngine EE;
5265 auto &mod = EE.getModule();
5266 std::string netFilename(GLOW_DATA_PATH
5267 "tests/models/onnxModels/bool_from_int.onnxtxt");
5268 auto *F = mod.createFunction("main");
5269 PlaceholderBindings bindings;
5270 Placeholder *output;
5271 {
5272 ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
5273 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
5274 ASSERT_TRUE(output);
5275 }
5276
5277 EE.compile(CompilationMode::Infer);
5278 bindings.allocate(mod.getPlaceholders());
5279 EE.run(bindings);
5280
5281 std::vector<bool> expectedOut = {true, false, true};
5282 auto result = bindings.get(output)->getHandle<bool>();
5283 for (size_t i = 0; i < result.getType().size(); i++)
5284 EXPECT_EQ(result.raw(i), expectedOut[i]);
5285}
5286
/// Test ONNX Resize mode=bilinear (V10 spec).
TEST(onnx, importResizeBilinear) {
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/resizeBilinear.onnxtxt");
  importResizeBilinear(netFilename);
}
5293
/// Test ONNX Resize V11 mode=bilinear that is compatible with V10 spec.
TEST(onnx, importResizeBilinearV11compat) {
  std::string netFilename(
      GLOW_DATA_PATH "tests/models/onnxModels/resizeBilinearV11compat.onnxtxt");
  importResizeBilinear(netFilename);
}
5300
5301/// Test ONNX Resize V11 mode=bilinear that is compatible with V10 spec
5302/// except that scales are inferred from sizes input.
5303TEST(onnx, importResizeBilinearV11compat_sizes) {
5304 std::string netFilename(
5305 GLOW_DATA_PATH
5306 "tests/models/onnxModels/resizeBilinearV11compat_sizes.onnxtxt");
5307 importResizeBilinear(netFilename);
5308}
5309
5310/// Test loading a custom ONNX Glow net with NodeOpts.
5311TEST_F(OnnxImporterTest, CustomGlowWithNodeOpts) {
5312 ExecutionEngine EE;
5313 auto &mod = EE.getModule();
5314 auto *F = mod.createFunction("main");
5315 std::string netFilename(
5316 GLOW_DATA_PATH
5317 "tests/models/onnxModels/glow_custom_op_node_opts.onnxtxt");
5318 Placeholder *outputPH;
5319 BackendSpecificNodeInfo funNodeInfo;
5320 {
5321 ONNXModelLoader onnxLD(netFilename, {}, {}, *F, /* errPtr */ nullptr,
5322 /* zipMode */ false, &funNodeInfo);
5323 outputPH = EXIT_ON_ERR(onnxLD.getSingleOutput());
5324 }
5325
5326 auto itF = funNodeInfo.find(F);
5327 ASSERT_NE(itF, funNodeInfo.end());
5328 auto &nodeInfo = itF->second;
5329
5330 SaveNode *save = getSaveNodeFromDest(outputPH);
5331 ASSERT_TRUE(save);
5332 // Verify that there are no options specified for the Save.
5333 EXPECT_EQ(nodeInfo.find(save), nodeInfo.end());
5334
5335 // Verify that the options for the MatMul are loaded correctly.
5336 MatMulNode *MN = llvm::dyn_cast<MatMulNode>(save->getInput());
5337 auto itMN = nodeInfo.find(MN);
5338 ASSERT_NE(itMN, nodeInfo.end());
5339 llvm::StringMap<std::vector<std::string>> &opts = itMN->second;
5340
5341 // attribute {
5342 // name: "NodeOpt_BackendA_Option1"
5343 // strings: "1"
5344 // strings: "2"
5345 // type: STRINGS
5346 // }
5347 auto itOpt1 = opts.find("BackendA_Option1");
5348 ASSERT_NE(itOpt1, opts.end());
5349 EXPECT_EQ(itOpt1->second.size(), 2);
5350 EXPECT_EQ(itOpt1->second[0], "1");
5351 EXPECT_EQ(itOpt1->second[1], "2");
5352
5353 // attribute {
5354 // name: "NodeOpt_BackendA_Option2"
5355 // strings: "3"
5356 // type: STRINGS
5357 // }
5358 auto itOpt2 = opts.find("BackendA_Option2");
5359 ASSERT_NE(itOpt2, opts.end());
5360 EXPECT_EQ(itOpt2->second.size(), 1);
5361 EXPECT_EQ(itOpt2->second[0], "3");
5362
5363 // attribute {
5364 // name: "NodeOpt_BackendB_Option3"
5365 // strings: "4"
5366 // strings: "5"
5367 // type: STRINGS
5368 // }
5369 auto itOpt3 = opts.find("BackendB_Option3");
5370 ASSERT_NE(itOpt3, opts.end());
5371 EXPECT_EQ(itOpt3->second.size(), 2);
5372 EXPECT_EQ(itOpt3->second[0], "4");
5373 EXPECT_EQ(itOpt3->second[1], "5");
5374}
5375
5376/// Test loading a custom ONNX Glow net with serialized strides.
5377TEST_F(OnnxImporterTest, CustomGlowWithStrides) {
5378 ExecutionEngine EE;
5379 auto &mod = EE.getModule();
5380 auto *F = mod.createFunction("main");
5381 std::string netFilename(
5382 GLOW_DATA_PATH
5383 "tests/models/onnxModels/glow_custom_with_strides.onnxtxt");
5384 {
5385 ONNXModelLoader onnxLD(netFilename, {}, {}, *F, /* errPtr */ nullptr,
5386 /* zipMode */ false);
5387 EXIT_ON_ERR(onnxLD.getSingleOutput());
5388 }
5389
5390 // Find MatMul node.
5391 auto *MN = llvm::cast<MatMulNode>(F->getNodeByName("MM"));
5392
5393 // The MatMul node should have a custom stride[0] equal to 96.
5394 ASSERT_EQ(MN->getResult().getType()->strides()[0], 96);
5395 // LHS should have a custom stride[0] equal to 31.
5396 ASSERT_EQ(MN->getLHS().getType()->strides()[0], 31);
5397}
5398
5399static bool vecContainsVal(const std::vector<runtime::DeviceIDTy> &vec,
5400 runtime::DeviceIDTy val) {
5401 return std::find(vec.begin(), vec.end(), val) != vec.end();
5402}
5403
/// Test loading a custom ONNX Glow net that has been already partitioned,
/// turned into a DAG, and then exported.
TEST_F(OnnxImporterTest, CustomGlowDAGMultiOp) {
  ExecutionEngine EE("Interpreter", /* deviceMemory (16GB) */ 0x400000000,
                     /* ignoreUserDeviceConfig */ false, /* numDevices */ 3);
  auto &mod = EE.getModule();
  std::string netFilename(
      GLOW_DATA_PATH
      "tests/models/onnxModels/glow_custom_dag_multi_op.onnxtxt");

  Placeholder *outputPH;
  Tensor *resultPartitionedT;
  PlaceholderBindings bindingsU;
  PlaceholderBindings bindingsP;

  // Shared random inputs, used by both the partitioned and unpartitioned
  // executions so the two results can be compared bitwise at the end.
  runtime::PrePartitionedConfig PPC;
  Tensor mmIn0T(ElemKind::FloatTy, {10, 10});
  Tensor mmIn1T(ElemKind::FloatTy, {10, 10});
  Tensor addInT(ElemKind::FloatTy, {10, 10});
  mmIn0T.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  mmIn1T.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  addInT.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  Placeholder *mmIn0P = nullptr, *mmIn1P = nullptr, *addInP = nullptr;
  // Load the model; the loader fills in PPC with the pre-partitioned config.
  {
    ONNXModelLoader onnxLD(netFilename, {}, {}, mod, "main", &PPC,
                           /* errPtr */ nullptr, /* zipMode */ false);
    outputPH = EXIT_ON_ERR(onnxLD.getSingleOutput());
    NodeValue mmIn0NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn0NV, onnxLD.getNodeValueByName("mm0_in"));
    mmIn0P = llvm::dyn_cast<Placeholder>(mmIn0NV);
    NodeValue mmIn1NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn1NV, onnxLD.getNodeValueByName("mm1_in"));
    mmIn1P = llvm::dyn_cast<Placeholder>(mmIn1NV);
    NodeValue addInNV;
    ASSIGN_VALUE_OR_FAIL_TEST(addInNV, onnxLD.getNodeValueByName("add_in"));
    addInP = llvm::dyn_cast<Placeholder>(addInNV);
  }

  {
    ASSERT_TRUE(mmIn0P);
    ASSERT_TRUE(mmIn1P);
    ASSERT_TRUE(addInP);

    // The model is expected to load as three partitions: main_p0..main_p2.
    ASSERT_EQ(mod.getFunctions().size(), 3);
    Function *P0 = nullptr, *P1 = nullptr, *P2 = nullptr;
    for (size_t i = 0, e = PPC.funcs.size(); i < e; i++) {
      // Find the expected Function, and check that the logical device IDs were
      // correctly loaded.
      Function *F = PPC.funcs[i];
      if (F->getName() == "main_p0") {
        P0 = F;
        ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
        EXPECT_EQ(PPC.backendSpecificOpts[i].size(), 0);
      } else if (F->getName() == "main_p1") {
        P1 = F;
        ASSERT_EQ(PPC.logicalIDs[i].size(), 2);
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 0));
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 1));
        EXPECT_EQ(PPC.backendSpecificOpts[i].size(), 0);
      } else if (F->getName() == "main_p2") {
        // main_p2 additionally carries backend-specific options; check all
        // three key/value pairs round-tripped through serialization.
        P2 = F;
        ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
        EXPECT_EQ(PPC.backendSpecificOpts[i].size(), 3);
        ASSERT_TRUE(PPC.backendSpecificOpts[i].count("BackendA_opt1"));
        EXPECT_EQ(PPC.backendSpecificOpts[i].at("BackendA_opt1"), "val1");
        ASSERT_TRUE(PPC.backendSpecificOpts[i].count("BackendA_opt2"));
        EXPECT_EQ(PPC.backendSpecificOpts[i].at("BackendA_opt2"), "val2");
        ASSERT_TRUE(PPC.backendSpecificOpts[i].count("BackendB_opt3"));
        EXPECT_EQ(PPC.backendSpecificOpts[i].at("BackendB_opt3"), "val3");
      } else {
        FAIL() << "Unknown Function found.";
      }

      // Check that the function was also found in the module.
      auto &modFuns = mod.getFunctions();
      ASSERT_NE(std::find(modFuns.begin(), modFuns.end(), F), modFuns.end());
    }
    ASSERT_TRUE(P0);
    ASSERT_TRUE(P1);
    ASSERT_TRUE(P2);

    // Verify P0: final partition computes (mmIn0 * intermedMMOut) -
    // intermedAddOut, where the intermediates are placeholders produced by
    // the other partitions.
    auto *finalSave = getSaveNodeFromDest(outputPH);
    ASSERT_TRUE(finalSave);
    EXPECT_EQ(finalSave->getParent(), P0);
    SubNode *sub = llvm::dyn_cast<SubNode>(finalSave->getInput());
    ASSERT_TRUE(sub);
    Placeholder *intermedAddOut = llvm::dyn_cast<Placeholder>(sub->getRHS());
    ASSERT_TRUE(intermedAddOut);
    MulNode *mul = llvm::dyn_cast<MulNode>(sub->getLHS());
    ASSERT_TRUE(mul);
    Placeholder *intermedMMOut = llvm::dyn_cast<Placeholder>(mul->getRHS());
    ASSERT_TRUE(intermedMMOut);
    Placeholder *mmIn0 = llvm::dyn_cast<Placeholder>(mul->getLHS());
    ASSERT_TRUE(mmIn0);

    // Verify P2: find the single user of intermedAddOut living in P2; it must
    // be a Save of (intermedMMOut + addIn).
    Node *userFromP2 = nullptr;
    for (auto &U : intermedAddOut->getUsers()) {
      if (U.getUser()->getParent() == P2) {
        ASSERT_FALSE(userFromP2);
        userFromP2 = U.getUser();
      }
    }
    ASSERT_TRUE(userFromP2);
    SaveNode *saveIntermedP2Out = llvm::dyn_cast<SaveNode>(userFromP2);
    ASSERT_TRUE(saveIntermedP2Out);
    AddNode *add = llvm::dyn_cast<AddNode>(saveIntermedP2Out->getInput());
    ASSERT_TRUE(add);
    Placeholder *addIn = llvm::dyn_cast<Placeholder>(add->getRHS());
    ASSERT_TRUE(addIn);
    EXPECT_EQ(add->getLHS().getNode(), intermedMMOut);

    // Verify P1: find the single user of intermedMMOut living in P1; it must
    // be a Save of (mmIn0 x matMulIn).
    Node *userFromP1 = nullptr;
    for (auto &U : intermedMMOut->getUsers()) {
      if (U.getUser()->getParent() == P1) {
        ASSERT_FALSE(userFromP1);
        userFromP1 = U.getUser();
      }
    }
    ASSERT_TRUE(userFromP1);
    SaveNode *saveIntermedP1Out = llvm::dyn_cast<SaveNode>(userFromP1);
    ASSERT_TRUE(saveIntermedP1Out);
    MatMulNode *matMul =
        llvm::dyn_cast<MatMulNode>(saveIntermedP1Out->getInput());
    ASSERT_TRUE(matMul);
    EXPECT_EQ(matMul->getLHS().getNode(), mmIn0);
    Placeholder *matMulIn = llvm::dyn_cast<Placeholder>(matMul->getRHS());
    ASSERT_TRUE(matMulIn);

    // Now that we've verified the shape of the Module, run it and keep around
    // the pointer to the result.
    CompilationContext cctx;
    cctx.prepartitionedConfig = &PPC;
    EE.compile(cctx);
    bindingsP.insert(mmIn0P, mmIn0T.getUnowned());
    bindingsP.insert(mmIn1P, mmIn1T.getUnowned());
    bindingsP.insert(addInP, addInT.getUnowned());
    bindingsP.allocate(mod.getPlaceholders());
    EE.run(bindingsP);

    resultPartitionedT = bindingsP.get(outputPH);
  }

  // Now that we have the model result from pre-partitioned execution, execute
  // the model ignoring the pre-partitioning and bitwise compare results.
  // NOTE(review): re-setting the backend name appears to reset the EE's
  // Module so the model can be reloaded fresh -- confirm against
  // ExecutionEngine::setBackendName.
  EE.setBackendName(EE.getBackendName());

  Module &modU = EE.getModule();
  {
    Function *F = modU.createFunction("main");
    ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
    outputPH = EXIT_ON_ERR(onnxLD.getSingleOutput());
    NodeValue mmIn0NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn0NV, onnxLD.getNodeValueByName("mm0_in"));
    mmIn0P = llvm::dyn_cast<Placeholder>(mmIn0NV);
    NodeValue mmIn1NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn1NV, onnxLD.getNodeValueByName("mm1_in"));
    mmIn1P = llvm::dyn_cast<Placeholder>(mmIn1NV);
    NodeValue addInNV;
    ASSIGN_VALUE_OR_FAIL_TEST(addInNV, onnxLD.getNodeValueByName("add_in"));
    addInP = llvm::dyn_cast<Placeholder>(addInNV);
  }

  Tensor *resultUnpartitonedT;

  {
    ASSERT_TRUE(mmIn0P);
    ASSERT_TRUE(mmIn1P);
    ASSERT_TRUE(addInP);
    ASSERT_EQ(modU.getFunctions().size(), 1);

    EE.compile(CompilationMode::Infer);
    bindingsU.insert(mmIn0P, mmIn0T.getUnowned());
    bindingsU.insert(mmIn1P, mmIn1T.getUnowned());
    bindingsU.insert(addInP, addInT.getUnowned());
    bindingsU.allocate(modU.getPlaceholders());
    EE.run(bindingsU);

    resultUnpartitonedT = bindingsU.get(outputPH);
  }

  EXPECT_TRUE(resultPartitionedT->isBitwiseEqual(*resultUnpartitonedT,
                                                 /* verbose */ true));
}
5592
5593/// Utility function to test ONNX Gemm import.
5594static void importGemm(std::string filename, bool hasC, bool batchedC,
5595 bool transA, bool transB) {
5596 ExecutionEngine EE;
5597 auto &mod = EE.getModule();
5598 Function *F = mod.createFunction("main");
5599 std::string netFilename(filename);
5600
5601 PlaceholderBindings bindings;
5602 Placeholder *output;
5603
5604 Tensor tensorA;
5605 if (transA) {
5606 tensorA = Tensor(ElemKind::FloatTy, {3, 2});
5607 tensorA.getHandle() = {1, 4, 2, 5, 3, 6};
5608 } else {
5609 tensorA = Tensor(ElemKind::FloatTy, {2, 3});
5610 tensorA.getHandle() = {1, 2, 3, 4, 5, 6};
5611 }
5612
5613 Tensor tensorB;
5614 if (transB) {
5615 tensorB = Tensor(ElemKind::FloatTy, {4, 3});
5616 tensorB.getHandle() = {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4};
5617 } else {
5618 tensorB = Tensor(ElemKind::FloatTy, {3, 4});
5619 tensorB.getHandle() = {1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4};
5620 }
5621
5622 Tensor tensorC;
5623 if (batchedC) {
5624 tensorC = Tensor(ElemKind::FloatTy, {2, 4});
5625 tensorC.getHandle() = {1, 2, 3, 4, 1, 2, 3, 4};
5626 } else {
5627 tensorC = Tensor(ElemKind::FloatTy, {4});
5628 tensorC.getHandle() = {1, 2, 3, 4};
5629 }
5630
5631 {
5632 ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
5633 output = EXIT_ON_ERR(onnxLD.getSingleOutput());
5634 bindings.allocate(mod.getPlaceholders());
5635 if (hasC) {
5636 updateInputPlaceholdersByName(bindings, &mod, {"A", "B", "C"},
5637 {&tensorA, &tensorB, &tensorC});
5638 } else {
5639 updateInputPlaceholdersByName(bindings, &mod, {"A", "B"},
5640 {&tensorA, &tensorB});
5641 }
5642 }
5643
5644 auto *saveNode = getSaveNodeFromDest(output);
5645 auto *GN = llvm::dyn_cast<GemmNode>(saveNode->getInput().getNode());
5646 ASSERT_TRUE(GN);
5647
5648 auto *res = bindings.get(output);
5649 EE.compile(CompilationMode::Infer);
5650 EE.run(bindings);
5651
5652 // Check output size.
5653 auto result = res->getHandle();
5654 std::vector<dim_t> expectedDims = {2, 4};
5655 EXPECT_EQ(result.dims().vec(), expectedDims);
5656
5657 // Check output values.
5658 std::vector<float> expectedValues(8);
5659 if (hasC) {
5660 expectedValues = {7.0, 14.0, 21.0, 28.0, 16.0, 32.0, 48.0, 64.0};
5661 } else {
5662 expectedValues = {6.0, 12.0, 18.0, 24.0, 15.0, 30.0, 45.0, 60.0};
5663 }
5664 for (dim_t i = 0; i < 8; i++) {
5665 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
5666 }
5667}
5668
5669/// Test ONNX Gemm.
5670TEST_F(OnnxImporterTest, importGemmNoC) {
5671 std::string netFilename(GLOW_DATA_PATH
5672 "tests/models/onnxModels/gemmNoC.onnxtxt");
5673 importGemm(netFilename, /* hasC */ false, /* batchedC */ false,
5674 /* transA */ false, /* transB */ false);
5675}
5676
5677TEST_F(OnnxImporterTest, importGemmSingleC) {
5678 std::string netFilename(GLOW_DATA_PATH
5679 "tests/models/onnxModels/gemmSingleC.onnxtxt");
5680 importGemm(netFilename, /* hasC */ true, /* batchedC */ false,
5681 /* transA */ false, /* transB */ false);
5682}
5683
5684TEST_F(OnnxImporterTest, importGemmBatchedC) {
5685 std::string netFilename(GLOW_DATA_PATH
5686 "tests/models/onnxModels/gemmBatchedC.onnxtxt");
5687 importGemm(netFilename, /* hasC */ true, /* batchedC */ true,
5688 /* transA */ false, /* transB */ false);
5689}
5690
5691TEST_F(OnnxImporterTest, importGemmTransA) {
5692 std::string netFilename(GLOW_DATA_PATH
5693 "tests/models/onnxModels/gemmTransA.onnxtxt");
5694 importGemm(netFilename, /* hasC */ true, /* batchedC */ false,
5695 /* transA */ true, /* transB */ false);
5696}
5697
5698TEST_F(OnnxImporterTest, importGemmTransB) {
5699 std::string netFilename(GLOW_DATA_PATH
5700 "tests/models/onnxModels/gemmTransB.onnxtxt");
5701 importGemm(netFilename, /* hasC */ true, /* batchedC */ false,
5702 /* transA */ false, /* transB */ true);
5703}
5704
5705TEST(onnx, importTransposeNullPerm) {
5706 ExecutionEngine EE;
5707 auto &mod = EE.getModule();
5708 std::string netFilename(
5709 GLOW_DATA_PATH "tests/models/onnxModels/transpose_null_perm.onnxtxt");
5710 auto *F = mod.createFunction("main");
5711 PlaceholderBindings bindings;
5712 Placeholder *output_0;
5713
5714 Tensor input_0(ElemKind::Int32ITy, {1, 2, 3, 4});
5715 input_0.getHandle<int32_t>() = {1, 2, 3, 6, 4, 5, 6, 3, 1, 2, 3, 6,
5716 4, 5, 6, 3, 7, 8, 9, 2, 3, 5, 7, 1};
5717 {
5718 ONNXModelLoader onnxLD(netFilename, {"X1"}, {&input_0.getType()}, *F);
5719
5720 output_0 = EXIT_ON_ERR(onnxLD.getOutputByName("output0"));
5721
5722 bindings.allocate(mod.getPlaceholders());
5723 updateInputPlaceholdersByName(bindings, &mod, {"X1"}, {&input_0});
5724 }
5725
5726 EE.compile(CompilationMode::Infer);
5727 EE.run(bindings);
5728
5729 std::vector<dim_t> expectedDims = {4, 3, 2, 1};
5730 std::vector<int32_t> expectedValues = {1, 4, 4, 7, 1, 3, 2, 5, 5, 8, 2, 5,
5731 3, 6, 6, 9, 3, 7, 6, 3, 3, 2, 6, 1};
5732
5733 auto result = bindings.get(output_0)->getHandle<int32_t>();
5734
5735 EXPECT_EQ(result.dims().vec(), expectedDims);
5736 for (dim_t i = 0; i < 24; i++) {
5737 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
5738 }
5739}
5740
5741TEST(onnx, importNames) {
5742 ExecutionEngine EE{};
5743 auto &mod = EE.getModule();
5744 Function *F = mod.createFunction("main");
5745 std::string NetFilename(GLOW_DATA_PATH
5746 "tests/models/onnxModels/legalizeNames.onnxtxt");
5747
5748 PlaceholderBindings bindings;
5749 Placeholder *graphOutputVar;
5750 Type input_type(ElemKind::FloatTy, {1, 2, 4, 3});
5751 ONNXModelLoader onnxLD(NetFilename, {"data"}, {&input_type}, *F);
5752 graphOutputVar = EXIT_ON_ERR(onnxLD.getSingleOutput());
5753 auto PH = mod.getPlaceholderByNameSlow("data");
5754 auto *inTensor = bindings.allocate(PH);
5755 inTensor->getHandle().randomize(-10.0, 10.0, mod.getPRNG());
5756 // Compile&run the graph, and check the output
5757 EE.compile(CompilationMode::Infer);
5758 vector<std::string> origNames = {"a__1", "a__1", "a__3__3", "a__2",
5759 "a__1_", "a__b", "a"};
5760 auto *currNode = (Node *)getSaveNodeFromDest(graphOutputVar);
5761 for (size_t i = 0; i < origNames.size(); i++) {
5762 auto *prevNode = currNode->getNthInput(0).getNode();
5763 // Make sure original names are retained in the legalized names.
5764 EXPECT_EQ(prevNode->getName().find(origNames[i]), 0);
5765 currNode = prevNode;
5766 }
5767}
5768
5769TEST(onnx, importClipDefaultMin) {
5770 // Test loading Clip in opset v11 format where min(default) and max(2) are
5771 // passed as inputs.
5772 ExecutionEngine EE;
5773 auto &mod = EE.getModule();
5774 std::string netFilename(GLOW_DATA_PATH
5775 "tests/models/onnxModels/clip_default.onnxtxt");
5776 auto *F = mod.createFunction("main");
5777 PlaceholderBindings bindings;
5778 Placeholder *output_0;
5779
5780 Tensor X(ElemKind::FloatTy, {1, 2, 2, 2});
5781 X.getHandle() = {-3, -2, -1, 0, 1, 2, 3, 4};
5782
5783 {
5784 ONNXModelLoader onnxLD(netFilename, {"X"}, {&X.getType()}, *F);
5785 output_0 = EXIT_ON_ERR(onnxLD.getOutputByName("output0"));
5786 bindings.allocate(mod.getPlaceholders());
5787 updateInputPlaceholdersByName(bindings, &mod, {"X"}, {&X});
5788 }
5789
5790 EE.compile(CompilationMode::Infer);
5791 EE.run(bindings);
5792
5793 std::vector<dim_t> expectedDims = {1, 2, 2, 2};
5794 std::vector<float> expectedValues = {-3, -2, -1, 0, 1, 2, 2, 2};
5795 auto result = bindings.get(output_0)->getHandle();
5796 EXPECT_EQ(result.dims().vec(), expectedDims);
5797
5798 for (size_t i = 0; i < 8; i++) {
5799 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
5800 }
5801}
5802
5803TEST(onnx, importClipV11) {
5804 // Test loading Clip in opset v11 format where min(-2) and max(2) are passed
5805 // as inputs.
5806 ExecutionEngine EE;
5807 auto &mod = EE.getModule();
5808 std::string netFilename(GLOW_DATA_PATH
5809 "tests/models/onnxModels/clipv11.onnxtxt");
5810 auto *F = mod.createFunction("main");
5811 PlaceholderBindings bindings;
5812 Placeholder *output_0;
5813
5814 Tensor X(ElemKind::FloatTy, {1, 2, 2, 2});
5815 X.getHandle() = {-3, -2, -1, 0, 1, 2, 3, 4};
5816
5817 {
5818 ONNXModelLoader onnxLD(netFilename, {"X"}, {&X.getType()}, *F);
5819 output_0 = EXIT_ON_ERR(onnxLD.getOutputByName("output0"));
5820 bindings.allocate(mod.getPlaceholders());
5821 updateInputPlaceholdersByName(bindings, &mod, {"X"}, {&X});
5822 }
5823
5824 EE.compile(CompilationMode::Infer);
5825 EE.run(bindings);
5826
5827 std::vector<dim_t> expectedDims = {1, 2, 2, 2};
5828 std::vector<float> expectedValues = {-2, -2, -1, 0, 1, 2, 2, 2};
5829 auto result = bindings.get(output_0)->getHandle();
5830 EXPECT_EQ(result.dims().vec(), expectedDims);
5831
5832 for (size_t i = 0; i < 8; i++) {
5833 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
5834 }
5835}
5836
5837// Utility function to test ONNX Softmax
5838static void testSoftmax(const std::string &modelName,
5839 const std::vector<dim_t> &expectedDims,
5840 const std::vector<float> &expectedValues) {
5841 ExecutionEngine EE{};
5842 auto &mod = EE.getModule();
5843 Function *F = mod.createFunction("main");
5844
5845 // Input.
5846 Tensor x(ElemKind::FloatTy, {2, 2, 2, 2});
5847 x.getHandle() = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
5848 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0};
5849
5850 // Load model.
5851 std::string netFilename =
5852 std::string(GLOW_DATA_PATH "tests/models/onnxModels/") + modelName;
5853 ONNXModelLoader onnxLD(netFilename, {"x"}, {&x.getType()}, *F);
5854 Placeholder *output = EXIT_ON_ERR(onnxLD.getSingleOutput());
5855
5856 // Allocate placeholders.
5857 PlaceholderBindings bindings;
5858 bindings.allocate(mod.getPlaceholders());
5859 updateInputPlaceholdersByName(bindings, &mod, {"x"}, {&x});
5860
5861 auto *res = bindings.get(output);
5862 EE.compile(CompilationMode::Infer);
5863 EE.run(bindings);
5864
5865 // Compare results.
5866 auto result = res->getHandle();
5867 EXPECT_TRUE(result.dims().vec() == expectedDims);
5868 for (dim_t i = 0; i < result.size(); i++) {
5869 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
5870 }
5871}
5872
5873/// Test loading Softmax from a ONNX model.
5874TEST_F(OnnxImporterTest, softmax) {
5875 testSoftmax("softmax11.onnxtxt", {2, 2, 2, 2},
5876 {5.7661277e-04, 1.5673960e-03, 4.2606238e-03, 1.1581578e-02,
5877 3.1481992e-02, 8.5576929e-02, 2.3262219e-01, 6.3233274e-01,
5878 5.7661277e-04, 1.5673960e-03, 4.2606238e-03, 1.1581578e-02,
5879 3.1481992e-02, 8.5576929e-02, 2.3262219e-01, 6.3233274e-01});
5880}
5881/// Test loading Softmax opset13 from a ONNX model.
5882TEST_F(OnnxImporterTest, softmax13) {
5883 testSoftmax("softmax13.onnxtxt", {2, 2, 2, 2},
5884 {0.11920292, 0.11920292, 0.880797, 0.880797, 0.11920292,
5885 0.11920292, 0.880797, 0.880797, 0.11920292, 0.11920292, 0.880797,
5886 0.880797, 0.11920292, 0.11920292, 0.880797, 0.880797});
5887}
5888
/// Test loading Conv model with auto_pad=NOTSET from an ONNX model.
/// Verifies that with NOTSET, the explicit `pads` attribute from the model is
/// honored on each Conv node rather than being recomputed.
TEST_F(OnnxImporterTest, importConvPadNotset) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  auto *F = mod.createFunction("main");
  std::string netFilename(GLOW_DATA_PATH
                          "tests/models/onnxModels/convPadNotset.onnxtxt");
  Placeholder *output;
  {
    // No input names/types supplied: the model declares its own inputs.
    ONNXModelLoader onnxLD(netFilename, {}, {}, *F);
    output = EXIT_ON_ERR(onnxLD.getSingleOutput());
  }
  ASSERT_EQ(mod.getPlaceholders().size(), 2);
  // Each Conv2D is loaded as 4 operations: input Transpose, filter Transpose,
  // Conv2D node and output Transpose.
  ASSERT_EQ(F->getNodes().size(), 11);
  // Walk the graph backwards from the save node; the expected chain is
  // Save <- Transpose <- Transpose <- Conv <- Transpose <- Transpose <- Conv.
  auto *save = getSaveNodeFromDest(output);
  ASSERT_TRUE(save);
  auto *trans1 = llvm::dyn_cast<TransposeNode>(save->getInput().getNode());
  ASSERT_TRUE(trans1);
  auto *trans2 = llvm::dyn_cast<TransposeNode>(trans1->getInput().getNode());
  ASSERT_TRUE(trans2);
  auto *conv1 = llvm::dyn_cast<ConvolutionNode>(trans2->getInput().getNode());
  ASSERT_TRUE(conv1);
  auto *trans3 = llvm::dyn_cast<TransposeNode>(conv1->getInput().getNode());
  ASSERT_TRUE(trans3);
  auto *trans4 = llvm::dyn_cast<TransposeNode>(trans3->getInput().getNode());
  ASSERT_TRUE(trans4);
  auto *conv2 = llvm::dyn_cast<ConvolutionNode>(trans4->getInput().getNode());
  ASSERT_TRUE(conv2);
  // conv2 is the first conv in execution order (closest to the input); its
  // model specifies pads of 1 on every side, while the second conv (conv1)
  // specifies zero padding.
  EXPECT_EQ(conv2->getPads().vec(), std::vector<unsigned_t>({1, 1, 1, 1}));
  EXPECT_EQ(conv1->getPads().vec(), std::vector<unsigned_t>({0, 0, 0, 0}));
}
5922
5923/// Test loading LogSoftmax opset13 from a ONNX model.
5924TEST_F(OnnxImporterTest, logsoftmax) {
5925 testSoftmax("logsoftmax.onnxtxt", {2, 2, 2, 2},
5926 {-2.1269281, -2.1269281, -0.12692806, -0.12692806, -2.1269281,
5927 -2.1269281, -0.12692806, -0.12692806, -2.1269281, -2.1269281,
5928 -0.12692806, -0.12692806, -2.1269281, -2.1269281, -0.12692806,
5929 -0.12692806});
5930}
5931