/**
 * Copyright (c) Glow Contributors. See CONTRIBUTORS file.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "BackendTestUtils.h"

#include "glow/Backend/Backend.h"
#include "glow/Converter/Float16Converter.h"
#include "glow/Converter/TypeAToTypeBFunctionConverter.h"
#include "glow/Graph/Graph.h"
#include "glow/Graph/TensorLayout.h"
#include "llvm/Support/raw_ostream.h"

#include "gtest/gtest.h"

#include <sstream>

using namespace glow;

class TensorLayoutTest : public BackendTest {
protected:
  PlaceholderBindings bindings_;
};

// Check that CanonicalTensorLayout works for conv with default values:
TEST_P(TensorLayoutTest, convDefault) {
  CHECK_IF_ENABLED();

  auto *input =
      mod_.createPlaceholder(ElemKind::FloatTy, {1, 3, 3, 1}, "input", false);
  auto IH = bindings_.allocate(input)->getHandle();
  IH = {1, 1, 1, 1, 1, 1, 1, 1, 1};

  auto filter =
      mod_.createPlaceholder(ElemKind::FloatTy, {1, 3, 3, 1}, "filter", false);
  auto FH = bindings_.allocate(filter)->getHandle();
  FH = {0, 0, 0, 1, 1, 1, 0, 0, 0};

  auto *zeroBias =
      mod_.createPlaceholder(ElemKind::FloatTy, {1}, "bias", false);
  bindings_.allocate(zeroBias)->zero();

  auto outTy = mod_.uniqueType(ElemKind::FloatTy, {1, 3, 3, 1});

  ConvolutionNode *CN =
      F_->createConv("Conv", input, filter, zeroBias, outTy, 3, 1, 1, 1);
  SaveNode *S = F_->createSave("save", CN);
  bindings_.allocate(S->getPlaceholder());

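  // A brief note on the helper used throughout this file: verifyLayouts checks
  // every node's input and result layouts against the requirements reported by
  // the given TensorLayoutCommon instance (here the canonical, NHWC-oriented
  // layout); the optional third argument used in later tests appears to
  // control whether mismatches are reported verbosely.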
  EXPECT_TRUE(verifyLayouts(*F_, CanonicalTensorLayout::getInstance()));
}

// Check that pad nodes accept any layout:
TEST_P(TensorLayoutTest, pad) {
  CHECK_IF_ENABLED();

  const dim_t inputDims[] = {1, 10, 15, 5};
  const dim_t outPadDims[] = {5, 18, 25, 11};
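  // pads holds the leading pad for each of the four dimensions followed by the
  // trailing pads, so outPadDims[i] above equals
  // inputDims[i] + pads[i] + pads[i + 4].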
  int pads[] = {0, 2, 3, 1, 4, 6, 7, 5};

  Node *A = mod_.createPlaceholder(ElemKind::FloatTy, inputDims, "input", false,
                                   "NCHW");
  auto outTy = mod_.uniqueType(ElemKind::FloatTy, outPadDims);
  Node *P = F_->createPad("pad", A, outTy, PaddingMode::CONSTANT, pads, 23.f);
  SaveNode *S = F_->createSave("save", P);
  bindings_.allocate(S->getPlaceholder());

  EXPECT_TRUE(verifyLayouts(*F_, CanonicalTensorLayout::getInstance()));
}

// Check that broadcast nodes accept any layout:
TEST_P(TensorLayoutTest, broadcastNodeAcceptAnyLayout) {
  CHECK_IF_ENABLED();

  const std::array<dim_t, 4> inputDims{1, 10, 1, 1};
  const std::array<dim_t, 4> outputDims{1, 10, 5, 5};

  auto *A = mod_.createPlaceholder(ElemKind::FloatTy, inputDims, "input", false,
                                   "NCHW");
  auto BN = F_->createBroadcast("broadcast", A, outputDims, 2);
  SaveNode *S = F_->createSave("save", BN);
  bindings_.allocate(S->getPlaceholder());

  EXPECT_TRUE(verifyLayouts(*F_, CanonicalTensorLayout::getInstance()));
}

static void buildBadConv(PlaceholderBindings &bindings, Module &mod,
                         Function *F) {
  auto *input = mod.createPlaceholder(ElemKind::FloatTy, {1, 3, 3, 1}, "input",
                                      false, "NWCH");
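  // Note: "NWCH" deliberately deviates from the canonical NHWC layout expected
  // for convolution inputs, so layout verification on this function should
  // fail.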
  auto IH = bindings.allocate(input)->getHandle();
  IH = {1, 1, 1, 1, 1, 1, 1, 1, 1};

  auto filter = mod.createPlaceholder(ElemKind::FloatTy, {1, 3, 3, 1}, "filter",
                                      false, "NWCH");
  auto FH = bindings.allocate(filter)->getHandle();
  FH = {0, 0, 0, 1, 1, 1, 0, 0, 0};

  auto *zeroBias = mod.createPlaceholder(ElemKind::FloatTy, {1}, "bias", false);
  bindings.allocate(zeroBias)->zero();

  auto outTy = mod.uniqueType(ElemKind::FloatTy, {1, 3, 3, 1});

  ConvolutionNode *CN =
      F->createConv("Conv", input, filter, zeroBias, outTy, 3, 1, 1, 1);
  SaveNode *S = F->createSave("save", CN);
  bindings.allocate(S->getPlaceholder());
}

// Check that a conv with a bad layout fails CanonicalTensorLayout verification:
TEST_P(TensorLayoutTest, convBadLayout) {
  CHECK_IF_ENABLED();

  buildBadConv(bindings_, mod_, F_);

  EXPECT_FALSE(verifyLayouts(*F_, CanonicalTensorLayout::getInstance(), false));
}

// Check that we propagate the layout information for convertTo nodes:
TEST_P(TensorLayoutTest, convertTo) {
  CHECK_IF_ENABLED();

  auto *input = mod_.createPlaceholder(ElemKind::FloatTy, {1, 3, 3, 1}, "input",
                                       false, "NWCH");
  auto *resultNCHW = F_->createTranspose("transposeInput", input, NHWC2NCHW);
  auto *save = F_->createSave("save", resultNCHW);
  bindings_.allocate(save->getPlaceholder());

  EXPECT_TRUE(verifyLayouts(*F_, CanonicalTensorLayout::getInstance()));

  PrecisionConfiguration precConfig;
  TypeAToTypeBFunctionConverter converter(*F_, ElemKind::FloatTy,
                                          ElemKind::Float16Ty, precConfig);
  converter.convert();

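  // The conversion above is expected to insert ConvertTo (float <-> float16)
  // nodes around the transpose; the check below confirms that layout
  // information is propagated through those newly created nodes.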
  EXPECT_TRUE(verifyLayouts(*F_, CanonicalTensorLayout::getInstance()));
}

static void buildFunctionWithCustomTensorLayouts(PlaceholderBindings &bindings,
                                                 Module &mod, Function *F) {
  auto *input = mod.createPlaceholder(ElemKind::FloatTy, {2, 2, 3, 3}, "input",
                                      false, "NCHW");
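  // The placeholder is deliberately tagged "NCHW": canonical verification
  // expects NHWC for MaxPool inputs, while the CustomTensorLayout defined
  // below demands NCHW, which is exactly what the test at the bottom checks.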
  auto IH = bindings.allocate(input)->getHandle();
  IH = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};

  MaxPoolNode *maxPool = F->createMaxPool("maxpool", input, 1, 1, 0);
  auto outTy = mod.uniqueType(
      ElemKind::Int8QTy, maxPool->getNthResult(0).getType()->dims(), 1.0f, 0);
  Node *Q = F->createQuantize("quantize", maxPool->getNthResult(0), outTy);
  SaveNode *S = F->createSave("save", Q);
  bindings.allocate(S->getPlaceholder());
}

/// Define a custom tensor layout. It delegates all of the work to
/// TensorLayoutCommon, but redefines the logic for MaxPool and demands that it
/// use NCHW.
class CustomTensorLayout final
    : public TensorLayoutCommon,
      public TensorLayoutSingleton<CustomTensorLayout> {
public:
  CustomTensorLayout(token_) {}
  CustomTensorLayout(TensorLayoutCommon *ctxTensorLayout)
      : TensorLayoutCommon(ctxTensorLayout) {}

  std::string getDefaultNDLayout(unsigned dims) const override {
    return CanonicalTensorLayout::getInstance().getDefaultNDLayout(dims);
  }

  std::string getNthInputLayoutRequirements(const Node *node,
                                            size_t n) override {
    // The custom layout uses NCHW for MaxPool nodes.
    if (llvm::isa<MaxPoolNode>(node) && n == 0) {
      return "NCHW";
    }
    return CanonicalTensorLayout(this).getNthInputLayoutRequirements(node, n);
  }

  std::string getNthResultLayoutRequirements(const Node *node,
                                             size_t n) override {
    // The custom layout uses NCHW for MaxPool nodes.
    if (llvm::isa<MaxPoolNode>(node) && n == 0) {
      return "NCHW";
    }
    return CanonicalTensorLayout(this).getNthResultLayoutRequirements(node, n);
  }

  bool acceptsAnyLayout(const Node *node) const { return true; }
};
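
// A note on the singleton boilerplate above: TensorLayoutSingleton provides
// the CustomTensorLayout::getInstance() accessor used by the test below; the
// unnamed token_ constructor parameter is presumably there so that instances
// can only be created through the singleton machinery.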

// Check that proper TensorLayouts are used by getNthResultLayoutRequirements
// and getNthInputLayoutRequirements during tensor layout verification.
TEST_P(TensorLayoutTest, multiLayerWithCustomTensorLayouts) {
  CHECK_IF_ENABLED();

  buildFunctionWithCustomTensorLayouts(bindings_, mod_, F_);

  // The test should fail with standard TensorLayout checks, because MaxPool
  // uses NCHW.
  EXPECT_FALSE(verifyLayouts(*F_, CanonicalTensorLayout::getInstance(), false));
  // The test should pass with CustomTensorLayout checks, because MaxPool is
  // expected to use NCHW.
  EXPECT_TRUE(verifyLayouts(*F_, CustomTensorLayout::getInstance(), false));
}

// Check TensorLayoutDescription's parser with simple input.
TEST_P(TensorLayoutTest, parseTestSimple) {
  CHECK_IF_ENABLED();

  TensorLayoutDescription simple("NHWC");
  EXPECT_FALSE(simple.isAnyLayout());
  EXPECT_EQ(simple.getNumDims(), 4);
  EXPECT_EQ(simple.getDims()[0], "N");
  EXPECT_EQ(simple.getDims()[1], "H");
  EXPECT_EQ(simple.getDims()[2], "W");
  EXPECT_EQ(simple.getDims()[3], "C");
  for (size_t i = 0; i < simple.getNumDims(); ++i) {
    EXPECT_EQ(simple.getAlignment(i), 1);
  }
}
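
// The serialized layout strings parsed below use one character per dimension,
// optionally followed by bracketed attributes: "[a=<n>]" sets that dimension's
// alignment, any other "[key:value]" pair is kept as a custom extension, and
// "*" appears to stand for a dimension whose name is left unspecified.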

// Check TensorLayoutDescription's parser with alignment.
TEST_P(TensorLayoutTest, parseTestAlignment) {
  CHECK_IF_ENABLED();

  TensorLayoutDescription alignment("N[a=32]HW[a=64]C");
  EXPECT_FALSE(alignment.isAnyLayout());
  EXPECT_EQ(alignment.getNumDims(), 4);
  EXPECT_EQ(alignment.getDims()[0], "N[a=32]");
  EXPECT_EQ(alignment.getDims()[1], "H");
  EXPECT_EQ(alignment.getDims()[2], "W[a=64]");
  EXPECT_EQ(alignment.getDims()[3], "C");
  EXPECT_EQ(alignment.getAlignment(0), 32);
  EXPECT_EQ(alignment.getAlignment(1), 1);
  EXPECT_EQ(alignment.getAlignment(2), 64);
  EXPECT_EQ(alignment.getAlignment(3), 1);
}

// Check TensorLayoutDescription's parser with custom extensions.
TEST_P(TensorLayoutTest, parseTestCustom) {
  CHECK_IF_ENABLED();

  TensorLayoutDescription custom("N[a=32][after:align]C[mal:reynolds][answer:"
                                 "42]HW[before:alignment][a=64]");
  EXPECT_FALSE(custom.isAnyLayout());
  EXPECT_EQ(custom.getNumDims(), 4);
  EXPECT_EQ(custom.getDims()[0], "N[a=32][after:align]");
  EXPECT_EQ(custom.getDims()[1], "C[mal:reynolds][answer:42]");
  EXPECT_EQ(custom.getDims()[2], "H");
  EXPECT_EQ(custom.getDims()[3], "W[before:alignment][a=64]");
  EXPECT_EQ(custom.getAlignment(0), 32);
  EXPECT_EQ(custom.getAlignment(1), 1);
  EXPECT_EQ(custom.getAlignment(2), 1);
  EXPECT_EQ(custom.getAlignment(3), 64);
}

// Check TensorLayoutDescription's parser with star dims.
TEST_P(TensorLayoutTest, parseTestStar) {
  CHECK_IF_ENABLED();

  TensorLayoutDescription custom("N[a=32]*H*[a=64]");
  EXPECT_FALSE(custom.isAnyLayout());
  EXPECT_EQ(custom.getNumDims(), 4);
  EXPECT_EQ(custom.getDims()[0], "N[a=32]");
  EXPECT_EQ(custom.getDims()[1], "*");
  EXPECT_EQ(custom.getDims()[2], "H");
  EXPECT_EQ(custom.getDims()[3], "*[a=64]");
  EXPECT_EQ(custom.getAlignment(0), 32);
  EXPECT_EQ(custom.getAlignment(1), 1);
  EXPECT_EQ(custom.getAlignment(2), 1);
  EXPECT_EQ(custom.getAlignment(3), 64);
}

// Check TensorLayoutDescription's setting of alignment.
TEST_P(TensorLayoutTest, setAlignment) {
  CHECK_IF_ENABLED();

  TensorLayoutDescription before("N[a=16][answer:42]HW[answer:42]C");
  EXPECT_FALSE(before.isAnyLayout());
  auto modN = before.setAlignment(0, 32);
  EXPECT_EQ(modN, "N[answer:42][a=32]");
  auto addToW = before.setAlignment(2, 64);
  EXPECT_EQ(addToW, "W[answer:42][a=64]");
  auto newSerial = before.getSerializedLayout();
  EXPECT_EQ(newSerial, "N[answer:42][a=32]HW[answer:42][a=64]C");
}

// Check TensorLayoutDescription's attribute getter.
TEST_P(TensorLayoutTest, getAttribute) {
  CHECK_IF_ENABLED();

  TensorLayoutDescription layout("N[a=16][answer:q=42]HWC");
  EXPECT_FALSE(layout.isAnyLayout());
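  // Attribute names are passed with their separator included ("a=",
  // "answer:q="); the getter returns just the value and the setter rewrites or
  // adds it.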
  auto notFound = layout.getAttribute(0, "question");
  EXPECT_EQ(notFound, "");
  auto alignStr = layout.getAttribute(0, "a=");
  EXPECT_EQ(alignStr, "16");
  auto customAttr = layout.getAttribute(0, "answer:q=");
  EXPECT_EQ(customAttr, "42");
}

// Check TensorLayoutDescription's attribute setter.
TEST_P(TensorLayoutTest, setAttribute) {
  CHECK_IF_ENABLED();

  TensorLayoutDescription layout("N[a=16][answer:q=42]HWC");
  EXPECT_FALSE(layout.isAnyLayout());
  auto customAttr = layout.setAttribute(0, "answer:q=", "h2g2");
  EXPECT_EQ(customAttr, "N[a=16][answer:q=h2g2]");
  auto customAttrNew = layout.setAttribute(2, "answer:q=", "42");
  EXPECT_EQ(customAttrNew, "W[answer:q=42]");
}

INSTANTIATE_BACKEND_TEST(TensorLayoutTest);