1/*
2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements. See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership. The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License. You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing,
13 * software distributed under the License is distributed on an
14 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 * KIND, either express or implied. See the License for the
16 * specific language governing permissions and limitations
17 * under the License.
18 */
19
20/*!
21 * \file tvm/relay/attrs/nn.h
22 * \brief Auxiliary attributes for nn operators.
23 */
24#ifndef TVM_RELAY_ATTRS_NN_H_
25#define TVM_RELAY_ATTRS_NN_H_
26
27#include <tvm/ir/attrs.h>
28#include <tvm/relay/base.h>
29
30#include <string>
31
32namespace tvm {
33namespace relay {
34
/*!
 * \brief Add a 1D tensor to an axis of the data.
 *
 * \note bias_add is a special add operator defined in nn
 *   that enables automatic derivation of the bias's shape.
 *   You can use add directly for the more general case.
 */
42struct BiasAddAttrs : public tvm::AttrsNode<BiasAddAttrs> {
43 int axis;
44
45 TVM_DECLARE_ATTRS(BiasAddAttrs, "relay.attrs.BiasAddAttrs") {
    TVM_ATTR_FIELD(axis).describe("The axis along which to add the bias.").set_default(1);
47 }
48};
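// A minimal illustrative sketch (the call site below is hypothetical; only the field name
// `axis` comes from BiasAddAttrs above). With NCHW data and a bias of shape (C,), axis=1
// broadcasts the bias over the N, H and W dimensions:
//
//   // out[n, c, h, w] = data[n, c, h, w] + bias[c]
//   auto attrs = make_object<BiasAddAttrs>();
//   attrs->axis = 1;  // channel axis of an NCHW tensor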
49
50/*! \brief Attributes used in 1D convolution operators */
51struct Conv1DAttrs : public tvm::AttrsNode<Conv1DAttrs> {
52 Array<IndexExpr> strides;
53 Array<IndexExpr> padding;
54 Array<IndexExpr> dilation;
55 int groups;
56 IndexExpr channels;
57 Array<IndexExpr> kernel_size;
58 tvm::String data_layout;
59 tvm::String kernel_layout;
60 tvm::String out_layout;
61 DataType out_dtype;
62
63 TVM_DECLARE_ATTRS(Conv1DAttrs, "relay.attrs.Conv1DAttrs") {
64 TVM_ATTR_FIELD(strides)
65 .set_default(Array<IndexExpr>({
66 1,
67 }))
68 .describe("Specifies the stride of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded "
            "on both sides by this number of points.");
74 TVM_ATTR_FIELD(dilation)
75 .set_default(Array<IndexExpr>({
76 1,
77 }))
78 .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Number of groups for grouped convolution. Currently unused; "
        "grouped 1D convolution may be supported in the future.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, it is inferred from the shape of the weight.")
        .set_default(NullValue<IndexExpr>());
86 TVM_ATTR_FIELD(kernel_size)
87 .describe("Specifies the dimensions of the convolution window.")
88 .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCW")
        .describe(
            "Dimension ordering of input data. Can be 'NCW', 'NWC', etc. "
            "'N', 'C', 'W' stand for batch, channel, and width "
            "dimensions respectively. Convolution is applied on the 'W' "
            "dimension.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIW")
        .describe(
            "Dimension ordering of weight. Can be 'OIW', 'WIO', etc. "
            "'O', 'I', 'W' stand for num_filter, input_channel, and width "
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCW', 'NWC', etc. "
            "'N', 'C', 'W' stand for batch, channel, and width "
            "dimensions respectively. Defaults to the same layout as the input.");
108
109 // use 0 bits to indicate none.
110 TVM_ATTR_FIELD(out_dtype)
111 .set_default(NullValue<DataType>())
112 .describe("Output data type, set to explicit type under mixed precision setting");
113 }
114};
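// Illustrative sketch of filling Conv1DAttrs (the surrounding call site is an assumption;
// the field names are the ones declared above). For reference, the output width is
//   out_w = floor((in_w + pad_left + pad_right - dilation * (kernel_w - 1) - 1) / stride) + 1.
//
//   auto attrs = make_object<Conv1DAttrs>();
//   attrs->strides = Array<IndexExpr>({2});
//   attrs->padding = Array<IndexExpr>({1, 1});
//   attrs->kernel_size = Array<IndexExpr>({3});
//   attrs->channels = 16;
//   attrs->data_layout = "NCW";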
115
116/*! \brief Attributes used in convolution operators */
117struct Conv2DAttrs : public tvm::AttrsNode<Conv2DAttrs> {
118 Array<IndexExpr> strides;
119 Array<IndexExpr> padding;
120 Array<IndexExpr> dilation;
121 int groups;
122 IndexExpr channels;
123 Array<IndexExpr> kernel_size;
124 tvm::String data_layout;
125 tvm::String kernel_layout;
126 tvm::String out_layout;
127 tvm::String auto_scheduler_rewritten_layout; // The layout after auto-scheduler's layout rewrite
128 Array<PrimExpr> meta_schedule_original_shape; // The original shape of the weights
129 DataType out_dtype;
130
131 TVM_DECLARE_ATTRS(Conv2DAttrs, "relay.attrs.Conv2DAttrs") {
132 TVM_ATTR_FIELD(strides)
133 .set_default(Array<IndexExpr>({1, 1}))
134 .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "two ints : bottom and right use the same padding as top and left; "
            "four ints : padding widths in the order (top, left, bottom, right).");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs. "
        "At groups=1, all inputs are convolved to all outputs. "
        "At groups=2, the operation becomes equivalent to having two convolution "
        "layers side by side, each seeing half the input channels and producing "
        "half the output channels, with both halves subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, it is inferred from the shape of the weight.")
        .set_default(NullValue<IndexExpr>());
157 TVM_ATTR_FIELD(kernel_size)
158 .describe("Specifies the dimensions of the convolution window.")
159 .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCHW")
        .describe(
            "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Convolution is applied on the 'H' and "
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc. "
            "'O', 'I', 'H', 'W' stand for num_filter, input_channel, height, and width "
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Defaults to the same layout as the input.");
179
180 // use 0 bits to indicate none.
181 TVM_ATTR_FIELD(out_dtype)
182 .set_default(NullValue<DataType>())
183 .describe("Output data type, set to explicit type under mixed precision setting");
184 }
185};
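// Illustrative sketch of filling Conv2DAttrs (hypothetical call site; field names are from
// the struct above). The same output-size rule applies per spatial dimension:
//   out = floor((in + pad_begin + pad_end - dilation * (kernel - 1) - 1) / stride) + 1.
//
//   auto attrs = make_object<Conv2DAttrs>();
//   attrs->strides = Array<IndexExpr>({2, 2});
//   attrs->padding = Array<IndexExpr>({1, 1, 1, 1});  // (top, left, bottom, right)
//   attrs->kernel_size = Array<IndexExpr>({3, 3});
//   attrs->channels = 64;
//   attrs->data_layout = "NCHW";
//   attrs->kernel_layout = "OIHW";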
186
187/*! \brief Attributes used in winograd weight transformation operators */
188struct ConvWinogradWeightTransformAttrs : public tvm::AttrsNode<ConvWinogradWeightTransformAttrs> {
189 int tile_size;
190
191 TVM_DECLARE_ATTRS(ConvWinogradWeightTransformAttrs,
192 "relay.attrs.ConvWinogradWeightTransformAttrs") {
193 TVM_ATTR_FIELD(tile_size).describe(
194 "Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)");
195 }
196};
197
198/*! \brief Attributes used in gemm weight transformation operators */
199struct ConvGemmWeightTransformAttrs : public tvm::AttrsNode<ConvGemmWeightTransformAttrs> {
200 int tile_rows;
201 int tile_cols;
202
203 TVM_DECLARE_ATTRS(ConvGemmWeightTransformAttrs, "relay.attrs.ConvGemmWeightTransformAttrs") {
204 TVM_ATTR_FIELD(tile_rows).describe("Tile rows of the weight transformation for ConvGemm.");
205 TVM_ATTR_FIELD(tile_cols).describe("Tile columns of the weight transformation for ConvGemm.");
206 }
207};
208
209/*! \brief Attributes used in convolution operators with winograd algorithm */
210struct Conv2DWinogradAttrs : public tvm::AttrsNode<Conv2DWinogradAttrs> {
211 int tile_size;
212 Array<IndexExpr> strides;
213 Array<IndexExpr> padding;
214 Array<IndexExpr> dilation;
215 int groups;
216 IndexExpr channels;
217 Array<IndexExpr> kernel_size;
218 tvm::String data_layout;
219 tvm::String kernel_layout;
220 tvm::String out_layout;
221 tvm::String auto_scheduler_rewritten_layout; // The layout after auto-scheduler's layout rewrite
222 Array<PrimExpr> meta_schedule_original_shape; // The original shape of the weights
223 DataType out_dtype;
224
225 TVM_DECLARE_ATTRS(Conv2DWinogradAttrs, "relay.attrs.Conv2DWinogradAttrs") {
226 TVM_ATTR_FIELD(tile_size).describe(
227 "The tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)");
228 TVM_ATTR_FIELD(strides)
229 .set_default(Array<IndexExpr>({1, 1}))
230 .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "two ints : bottom and right use the same padding as top and left; "
            "four ints : padding widths in the order (top, left, bottom, right).");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs. "
        "At groups=1, all inputs are convolved to all outputs. "
        "At groups=2, the operation becomes equivalent to having two convolution "
        "layers side by side, each seeing half the input channels and producing "
        "half the output channels, with both halves subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, it is inferred from the shape of the weight.")
        .set_default(NullValue<IndexExpr>());
253 TVM_ATTR_FIELD(kernel_size)
254 .describe("Specifies the dimensions of the convolution window.")
255 .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCHW")
        .describe(
            "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Convolution is applied on the 'H' and "
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc. "
            "'O', 'I', 'H', 'W' stand for num_filter, input_channel, height, and width "
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Defaults to the same layout as the input.");
275
276 // use 0 bits to indicate none.
277 TVM_ATTR_FIELD(out_dtype)
278 .set_default(NullValue<DataType>())
279 .describe("Output data type, set to explicit type under mixed precision setting");
280 }
281};
282
283/*! \brief Attributes used in winograd weight transformation operators */
284struct Conv2DWinogradNNPACKWeightTransformAttrs
285 : public tvm::AttrsNode<Conv2DWinogradNNPACKWeightTransformAttrs> {
286 int convolution_algorithm;
287 DataType out_dtype;
288
289 TVM_DECLARE_ATTRS(Conv2DWinogradNNPACKWeightTransformAttrs,
290 "relay.attrs.Conv2DWinogradNNPACKWeightTransformAttrs") {
291 TVM_ATTR_FIELD(convolution_algorithm)
292 .describe(
293 "The convolution algorithm for Winograd NNPACK. "
294 "E.g. tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8 for WT_8x8, "
295 "tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16 for WT_8x8_FP16");
296 TVM_ATTR_FIELD(out_dtype)
297 .set_default(NullValue<DataType>())
298 .describe("Output data type, set to explicit type under mixed precision setting");
299 }
300};
301
302/*! \brief Attributes used in convolution operators */
303struct Conv3DAttrs : public tvm::AttrsNode<Conv3DAttrs> {
304 Array<IndexExpr> strides;
305 Array<IndexExpr> padding;
306 Array<IndexExpr> dilation;
307 int groups;
308 IndexExpr channels;
309 Array<IndexExpr> kernel_size;
310 tvm::String data_layout;
311 tvm::String kernel_layout;
312 tvm::String out_layout;
313 tvm::String auto_scheduler_rewritten_layout; // The layout after auto-scheduler's layout rewrite
314 Array<PrimExpr> meta_schedule_original_shape; // The original shape of the weights
315 DataType out_dtype;
316
317 TVM_DECLARE_ATTRS(Conv3DAttrs, "relay.attrs.Conv3DAttrs") {
318 TVM_ATTR_FIELD(strides)
319 .set_default(Array<IndexExpr>({1, 1, 1}))
320 .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "three ints : back, bottom and right use the same padding as front, top and left; "
            "six ints : padding widths in the order (front, top, left, back, bottom, right).");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs. "
        "At groups=1, all inputs are convolved to all outputs. "
        "At groups=2, the operation becomes equivalent to having two convolution "
        "layers side by side, each seeing half the input channels and producing "
        "half the output channels, with both halves subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, it is inferred from the shape of the weight.")
        .set_default(NullValue<IndexExpr>());
344 TVM_ATTR_FIELD(kernel_size)
345 .describe("Specifies the dimensions of the convolution window.")
346 .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCDHW")
        .describe(
            "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc. "
            "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
            "dimensions respectively. Convolution is applied on the 'D', 'H' and "
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIDHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIDHW', 'OIDHW16o16i', etc. "
            "'O', 'I', 'D', 'H', 'W' stand for num_filter, input_channel, depth, height, "
            "and width dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc. "
            "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
            "dimensions respectively. Defaults to the same layout as the input.");
366
367 // use 0 bits to indicate none.
368 TVM_ATTR_FIELD(out_dtype)
369 .set_default(NullValue<DataType>())
370 .describe("Output data type, set to explicit type under mixed precision setting");
371 }
372};
373
374/*! \brief Attributes used in transposed convolution operator */
375struct Conv3DTransposeAttrs : public tvm::AttrsNode<Conv3DTransposeAttrs> {
376 IndexExpr channels;
377 Array<IndexExpr> kernel_size;
378 Array<IndexExpr> strides;
379 Array<IndexExpr> padding;
380 Array<IndexExpr> output_padding;
381 Array<IndexExpr> dilation;
382 int groups;
383 tvm::String data_layout;
384 tvm::String kernel_layout;
385 tvm::String out_layout;
386 DataType out_dtype;
387
388 TVM_DECLARE_ATTRS(Conv3DTransposeAttrs, "relay.attrs.Conv3DTransposeAttrs") {
    TVM_ATTR_FIELD(channels)
        .set_default(NullValue<IndexExpr>())
        .describe(
            "The dimensionality of the output space, "
            "i.e. the number of output channels in the convolution.");
394 TVM_ATTR_FIELD(kernel_size)
395 .describe("The dimensions of the convolution window.")
396 .set_default(NullValue<Array<IndexExpr>>());
397 TVM_ATTR_FIELD(strides)
398 .set_default(Array<IndexExpr>({1, 1, 1}))
399 .describe("The strides of the convolution.");
    TVM_ATTR_FIELD(output_padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "Zero-padding added to one side of the output. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "three ints : back, bottom and right use the same padding as front, top and left; "
            "six ints : padding widths in the order (front, top, left, back, bottom, right).");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "three ints : back, bottom and right use the same padding as front, top and left; "
            "six ints : padding widths in the order (front, top, left, back, bottom, right).");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs. "
        "At groups=1, all inputs are convolved to all outputs. "
        "At groups=2, the operation becomes equivalent to having two convolution "
        "layers side by side, each seeing half the input channels and producing "
        "half the output channels, with both halves subsequently concatenated.");
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCDHW")
        .describe(
            "Dimension ordering of data. Can be 'NCDHW', 'NDHWC', etc. "
            "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
            "dimensions respectively. Convolution is applied on the 'D', 'H' and "
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIDHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIDHW', 'OIDHW16o16i', etc. "
            "'O', 'I', 'D', 'H', 'W' stand for num_filter, input_channel, depth, height, "
            "and width dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc. "
            "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
            "dimensions respectively. Defaults to the same layout as the input.");
444 TVM_ATTR_FIELD(out_dtype)
445 .set_default(NullValue<DataType>())
446 .describe("Output data type, set to explicit type under mixed precision setting");
447 }
448};
449
450/*! \brief Attributes used in 3d winograd convolution operators */
451struct Conv3DWinogradAttrs : public tvm::AttrsNode<Conv3DWinogradAttrs> {
452 int tile_size;
453 Array<IndexExpr> strides;
454 Array<IndexExpr> padding;
455 Array<IndexExpr> dilation;
456 int groups;
457 IndexExpr channels;
458 Array<IndexExpr> kernel_size;
459 std::string data_layout;
460 std::string kernel_layout;
461 std::string out_layout;
462 DataType out_dtype;
463
464 TVM_DECLARE_ATTRS(Conv3DWinogradAttrs, "relay.attrs.Conv3DWinogradAttrs") {
465 TVM_ATTR_FIELD(tile_size).describe(
466 "The tile size of winograd. E.g. 2 for F(2x2x2, 3x3x3) and 4 for F(4x4x4, 3x3x3)");
467 TVM_ATTR_FIELD(strides)
468 .set_default(Array<IndexExpr>({1, 1, 1}))
469 .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "three ints : back, bottom and right use the same padding as front, top and left; "
            "six ints : padding widths in the order (front, top, left, back, bottom, right).");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs. "
        "At groups=1, all inputs are convolved to all outputs. "
        "At groups=2, the operation becomes equivalent to having two convolution "
        "layers side by side, each seeing half the input channels and producing "
        "half the output channels, with both halves subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, it is inferred from the shape of the weight.")
        .set_default(NullValue<IndexExpr>());
493 TVM_ATTR_FIELD(kernel_size)
494 .describe("Specifies the dimensions of the convolution window.")
495 .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCDHW")
        .describe(
            "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc. "
            "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
            "dimensions respectively. Convolution is applied on the 'D', 'H' and "
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIDHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIDHW', 'OIDHW16o16i', etc. "
            "'O', 'I', 'D', 'H', 'W' stand for num_filter, input_channel, depth, height, "
            "and width dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc. "
            "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
            "dimensions respectively. Defaults to the same layout as the input.");
515
516 // use 0 bits to indicate none.
517 TVM_ATTR_FIELD(out_dtype)
518 .set_default(NullValue<DataType>())
519 .describe("Output data type, set to explicit type under mixed precision setting");
520 }
521};
522
523/*! \brief Attributes used in softmax operators */
524struct SoftmaxAttrs : public tvm::AttrsNode<SoftmaxAttrs> {
525 int axis;
526
527 TVM_DECLARE_ATTRS(SoftmaxAttrs, "relay.attrs.SoftmaxAttrs") {
528 TVM_ATTR_FIELD(axis).set_default(-1).describe("The axis to sum over when computing softmax.");
529 }
530};
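// For reference, along the configured axis: softmax(x)_i = exp(x_i) / sum_j exp(x_j).
// A minimal sketch (hypothetical call site):
//
//   auto attrs = make_object<SoftmaxAttrs>();
//   attrs->axis = -1;  // normalize over the last dimension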
531
532/*! \brief Attributes used in transposed convolution operator */
533struct Conv2DTransposeAttrs : public tvm::AttrsNode<Conv2DTransposeAttrs> {
534 IndexExpr channels;
535 Array<IndexExpr> kernel_size;
536 Array<IndexExpr> strides;
537 Array<IndexExpr> padding;
538 Array<IndexExpr> output_padding;
539 Array<IndexExpr> dilation;
540 int groups;
541 std::string data_layout;
542 std::string kernel_layout;
543 std::string out_layout;
544 DataType out_dtype;
545
546 TVM_DECLARE_ATTRS(Conv2DTransposeAttrs, "relay.attrs.Conv2DTransposeAttrs") {
    TVM_ATTR_FIELD(channels)
        .set_default(NullValue<IndexExpr>())
        .describe(
            "The dimensionality of the output space, "
            "i.e. the number of output channels in the convolution.");
552 TVM_ATTR_FIELD(kernel_size)
553 .describe("The dimensions of the convolution window.")
554 .set_default(NullValue<Array<IndexExpr>>());
555 TVM_ATTR_FIELD(strides)
556 .set_default(Array<IndexExpr>({1, 1}))
557 .describe("The strides of the convolution.");
    TVM_ATTR_FIELD(output_padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "Zero-padding added to one side of the output. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "two ints : bottom and right use the same padding as top and left; "
            "four ints : padding widths in the order (top, left, bottom, right).");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "two ints : bottom and right use the same padding as top and left; "
            "four ints : padding widths in the order (top, left, bottom, right).");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs. "
        "At groups=1, all inputs are convolved to all outputs. "
        "At groups=2, the operation becomes equivalent to having two convolution "
        "layers side by side, each seeing half the input channels and producing "
        "half the output channels, with both halves subsequently concatenated.");
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCHW")
        .describe(
            "Dimension ordering of data. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Convolution is applied on the 'H' and "
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc. "
            "'O', 'I', 'H', 'W' stand for num_filter, input_channel, height, and width "
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Defaults to the same layout as the input.");
602 TVM_ATTR_FIELD(out_dtype)
603 .set_default(NullValue<DataType>())
604 .describe("Output data type, set to explicit type under mixed precision setting");
605 }
606};
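// Illustrative note for Conv2DTransposeAttrs: per spatial dimension the output size is
//   out = (in - 1) * stride - (pad_begin + pad_end) + dilation * (kernel - 1) + output_padding + 1.
// A minimal sketch (hypothetical call site; field names are from the struct above):
//
//   auto attrs = make_object<Conv2DTransposeAttrs>();
//   attrs->strides = Array<IndexExpr>({2, 2});
//   attrs->padding = Array<IndexExpr>({1, 1});
//   attrs->output_padding = Array<IndexExpr>({1, 1});  // recover odd input sizes
//   attrs->kernel_size = Array<IndexExpr>({3, 3});
//   attrs->channels = 32;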
607
608/*! \brief Attributes used in dilate operator */
609struct DilateAttrs : public tvm::AttrsNode<DilateAttrs> {
610 Array<IndexExpr> strides;
611 double dilation_value;
612
613 TVM_DECLARE_ATTRS(DilateAttrs, "relay.attrs.DilateAttrs") {
614 TVM_ATTR_FIELD(strides)
615 .set_default(Array<IndexExpr>({1, 1}))
616 .describe("Dilation stride on each dimension, 1 means no dilation.");
617 TVM_ATTR_FIELD(dilation_value).set_default(0.0).describe("Value used to dilate the input.");
618 }
619};
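// For reference, dilation with stride s along one axis maps an input of length n to
// (n - 1) * s + 1, filling the inserted positions with dilation_value. A minimal sketch
// (hypothetical call site):
//
//   auto attrs = make_object<DilateAttrs>();
//   attrs->strides = Array<IndexExpr>({2, 2});  // a 3x3 input becomes 5x5
//   attrs->dilation_value = 0.0;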
620
621/*! \brief Attributes used in 1D transposed convolution operator */
622struct Conv1DTransposeAttrs : public tvm::AttrsNode<Conv1DTransposeAttrs> {
623 IndexExpr channels;
624 Array<IndexExpr> kernel_size;
625 Array<IndexExpr> strides;
626 Array<IndexExpr> padding;
627 Array<IndexExpr> output_padding;
628 Array<IndexExpr> dilation;
629 int groups;
630 std::string data_layout;
631 std::string kernel_layout;
632 std::string out_layout;
633 DataType out_dtype;
634
635 TVM_DECLARE_ATTRS(Conv1DTransposeAttrs, "relay.attrs.Conv1DTransposeAttrs") {
    TVM_ATTR_FIELD(channels)
        .set_default(NullValue<IndexExpr>())
        .describe(
            "The dimensionality of the output space, "
            "i.e. the number of output channels in the convolution.");
641 TVM_ATTR_FIELD(kernel_size)
642 .describe("The dimensions of the convolution window.")
643 .set_default(NullValue<Array<IndexExpr>>());
644 TVM_ATTR_FIELD(strides)
645 .set_default(Array<IndexExpr>({1}))
646 .describe("The strides of the convolution.");
647 TVM_ATTR_FIELD(output_padding)
648 .set_default(Array<IndexExpr>({0}))
649 .describe("Zero-padding added to one side of the output.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0}))
        .describe(
            "Symmetric or asymmetric padding: "
            "a single value means the input is implicitly zero-padded on both sides; "
            "two values mean padding[0] is used for the left side "
            "and padding[1] for the right side of the input.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs. "
        "At groups=1, all inputs are convolved to all outputs. "
        "At groups=2, the operation becomes equivalent to having two convolution "
        "layers side by side, each seeing half the input channels and producing "
        "half the output channels, with both halves subsequently concatenated.");
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCW")
        .describe(
            "Dimension ordering of data. Can be 'NCW', 'NWC', etc. "
            "'N', 'C', 'W' stand for batch, channel, and width "
            "dimensions respectively. Convolution is applied on the "
            "'W' dimension.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIW")
        .describe(
            "Dimension ordering of weight. Can be 'OIW', 'OIW16o16i', etc. "
            "'O', 'I', 'W' stand for num_filter, input_channel, and width "
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCW', 'NWC', etc. "
            "'N', 'C', 'W' stand for batch, channel, and width "
            "dimensions respectively. Defaults to the same layout as the input.");
685 TVM_ATTR_FIELD(out_dtype)
686 .set_default(NullValue<DataType>())
687 .describe("Output data type, set to explicit type under mixed precision setting");
688 }
689};
690
691/*! \brief Attributes for max pool operator */
692struct MaxPool2DAttrs : public tvm::AttrsNode<MaxPool2DAttrs> {
693 Array<IndexExpr> pool_size;
694 Array<IndexExpr> strides;
695 Array<IndexExpr> padding;
696 Array<IndexExpr> dilation;
697 tvm::String layout;
698 tvm::String out_layout;
699 bool ceil_mode;
700
701 TVM_DECLARE_ATTRS(MaxPool2DAttrs, "relay.attrs.MaxPool2DAttrs") {
702 TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the strides of the pooling window.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation of the pooling window.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "two ints : bottom and right use the same padding as top and left; "
            "four ints : padding widths in the order (top, left, bottom, right).");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
        "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
        "dimensions respectively. Pooling is applied on the 'H' and "
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Pooling is applied on the 'H' and "
            "'W' dimensions.");
729 TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
730 "When true, will use ceil instead of floor to compute the output shape.");
731 }
732};
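// Illustrative note for MaxPool2DAttrs: per spatial dimension the output size is
//   out = round((in + pad_begin + pad_end - dilation * (pool - 1) - 1) / stride) + 1,
// where round is ceil when ceil_mode is true and floor otherwise. A minimal sketch
// (hypothetical call site; field names are from the struct above):
//
//   auto attrs = make_object<MaxPool2DAttrs>();
//   attrs->pool_size = Array<IndexExpr>({2, 2});
//   attrs->strides = Array<IndexExpr>({2, 2});
//   attrs->padding = Array<IndexExpr>({0, 0});
//   attrs->ceil_mode = false;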
733
734/*! \brief Attributes for avg pool operator */
735struct AvgPool2DAttrs : public tvm::AttrsNode<AvgPool2DAttrs> {
736 Array<IndexExpr> pool_size;
737 Array<IndexExpr> strides;
738 Array<IndexExpr> padding;
739 Array<IndexExpr> dilation;
740 tvm::String layout;
741 tvm::String out_layout;
742 bool ceil_mode;
743 bool count_include_pad;
744
745 TVM_DECLARE_ATTRS(AvgPool2DAttrs, "relay.attrs.AvgPool2DAttrs") {
746 TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the strides of the pooling window.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation of the pooling window.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "two ints : bottom and right use the same padding as top and left; "
            "four ints : padding widths in the order (top, left, bottom, right).");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
        "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
        "dimensions respectively. Pooling is applied on the 'H' and "
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Pooling is applied on the 'H' and "
            "'W' dimensions.");
773 TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
774 "When true, will use ceil instead of floor to compute the output shape.");
775 TVM_ATTR_FIELD(count_include_pad)
776 .set_default(false)
777 .describe("When true, will include padding to compute the average");
778 }
779};
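// Note on count_include_pad for AvgPool2DAttrs: when true, every window divides by the full
// pool_size (e.g. 9 for a 3x3 window), even where the window overlaps padding; when false,
// only the number of valid (non-padded) elements is used as the divisor. A minimal sketch
// (hypothetical call site):
//
//   auto attrs = make_object<AvgPool2DAttrs>();
//   attrs->pool_size = Array<IndexExpr>({3, 3});
//   attrs->padding = Array<IndexExpr>({1, 1});
//   attrs->count_include_pad = false;  // divide by the count of valid elements only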
780
781/*! \brief Attributes for global pool operator */
782struct GlobalPool2DAttrs : public tvm::AttrsNode<GlobalPool2DAttrs> {
783 tvm::String layout;
784 tvm::String out_layout;
785
786 TVM_DECLARE_ATTRS(GlobalPool2DAttrs, "relay.attrs.GlobalPool2DAttrs") {
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
        "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
        "dimensions respectively. Pooling is applied on the 'H' and "
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Pooling is applied on the 'H' and "
            "'W' dimensions.");
799 }
800};
801
802/*! \brief Attributes for 1d adaptive pool operator */
803struct AdaptivePool1DAttrs : public tvm::AttrsNode<AdaptivePool1DAttrs> {
804 Array<IndexExpr> output_size;
805 std::string layout;
806 tvm::String out_layout;
807
808 TVM_DECLARE_ATTRS(AdaptivePool1DAttrs, "relay.attrs.AdaptivePool1DAttrs") {
809 TVM_ATTR_FIELD(output_size).set_default(Array<IndexExpr>({})).describe("Output width.");
    TVM_ATTR_FIELD(layout).set_default("NCW").describe(
        "Dimension ordering of input data. Can be 'NCW', 'NWC', etc. "
        "'N', 'C', 'W' stand for batch, channel, and width "
        "dimensions respectively. Pooling is applied on the "
        "'W' dimension.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCW', 'NWC', etc. "
            "'N', 'C', 'W' stand for batch, channel, and width "
            "dimensions respectively. Pooling is applied on the "
            "'W' dimension.");
822 }
823};
824
825/*! \brief Attributes for 2d adaptive pool operator */
826struct AdaptivePool2DAttrs : public tvm::AttrsNode<AdaptivePool2DAttrs> {
827 Array<IndexExpr> output_size;
828 std::string layout;
829 tvm::String out_layout;
830
831 TVM_DECLARE_ATTRS(AdaptivePool2DAttrs, "relay.attrs.AdaptivePool2DAttrs") {
832 TVM_ATTR_FIELD(output_size)
833 .set_default(Array<IndexExpr>({}))
834 .describe("Output height and width.");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
        "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
        "dimensions respectively. Pooling is applied on the 'H' and "
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Pooling is applied on the 'H' and "
            "'W' dimensions.");
847 }
848};
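// Note on adaptive pooling: given input extent I and requested output extent O, the window
// for output index i conventionally spans [floor(i * I / O), ceil((i + 1) * I / O)), so the
// kernel size and stride are derived from output_size rather than given explicitly.
// A minimal sketch (hypothetical call site):
//
//   auto attrs = make_object<AdaptivePool2DAttrs>();
//   attrs->output_size = Array<IndexExpr>({7, 7});  // e.g. pool any input down to 7x7
//   attrs->layout = "NCHW";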
849
850/*! \brief Attributes for 3d adaptive pool operator */
851struct AdaptivePool3DAttrs : public tvm::AttrsNode<AdaptivePool3DAttrs> {
852 Array<IndexExpr> output_size;
853 std::string layout;
854 tvm::String out_layout;
855
856 TVM_DECLARE_ATTRS(AdaptivePool3DAttrs, "relay.attrs.AdaptivePool3DAttrs") {
857 TVM_ATTR_FIELD(output_size)
858 .set_default(Array<IndexExpr>({}))
859 .describe("Output depth, height and width.");
    TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
        "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc. "
        "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
        "dimensions respectively. Pooling is applied on the 'D', 'H' and "
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc. "
            "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
            "dimensions respectively. Pooling is applied on the 'D', 'H' and "
            "'W' dimensions.");
872 }
873};
874
875/*! \brief Attributes for 1D max pool operator */
876struct MaxPool1DAttrs : public tvm::AttrsNode<MaxPool1DAttrs> {
877 Array<IndexExpr> pool_size;
878 Array<IndexExpr> strides;
879 Array<IndexExpr> dilation;
880 Array<IndexExpr> padding;
881 std::string layout;
882 tvm::String out_layout;
883 bool ceil_mode;
884
885 TVM_DECLARE_ATTRS(MaxPool1DAttrs, "relay.attrs.MaxPool1DAttrs") {
886 TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the strides of the pooling window.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the dilation of the pooling window.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on both sides; "
            "two ints : left padding and right padding.");
    TVM_ATTR_FIELD(layout).set_default("NCW").describe(
        "Dimension ordering of input data. Can be 'NCW', 'NWC', etc. "
        "'N', 'C', 'W' stand for batch, channel, and width "
        "dimensions respectively. Pooling is applied on the 'W' dimension.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCW', 'NWC', etc. "
            "'N', 'C', 'W' stand for batch, channel, and width "
            "dimensions respectively. Pooling is applied on the 'W' dimension.");
910 TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
911 "When true, will use ceil instead of floor to compute the output shape.");
912 }
913};
914
915/*! \brief Attributes for 1D avg pool operator */
916struct AvgPool1DAttrs : public tvm::AttrsNode<AvgPool1DAttrs> {
917 Array<IndexExpr> pool_size;
918 Array<IndexExpr> strides;
919 Array<IndexExpr> dilation;
920 Array<IndexExpr> padding;
921 std::string layout;
922 tvm::String out_layout;
923 bool ceil_mode;
924 bool count_include_pad;
925
926 TVM_DECLARE_ATTRS(AvgPool1DAttrs, "relay.attrs.AvgPool1DAttrs") {
927 TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the strides of the pooling window.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the dilation of the pooling window.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on both sides; "
            "two ints : left padding and right padding.");
    TVM_ATTR_FIELD(layout).set_default("NCW").describe(
        "Dimension ordering of input data. Can be 'NCW', 'NWC', etc. "
        "'N', 'C', 'W' stand for batch, channel, and width "
        "dimensions respectively. Pooling is applied on the 'W' dimension.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCW', 'NWC', etc. "
            "'N', 'C', 'W' stand for batch, channel, and width "
            "dimensions respectively. Pooling is applied on the 'W' dimension.");
951 TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
952 "When true, will use ceil instead of floor to compute the output shape.");
953 TVM_ATTR_FIELD(count_include_pad)
954 .set_default(false)
955 .describe("When true, will include padding to compute the average");
956 }
957};
958
959/*! \brief Attributes for 3D max pool operator */
960struct MaxPool3DAttrs : public tvm::AttrsNode<MaxPool3DAttrs> {
961 Array<IndexExpr> pool_size;
962 Array<IndexExpr> strides;
963 Array<IndexExpr> dilation;
964 Array<IndexExpr> padding;
965 std::string layout;
966 tvm::String out_layout;
967 bool ceil_mode;
968
969 TVM_DECLARE_ATTRS(MaxPool3DAttrs, "relay.attrs.MaxPool3DAttrs") {
970 TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the strides of the pooling window.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation of the pooling window.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "three ints : back, bottom and right use the same padding as front, top and left; "
            "six ints : padding widths in the order (front, top, left, back, bottom, right).");
    TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
        "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc. "
        "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
        "dimensions respectively. Pooling is applied on the 'D', 'H' and "
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc. "
            "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
            "dimensions respectively. Pooling is applied on the 'D', 'H' and "
            "'W' dimensions.");
997 TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
998 "When true, will use ceil instead of floor to compute the output shape.");
999 }
1000};
1001
1002/*! \brief Attributes for 3D avg pool operator */
1003struct AvgPool3DAttrs : public tvm::AttrsNode<AvgPool3DAttrs> {
1004 Array<IndexExpr> pool_size;
1005 Array<IndexExpr> strides;
1006 Array<IndexExpr> dilation;
1007 Array<IndexExpr> padding;
1008 std::string layout;
1009 tvm::String out_layout;
1010 bool ceil_mode;
1011 bool count_include_pad;
1012
1013 TVM_DECLARE_ATTRS(AvgPool3DAttrs, "relay.attrs.AvgPool3DAttrs") {
1014 TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the strides of the pooling window.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation of the pooling window.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "three ints : back, bottom and right use the same padding as front, top and left; "
            "six ints : padding widths in the order (front, top, left, back, bottom, right).");
    TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
        "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc. "
        "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
        "dimensions respectively. Pooling is applied on the 'D', 'H' and "
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc. "
            "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
            "dimensions respectively. Pooling is applied on the 'D', 'H' and "
            "'W' dimensions.");
1041 TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
1042 "When true, will use ceil instead of floor to compute the output shape.");
1043 TVM_ATTR_FIELD(count_include_pad)
1044 .set_default(false)
1045 .describe("When true, will include padding to compute the average");
1046 }
1047};
1048
1049/*! \brief Attributes for matmul operator */
1050struct MatmulAttrs : public tvm::AttrsNode<MatmulAttrs> {
1051 IndexExpr units;
1052 DataType out_dtype;
1053 bool transpose_a;
1054 bool transpose_b;
1055 // layout of B after auto-scheduler's layout rewrite
1056 tvm::String auto_scheduler_rewritten_layout;
1057 Array<PrimExpr> meta_schedule_original_shape; // The original shape of the weights
1058
1059 TVM_DECLARE_ATTRS(MatmulAttrs, "relay.attrs.MatmulAttrs") {
1060 TVM_ATTR_FIELD(units).describe("Number of hidden units of the dense transformation.");
1061
1062 // use 0 bits to indicate none.
1063 TVM_ATTR_FIELD(out_dtype)
1064 .set_default(NullValue<DataType>())
1065 .describe("Output data type, set to explicit type under mixed precision setting");
1066
1067 TVM_ATTR_FIELD(transpose_a)
1068 .set_default(false)
1069 .describe("Whether the first input tensor is in transposed format.");
1070
1071 TVM_ATTR_FIELD(transpose_b)
1072 .set_default(false)
1073 .describe("Whether the second input tensor is in transposed format.");
1074 }
1075};
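// Illustrative note for MatmulAttrs: with transpose_a=false and transpose_b=false the
// operands are A of shape (M, K) and B of shape (K, N), producing (M, N); setting either
// flag means the corresponding operand is stored transposed. A minimal sketch
// (hypothetical call site; field names are from the struct above):
//
//   auto attrs = make_object<MatmulAttrs>();
//   attrs->units = 128;         // N, the number of output columns
//   attrs->transpose_b = true;  // B is stored as (N, K), i.e. dense-style weights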
1076
1077/*! \brief Attributes for dense operator */
1078struct DenseAttrs : public tvm::AttrsNode<DenseAttrs> {
1079 IndexExpr units;
1080 // layout of B after auto-scheduler's layout rewrite
1081 tvm::String auto_scheduler_rewritten_layout;
1082 Array<PrimExpr> meta_schedule_original_shape; // The original shape of the weights
1083 DataType out_dtype;
1084
1085 TVM_DECLARE_ATTRS(DenseAttrs, "relay.attrs.DenseAttrs") {
1086 TVM_ATTR_FIELD(units).describe("Number of hidden units of the dense transformation.");
1087
1088 // use 0 bits to indicate none.
1089 TVM_ATTR_FIELD(out_dtype)
1090 .set_default(NullValue<DataType>())
1091 .describe("Output data type, set to explicit type under mixed precision setting");
1092 }
1093};
1094
1095/*! \brief Attributes for dense_pack operator */
1096struct DensePackAttrs : public tvm::AttrsNode<DensePackAttrs> {
1097 IndexExpr units;
1098 DataType out_dtype;
1099 tvm::String weight_layout;
1100
1101 TVM_DECLARE_ATTRS(DensePackAttrs, "relay.attrs.DensePackAttrs") {
1102 TVM_ATTR_FIELD(units).describe("Number of hidden units of the dense transformation.");
1103
1104 // use 0 bits to indicate none.
1105 TVM_ATTR_FIELD(out_dtype)
1106 .set_default(NullValue<DataType>())
1107 .describe("Output data type, set to explicit type under mixed precision setting");
1108 TVM_ATTR_FIELD(weight_layout)
1109 .set_default("NC")
1110 .describe("Dimension ordering of weight. Packed layouts, such as NC8n, are possible.");
1111 }
1112};
1113
1114/*! \brief Attributes for batch matmul operator. */
1115struct BatchMatmulAttrs : public tvm::AttrsNode<BatchMatmulAttrs> {
1116 DataType out_dtype;
1117 bool transpose_a;
1118 bool transpose_b;
1119 tvm::String auto_scheduler_rewritten_layout; // The layout after auto-scheduler's layout rewrite
1120 Array<PrimExpr> meta_schedule_original_shape; // The original shape of the weights
1121
1122 TVM_DECLARE_ATTRS(BatchMatmulAttrs, "relay.attrs.BatchMatmulAttrs") {
1123 // use 0 bits to indicate none.
1124 TVM_ATTR_FIELD(out_dtype)
1125 .set_default(NullValue<DataType>())
1126 .describe("Output data type, set to explicit type under mixed precision setting");
1127
1128 TVM_ATTR_FIELD(transpose_a)
1129 .set_default(false)
1130 .describe("Whether the first input tensor is in transposed format.");
1131
1132 TVM_ATTR_FIELD(transpose_b)
1133 .set_default(false)
1134 .describe("Whether the second input tensor is in transposed format.");
1135 }
1136};
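// Illustrative note for BatchMatmulAttrs: the same contraction as matmul is applied per
// batch, e.g. A of shape (b, M, K) with B of shape (b, K, N) yields (b, M, N) when both
// transpose flags are false, and each flag marks an operand that is stored transposed.
// A minimal sketch (hypothetical call site):
//
//   auto attrs = make_object<BatchMatmulAttrs>();
//   attrs->transpose_b = true;  // B is stored as (b, N, K)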
1137
1138/*! \brief Attributes for sparse_dense operator */
1139struct SparseDenseAttrs : public tvm::AttrsNode<SparseDenseAttrs> {
1140 bool sparse_lhs;
1141
1142 TVM_DECLARE_ATTRS(SparseDenseAttrs, "relay.attrs.SparseDenseAttrs") {
    TVM_ATTR_FIELD(sparse_lhs)
        .set_default(false)
        .describe(
            "Indicates whether the sparse matrix is on the left or the right of the "
            "multiplication. If true, the operation is S * D^T (D dense, S sparse); "
            "if false, it is D * S^T.");
1148 }
1149};
1150
1151/*! \brief Attributes for sparse_transpose operator */
1152struct SparseTransposeAttrs : public tvm::AttrsNode<SparseTransposeAttrs> {
1153 TVM_DECLARE_ATTRS(SparseTransposeAttrs, "relay.attrs.SparseTransposeAttrs") {}
1154};
1155
1156/*! \brief Attributes for sparse_dense operator */
1157struct SparseConv2DAttrs : public tvm::AttrsNode<SparseConv2DAttrs> {
1158 std::string layout;
1159 Array<IndexExpr> kernel_size;
1160
1161 TVM_DECLARE_ATTRS(SparseConv2DAttrs, "relay.attrs.SparseConv2DAttrs") {
    TVM_ATTR_FIELD(layout).set_default("NHWC").describe(
        "Dimension ordering of input data. Can be 'NCHW' or 'NHWC'. "
        "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
        "dimensions respectively.");
    TVM_ATTR_FIELD(kernel_size)
        .set_default(Array<IndexExpr>{1, 1})
        .describe("Kernel size for SparseConv2D, either 1x1 or 3x3.");
1169 }
1170};
1171
1172/*! \brief Attributes for FIFO buffer operator */
1173struct FIFOBufferAttrs : public tvm::AttrsNode<FIFOBufferAttrs> {
1174 int axis;
1175
1176 TVM_DECLARE_ATTRS(FIFOBufferAttrs, "relay.attrs.FIFOBufferAttrs") {
1177 TVM_ATTR_FIELD(axis).set_default(0);
1178 }
1179};
1180
1181/*! \brief Attributes for upsampling operator */
1182struct UpSamplingAttrs : public tvm::AttrsNode<UpSamplingAttrs> {
1183 double scale_h;
1184 double scale_w;
1185 tvm::String layout;
1186 tvm::String method;
1187 bool align_corners;
1188
1189 TVM_DECLARE_ATTRS(UpSamplingAttrs, "relay.attrs.UpSamplingAttrs") {
1190 TVM_ATTR_FIELD(scale_h).describe("The upsampling factor for height");
1191 TVM_ATTR_FIELD(scale_w).describe("The upsampling factor for width");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
        "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
        "dimensions respectively. Upsampling is applied on the 'H' and "
        "'W' dimensions.");
    TVM_ATTR_FIELD(method)
        .set_default("nearest_neighbor")
        .describe(
            "Specify the mode to use for scaling: "
            "nearest_neighbor - nearest-neighbor sampling, "
            "bilinear - bilinear interpolation, "
            "bicubic - bicubic interpolation.");
    TVM_ATTR_FIELD(align_corners)
        .set_default(false)
        .describe("Should be true to preserve the values at the corner pixels.");
1207 }
1208};
1209
1210/*! \brief Attributes for upsampling3d operator */
1211struct UpSampling3DAttrs : public tvm::AttrsNode<UpSampling3DAttrs> {
1212 double scale_d;
1213 double scale_h;
1214 double scale_w;
1215 std::string layout;
1216 std::string method;
1217 std::string coordinate_transformation_mode;
1218
1219 TVM_DECLARE_ATTRS(UpSampling3DAttrs, "relay.attrs.UpSampling3DAttrs") {
1220 TVM_ATTR_FIELD(scale_d).describe("The upsampling factor for depth");
1221 TVM_ATTR_FIELD(scale_h).describe("The upsampling factor for height");
1222 TVM_ATTR_FIELD(scale_w).describe("The upsampling factor for width");
    TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
        "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc. "
        "'N', 'C', 'D', 'H', 'W' stand for batch, channel, depth, height, and width "
        "dimensions respectively. Upsampling is applied on the 'D', 'H' and "
        "'W' dimensions.");
    TVM_ATTR_FIELD(method)
        .set_default("nearest_neighbor")
        .describe(
            "Specify the mode to use for scaling: "
            "nearest_neighbor - nearest-neighbor sampling, "
            "trilinear - trilinear interpolation.");
    TVM_ATTR_FIELD(coordinate_transformation_mode)
        .set_default("half_pixel")
        .describe(
            "Describes how to transform the coordinates in the resized tensor "
            "to the coordinates in the original tensor. "
            "Refer to the ONNX Resize operator specification for details. "
            "Available options are half_pixel, align_corners and asymmetric.");
1241 }
1242};
1243
1244/*! \brief Attributes used for the padding operator */
1245struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
1246 Array<Array<Integer>> pad_width;
1247 tvm::String pad_mode;
1248
1249 TVM_DECLARE_ATTRS(PadAttrs, "relay.attrs.PadAttrs") {
1250 TVM_ATTR_FIELD(pad_width).describe(
1251 "Number of values padded to the edges of each axis, "
1252 "in the format of ((before_1, after_1), ..., (before_N, after_N))");
1253 TVM_ATTR_FIELD(pad_mode)
1254 .set_default("constant")
1255 .describe(
1256 "Padding type to use. \"constant\" pads with constant_value, "
1257 "\"edge\" pads using the edge values of the input array, "
1258 "\"reflect\" pads by reflecting values with respect to the edges.");
1259 }
1260};
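// Illustrative sketch for PadAttrs (hypothetical call site): pad_width has one
// (before, after) pair per input axis, so ((0, 0), (0, 0), (1, 1), (2, 2)) on an NCHW
// tensor of shape (n, c, h, w) produces shape (n, c, h + 2, w + 4).
//
//   auto attrs = make_object<PadAttrs>();
//   attrs->pad_width = Array<Array<Integer>>{{0, 0}, {0, 0}, {1, 1}, {2, 2}};
//   attrs->pad_mode = "constant";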
1261
1262/*! \brief Attributes used for the MirrorPadding operator */
1263struct MirrorPadAttrs : public tvm::AttrsNode<MirrorPadAttrs> {
1264 std::string mode;
1265 Array<Array<IndexExpr>> pad_width;
1266
1267 TVM_DECLARE_ATTRS(MirrorPadAttrs, "relay.attrs.MirrorPadAttrs") {
1268 TVM_ATTR_FIELD(mode)
1269 .set_default("SYMMETRIC")
1270 .describe("Specifies how mirroring should be performed.");
1271 TVM_ATTR_FIELD(pad_width).describe(
1272 "Number of values padded to the edges of each axis, "
1273 "in the format of ((before_1, after_1), ..., (before_N, after_N))");
1274 }
1275};
1276
1277/*! \brief Attributes for leaky relu operator */
1278struct LeakyReluAttrs : public tvm::AttrsNode<LeakyReluAttrs> {
1279 double alpha;
1280
1281 TVM_DECLARE_ATTRS(LeakyReluAttrs, "relay.attrs.LeakyReluAttrs") {
1282 TVM_ATTR_FIELD(alpha).set_lower_bound(0.0).set_default(0.25).describe(
1283 "Slope coefficient for the negative half axis.");
1284 }
1285};
1286
1287/*! \brief Attributes for prelu operator */
1288struct PReluAttrs : public tvm::AttrsNode<PReluAttrs> {
1289 int axis;
1290
1291 TVM_DECLARE_ATTRS(PReluAttrs, "relay.attrs.PReluAttrs") {
    TVM_ATTR_FIELD(axis).set_default(1).describe(
        "The axis along which the channels are specified.");
1294 }
1295};
1296
1297/*! \brief Attributes used in dropout operator */
1298struct DropoutAttrs : public tvm::AttrsNode<DropoutAttrs> {
1299 double rate;
1300 TVM_DECLARE_ATTRS(DropoutAttrs, "relay.attrs.DropoutAttrs") {
1301 TVM_ATTR_FIELD(rate)
1302 .describe("Fraction of the input that gets dropped out during training time")
1303 .set_default(0.5);
1304 }
1305}; // struct DropoutAttrs
1306
1307/*! \brief Attributes used in batch_norm operator */
1308struct BatchNormAttrs : public tvm::AttrsNode<BatchNormAttrs> {
1309 int axis;
1310 double epsilon;
1311 bool center;
1312 bool scale;
1313
1314 TVM_DECLARE_ATTRS(BatchNormAttrs, "relay.attrs.BatchNormAttrs") {
1315 TVM_ATTR_FIELD(axis).describe("Specify which shape axis denotes the channel.").set_default(1);
1316 TVM_ATTR_FIELD(epsilon)
1317 .describe("Small float added to variance to avoid dividing by zero")
1318 .set_default(1e-5);
1319 TVM_ATTR_FIELD(center)
1320 .describe("If True, add offset of beta to normalized tensor. If False, beta is ignored")
1321 .set_default(true);
    TVM_ATTR_FIELD(scale)
        .describe(
            "If True, multiply by gamma. If False, gamma is not used. "
            "When the next layer is piecewise linear (e.g., nn.relu), "
            "this can be disabled since the scaling will be done by the next layer.")
        .set_default(true);
1328 }
1329}; // struct BatchNormAttrs
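// For reference, batch_norm applies per-channel normalization along the configured axis:
//   out = (x - mean) / sqrt(var + epsilon) * gamma + beta,
// where gamma is used only when scale is true and beta only when center is true.
// A minimal sketch (hypothetical call site):
//
//   auto attrs = make_object<BatchNormAttrs>();
//   attrs->axis = 1;  // channel axis of NCHW data
//   attrs->epsilon = 1e-5;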
1330
1331/*! \brief Attributes used in instance_norm operator */
1332struct InstanceNormAttrs : public tvm::AttrsNode<InstanceNormAttrs> {
1333 int axis;
1334 double epsilon;
1335 bool center;
1336 bool scale;
1337
1338 TVM_DECLARE_ATTRS(InstanceNormAttrs, "relay.attrs.InstanceNormAttrs") {
1339 TVM_ATTR_FIELD(axis).describe("Specify which shape axis denotes the channel.").set_default(1);
1340 TVM_ATTR_FIELD(epsilon)
1341 .describe("Small float added to variance to avoid dividing by zero")
1342 .set_default(1e-5);
1343 TVM_ATTR_FIELD(center).set_default(true).describe(
1344 "If true, add offset of beta to normalized tensor; "
1345 "otherwise, beta is ignored.");
1346 TVM_ATTR_FIELD(scale).set_default(true).describe(
1347 "If true, multiply by gamma; otherwise, gamma is ignored.");
1348 }
1349}; // struct InstanceNormAttrs

/*! \brief Attributes used in layer_norm operator */
struct LayerNormAttrs : public tvm::AttrsNode<LayerNormAttrs> {
  int axis;
  double epsilon;
  bool center;
  bool scale;

  TVM_DECLARE_ATTRS(LayerNormAttrs, "relay.attrs.LayerNormAttrs") {
    TVM_ATTR_FIELD(axis).set_default(-1).describe("Specify which shape axis denotes the channel.");
    TVM_ATTR_FIELD(epsilon).set_default(1e-5).describe(
        "Small float added to variance to avoid dividing by zero");
    TVM_ATTR_FIELD(center).set_default(true).describe(
        "If true, add offset of beta to normalized tensor; "
        "otherwise, beta is ignored.");
    TVM_ATTR_FIELD(scale).set_default(true).describe(
        "If true, multiply by gamma; otherwise, gamma is ignored.");
  }
};  // struct LayerNormAttrs
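
/*
 * Illustrative note (not part of the original header): layer_norm computes the
 * mean and variance over the `axis` dimension (the last axis by default) for
 * each remaining index independently, so no statistics are shared across the
 * batch.
 */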

/*! \brief Attributes used in group_norm operator */
struct GroupNormAttrs : public tvm::AttrsNode<GroupNormAttrs> {
  int num_groups;
  int axis;
  double epsilon;
  bool center;
  bool scale;

  TVM_DECLARE_ATTRS(GroupNormAttrs, "relay.attrs.GroupNormAttrs") {
    TVM_ATTR_FIELD(num_groups)
        .set_default(0)
        .describe("Specify number of groups to separate the channels into.");
    TVM_ATTR_FIELD(axis).set_default(1).describe("Specify which shape axis denotes the channel.");
    TVM_ATTR_FIELD(epsilon).set_default(1e-5).describe(
        "Small float added to variance to avoid dividing by zero");
    TVM_ATTR_FIELD(center).set_default(true).describe(
        "If true, add offset of beta to normalized tensor; "
        "otherwise, beta is ignored.");
    TVM_ATTR_FIELD(scale).set_default(true).describe(
        "If true, multiply by gamma; otherwise, gamma is ignored.");
  }
};  // struct GroupNormAttrs
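
/*
 * Illustrative note (not part of the original header): group_norm splits the
 * channel axis into `num_groups` groups (so the channel count must be
 * divisible by num_groups) and computes mean and variance per sample and per
 * group, over the channels in that group together with the spatial axes.
 */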

/*! \brief Attributes for LRN operator */
struct LRNAttrs : public tvm::AttrsNode<LRNAttrs> {
  int size;
  int axis;
  double bias;
  double alpha;
  double beta;

  TVM_DECLARE_ATTRS(LRNAttrs, "relay.attrs.LRNAttrs") {
    TVM_ATTR_FIELD(size).set_default(5).describe(
        "The size of the local region to be considered for normalization.");
    TVM_ATTR_FIELD(axis).set_default(1).describe(
        "The axis of the input data layout that denotes the channel.");
    TVM_ATTR_FIELD(bias).set_default(2).describe("The offset parameter to avoid division by 0.");
    TVM_ATTR_FIELD(alpha).set_default(0.0001).describe("The scaling parameter.");
    TVM_ATTR_FIELD(beta).set_default(0.75).describe("The exponent parameter.");
  }
};
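
/*
 * Illustrative sketch (not part of the original header), assuming the usual
 * local response normalization formula: each element is divided by
 *
 *   (bias + alpha / size * sum(x_j ^ 2)) ^ beta
 *
 * where the sum runs over a window of `size` neighboring positions along
 * `axis` centered on the element.
 */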

/*! \brief Attributes for L2Normalize operator */
struct L2NormalizeAttrs : public tvm::AttrsNode<L2NormalizeAttrs> {
  double eps;
  Array<Integer> axis;

  TVM_DECLARE_ATTRS(L2NormalizeAttrs, "relay.attrs.L2NormalizeAttrs") {
    TVM_ATTR_FIELD(eps).describe("A lower bound value for the norm, to avoid division by 0.");
    TVM_ATTR_FIELD(axis).describe("Axis over which the normalization is applied.");
  }
};
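
/*
 * Illustrative sketch (not part of the original header): the normalization
 * commonly computed with these attributes is
 *
 *   out = x / sqrt(max(sum(x * x, axis), eps))
 *
 * i.e. each element is divided by the L2 norm taken over `axis`, with `eps`
 * acting as a floor on the squared norm.
 */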

/*! \brief Attributes for DeformableConv2D operator */
struct DeformableConv2DAttrs : public tvm::AttrsNode<DeformableConv2DAttrs> {
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> dilation;
  int deformable_groups;
  int groups;
  IndexExpr channels;
  Array<IndexExpr> kernel_size;
  std::string data_layout;
  std::string kernel_layout;
  std::string out_layout;
  DataType out_dtype;

  TVM_DECLARE_ATTRS(DeformableConv2DAttrs, "relay.attrs.DeformableConv2DAttrs") {
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded. "
            "Padding supports both symmetric and asymmetric forms: "
            "one int : the same padding is used on all sides; "
            "two ints : bottom and right use the same padding as top and left; "
            "four ints : padding widths in the order of (top, left, bottom, right).");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(deformable_groups)
        .set_default(1)
        .describe(
            "Controls the connections between inputs and offsets. "
            "Input channels are partitioned into multiple deformable groups. Offsets "
            "are shared across input channels in the same deformable group.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs. "
        "At groups=1, all inputs are convolved to all outputs. "
        "At groups=2, the operation becomes equivalent to having two convolution "
        "layers side by side, each seeing half the input channels and producing "
        "half the output channels, with both subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, inferred by shape of the weight.")
        .set_default(NullValue<IndexExpr>());
    TVM_ATTR_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.")
        .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCHW")
        .describe(
            "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Convolution is applied on the 'H' and "
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc. "
            "'O', 'I', 'H', 'W' stand for num_filter, input_channel, height, and width "
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc. "
            "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
            "dimensions respectively. Defaults to the same layout as the input.");

    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};
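
/*
 * Illustrative note (not part of the original header): deformable convolution
 * takes an additional offset tensor (typically of shape
 * (batch, deformable_groups * kernel_h * kernel_w * 2, out_height, out_width))
 * whose values displace the regular sampling grid of the kernel; the attrs
 * above describe only the convolution itself, not the offset input.
 */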

/*! \brief Attributes used in subpixel operators */
struct SubPixelAttrs : public tvm::AttrsNode<SubPixelAttrs> {
  int block_size;
  std::string layout;
  std::string mode;

  TVM_DECLARE_ATTRS(SubPixelAttrs, "relay.attrs.SubPixelAttrs") {
    TVM_ATTR_FIELD(block_size)
        .describe("The size of subpixel blocks to compose or decompose.")
        .set_default(1);
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
        "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
        "dimensions respectively.");
    TVM_ATTR_FIELD(mode).set_default("DCR").describe(
        "Indicates the order in which channels are accessed. Must be one of "
        "'DCR' or 'CDR'.");
  }
};  // struct SubPixelAttrs
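
/*
 * Illustrative note (not part of the original header): for NCHW data,
 * depth_to_space with these attributes reshapes
 * (N, C, H, W) -> (N, C / (block_size * block_size), H * block_size, W * block_size),
 * and space_to_depth performs the inverse mapping; 'DCR' and 'CDR' differ only
 * in how the channel dimension is unpacked into the spatial blocks.
 */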

/*! \brief Attributes used in correlation operators */
struct CorrelationAttrs : public tvm::AttrsNode<CorrelationAttrs> {
  int kernel_size;
  int max_displacement;
  int stride1;
  int stride2;
  Array<IndexExpr> padding;
  bool is_multiply;
  String layout;

  TVM_DECLARE_ATTRS(CorrelationAttrs, "relay.attrs.CorrelationAttrs") {
    TVM_ATTR_FIELD(kernel_size)
        .describe("Kernel size for correlation, must be an odd number.")
        .set_default(1);
    TVM_ATTR_FIELD(max_displacement).describe("Max displacement of Correlation.").set_default(1);
    TVM_ATTR_FIELD(stride1).describe("Stride for data1.").set_default(1);
    TVM_ATTR_FIELD(stride2).describe("Stride for data2.").set_default(1);
    TVM_ATTR_FIELD(padding)
        .describe("Padding for data1 and data2.")
        .set_default(Array<IndexExpr>{0, 0});
    TVM_ATTR_FIELD(is_multiply)
        .describe("Operation type is either multiplication or subtraction.")
        .set_default(true);
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
        "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
        "dimensions respectively.");
  }
};  // struct CorrelationAttrs
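
/*
 * Illustrative note (not part of the original header): the correlation layer
 * (as used in FlowNet-style architectures) compares a kernel_size x kernel_size
 * patch of data1 with patches of data2 shifted by every displacement up to
 * max_displacement, producing one output channel per displacement; is_multiply
 * selects whether patches are compared by multiplication or by subtraction.
 */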

/*! \brief Attributes used in SpaceToBatchND operator */
struct SpaceToBatchNDAttrs : public tvm::AttrsNode<SpaceToBatchNDAttrs> {
  Array<Integer> block_shape;
  Array<Array<IndexExpr>> paddings;
  double pad_value;

  TVM_DECLARE_ATTRS(SpaceToBatchNDAttrs, "relay.attrs.SpaceToBatchNDAttrs") {
    TVM_ATTR_FIELD(block_shape)
        .set_default(Array<Integer>({1, 1}))
        .describe("1-D containing block size for each spatial dimension.");
    TVM_ATTR_FIELD(paddings).describe("2-D containing paddings for each spatial dimension.");
    TVM_ATTR_FIELD(pad_value).set_default(0.0).describe("The value used for padding.");
  }
};  // struct SpaceToBatchNDAttrs
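
/*
 * Illustrative note (not part of the original header): space_to_batch_nd first
 * pads each spatial dimension according to `paddings` (using `pad_value`), then
 * splits it into blocks of size block_shape[i] and moves those blocks into the
 * batch dimension, so the output batch size is batch * prod(block_shape).
 */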

/*! \brief Attributes used in BatchToSpaceND operator */
struct BatchToSpaceNDAttrs : public tvm::AttrsNode<BatchToSpaceNDAttrs> {
  Array<Integer> block_shape;
  Array<Array<IndexExpr>> crops;

  TVM_DECLARE_ATTRS(BatchToSpaceNDAttrs, "relay.attrs.BatchToSpaceNDAttrs") {
    TVM_ATTR_FIELD(block_shape)
        .set_default(Array<Integer>({1, 1}))
        .describe("1-D containing block size for each spatial dimension.");
    TVM_ATTR_FIELD(crops).describe(
        "2-D containing the amounts to crop from each spatial dimension.");
  }
};  // struct BatchToSpaceNDAttrs
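
/*
 * Illustrative note (not part of the original header): batch_to_space_nd is the
 * inverse of space_to_batch_nd; it moves blocks back from the batch dimension
 * into the spatial dimensions and then removes the border given by `crops`.
 */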

/*! \brief Attributes used in NLLLoss operator */
struct NLLLossAttrs : public tvm::AttrsNode<NLLLossAttrs> {
  std::string reduction;
  int ignore_index;

  TVM_DECLARE_ATTRS(NLLLossAttrs, "relay.attrs.NLLLossAttrs") {
    TVM_ATTR_FIELD(reduction).set_default("mean").describe(
        "The reduction method to apply to the output. Can be "
        "'none', 'mean' or 'sum'.");
    TVM_ATTR_FIELD(ignore_index).describe("The target value to ignore.");
  }
};  // struct NLLLossAttrs
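
/*
 * Illustrative sketch (not part of the original header), following the usual
 * negative log likelihood convention: for predictions p (log-probabilities),
 * targets t and per-class weights w,
 *
 *   loss_i = -w[t_i] * p_i[t_i]
 *
 * positions where t_i == ignore_index contribute nothing; 'sum' adds the
 * per-element losses, 'mean' divides that sum by the total weight, and 'none'
 * returns them unreduced.
 */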

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ATTRS_NN_H_