/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \brief Dense op constructions
 * \file nn/dense.h
 */
#ifndef TVM_TOPI_NN_DENSE_H_
#define TVM_TOPI_NN_DENSE_H_

#include <tvm/te/operation.h>
#include <tvm/topi/tags.h>

#include <string>

namespace tvm {
namespace topi {
namespace nn {

using namespace tvm::te;

/*!
 * \brief Creates an operation that calculates data * weight^T + bias
 *
 * \param data Tensor with shape [batch, in_dim]
 * \param weight Tensor with shape [out_dim, in_dim]
 * \param bias Tensor with shape [out_dim]. Optional; to omit bias, pass Tensor()
 * \param out_dtype Output data type. Used for mixed precision.
 *
 * \return Tensor with shape [batch, out_dim]
 */
inline tvm::te::Tensor dense(const tvm::te::Tensor& data, const tvm::te::Tensor& weight,
                             const tvm::te::Tensor& bias, const DataType& out_dtype) {
  ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
  ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
  if (bias.defined()) {
    ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias";
  }

  auto batch = data->shape[0];
  auto in_dim = data->shape[1];
  auto out_dim = weight->shape[0];

  // Reduce over the shared in_dim axis: out[i, j] = sum_k data[i, k] * weight[j, k],
  // casting both operands to out_dtype so the accumulation happens in the output precision.
  auto k = tvm::te::reduce_axis(Range(0, in_dim), "k");
  auto matmul = tvm::te::compute(
      {batch, out_dim},
      [&](Var i, Var j) {
        return tvm::sum(tvm::cast(out_dtype, data(i, k)) * tvm::cast(out_dtype, weight(j, k)),
                        {k});
      },
      "tensor", "dense");

  if (bias.defined()) {
    // Broadcast-add the 1-D bias along the output dimension.
    matmul = tvm::te::compute(
        {batch, out_dim},
        [&](Var i, Var j) { return matmul(i, j) + tvm::cast(out_dtype, bias(j)); }, "tensor",
        kBroadcast);
  }

  return matmul;
}
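
/*
 * Example usage (a minimal sketch; the placeholder shapes, names, and dtype below are
 * illustrative assumptions, not values fixed by this header):
 *
 *   tvm::te::Tensor data = tvm::te::placeholder({16, 64}, DataType::Float(32), "data");
 *   tvm::te::Tensor weight = tvm::te::placeholder({32, 64}, DataType::Float(32), "weight");
 *   tvm::te::Tensor bias = tvm::te::placeholder({32}, DataType::Float(32), "bias");
 *
 *   // Produces a [16, 32] tensor; pass tvm::te::Tensor() as bias to skip the bias add.
 *   tvm::te::Tensor out = dense(data, weight, bias, DataType::Float(32));
 */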

}  // namespace nn
}  // namespace topi
}  // namespace tvm
#endif  // TVM_TOPI_NN_DENSE_H_