1/*******************************************************************************
2* Copyright 2020-2022 Intel Corporation
3*
4* Licensed under the Apache License, Version 2.0 (the "License");
5* you may not use this file except in compliance with the License.
6* You may obtain a copy of the License at
7*
8* http://www.apache.org/licenses/LICENSE-2.0
9*
10* Unless required by applicable law or agreed to in writing, software
11* distributed under the License is distributed on an "AS IS" BASIS,
12* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13* See the License for the specific language governing permissions and
14* limitations under the License.
15*******************************************************************************/
16
17/// @example matmul.cpp
18/// > Annotated version: @ref matmul_example_cpp
19///
20/// @page matmul_example_cpp_short
21///
22/// This C++ API example demonstrates how to create and execute a
23/// [MatMul](@ref dev_guide_matmul) primitive.
24///
25/// Key optimizations included in this example:
26/// - Primitive attributes with fused post-ops.
27///
28/// @page matmul_example_cpp Matmul Primitive Example
29/// @copydetails matmul_example_cpp_short
30///
31/// @include matmul.cpp
32
#include <algorithm>
#include <cmath>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

#include "example_utils.hpp"
#include "oneapi/dnnl/dnnl.hpp"
41
// Bring the oneDNN API into scope for this example file.
using namespace dnnl;

// Short aliases for the two memory enums used throughout the example.
using tag = memory::format_tag;
using dt = memory::data_type;
47void matmul_example(dnnl::engine::kind engine_kind) {
48
49 // Create execution dnnl::engine.
50 dnnl::engine engine(engine_kind, 0);
51
52 // Create dnnl::stream.
53 dnnl::stream engine_stream(engine);
54
55 // Tensor dimensions.
56 const memory::dim MB = 3, // batch size
57 M = 128, K = 256, N = 512;
58
59 // Source (src), weights, bias, and destination (dst) tensors dimensions.
60 memory::dims src_dims = {MB, M, K};
61 memory::dims weights_dims = {MB, K, N};
62 memory::dims bias_dims = {1, 1, N};
63 memory::dims dst_dims = {MB, M, N};
64
65 // Allocate buffers.
66 std::vector<float> src_data(product(src_dims));
67 std::vector<float> weights_data(product(weights_dims));
68 std::vector<float> bias_data(product(bias_dims));
69 std::vector<float> dst_data(product(dst_dims));
70
71 // Initialize src, weights, bias.
72 std::generate(src_data.begin(), src_data.end(), []() {
73 static int i = 0;
74 return std::cos(i++ / 10.f);
75 });
76 std::generate(weights_data.begin(), weights_data.end(), []() {
77 static int i = 0;
78 return std::sin(i++ * 2.f);
79 });
80 std::generate(bias_data.begin(), bias_data.end(), []() {
81 static int i = 0;
82 return std::tanh(float(i++));
83 });
84
85 // Create memory descriptors and memory objects for src, weights, bias, and
86 // dst.
87 auto src_md = memory::desc(src_dims, dt::f32, tag::abc);
88 auto weights_md = memory::desc(weights_dims, dt::f32, tag::abc);
89 auto bias_md = memory::desc(bias_dims, dt::f32, tag::abc);
90 auto dst_md = memory::desc(dst_dims, dt::f32, tag::abc);
91
92 auto src_mem = memory(src_md, engine);
93 auto weights_mem = memory(weights_md, engine);
94 auto bias_mem = memory(bias_md, engine);
95 auto dst_mem = memory(dst_md, engine);
96
97 // Write data to memory object's handles.
98 write_to_dnnl_memory(src_data.data(), src_mem);
99 write_to_dnnl_memory(weights_data.data(), weights_mem);
100 write_to_dnnl_memory(bias_data.data(), bias_mem);
101
102 // Create primitive post-ops (ReLU).
103 const float alpha = 0.f;
104 const float beta = 0.f;
105 post_ops matmul_ops;
106 matmul_ops.append_eltwise(algorithm::eltwise_relu, alpha, beta);
107 primitive_attr matmul_attr;
108 matmul_attr.set_post_ops(matmul_ops);
109
110 // Create primitive descriptor.
111 auto matmul_pd = matmul::primitive_desc(
112 engine, src_md, weights_md, bias_md, dst_md, matmul_attr);
113
114 // Create the primitive.
115 auto matmul_prim = matmul(matmul_pd);
116
117 // Primitive arguments.
118 std::unordered_map<int, memory> matmul_args;
119 matmul_args.insert({DNNL_ARG_SRC, src_mem});
120 matmul_args.insert({DNNL_ARG_WEIGHTS, weights_mem});
121 matmul_args.insert({DNNL_ARG_BIAS, bias_mem});
122 matmul_args.insert({DNNL_ARG_DST, dst_mem});
123
124 // Primitive execution: matrix multiplication with ReLU.
125 matmul_prim.execute(engine_stream, matmul_args);
126
127 // Wait for the computation to finalize.
128 engine_stream.wait();
129
130 // Read data from memory object's handle.
131 read_from_dnnl_memory(dst_data.data(), dst_mem);
132}
133
134int main(int argc, char **argv) {
135 return handle_example_errors(matmul_example, parse_engine_kind(argc, argv));
136}
137