/*******************************************************************************
* Copyright 2020-2022 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

/// @example prelu.cpp
/// > Annotated version: @ref prelu_example_cpp
///
/// @page prelu_example_cpp_short
///
/// This C++ API example demonstrates how to create and execute a
/// [PReLU](@ref dev_guide_prelu) primitive in forward training
/// propagation mode.
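///
/// PReLU is applied element-wise: dst(x) = src(x) when src(x) > 0 and
/// dst(x) = src(x) * weights(x) otherwise, where the slope tensor weights is
/// learnable.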
///
/// @page prelu_example_cpp Primitive Example
/// @copydetails prelu_example_cpp_short
///
/// @include prelu.cpp

#include <algorithm>
#include <cmath>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

#include "dnnl.hpp"
#include "example_utils.hpp"

using namespace dnnl;

using tag = memory::format_tag;
using dt = memory::data_type;

void prelu_example(dnnl::engine::kind engine_kind) {

    // Create execution dnnl::engine.
    dnnl::engine engine(engine_kind, 0);

    // Create dnnl::stream.
    dnnl::stream engine_stream(engine);

    // Tensor dimensions.
    const memory::dim N = 3, // batch size
            IC = 3, // channels
            IH = 227, // tensor height
            IW = 227; // tensor width

    // Source (src), weights, and destination (dst) tensor dimensions.
    const memory::dims src_dims = {N, IC, IH, IW};
    const memory::dims weights_dims = {N, IC, IH, IW};
    const memory::dims dst_dims = {N, IC, IH, IW};
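    // Note: the weights (slope) tensor here uses the same full NCHW shape as
    // src, that is, one learnable slope per element. oneDNN PReLU also
    // supports broadcast weights (for example, per-channel slopes); identical
    // shapes are used here to keep the example simple.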

    // Allocate buffers. In this example, out-of-place primitive execution is
    // demonstrated since both src and dst are required for later backward
    // propagation.
    std::vector<float> src_data(product(src_dims));
    std::vector<float> weights_data(product(weights_dims));
    std::vector<float> dst_data(product(dst_dims));

    // Initialize src tensor.
    std::generate(src_data.begin(), src_data.end(), []() {
        static int i = 0;
        return std::cos(i++ / 10.f);
    });

    // Initialize weights tensor.
    std::fill(weights_data.begin(), weights_data.end(), 0.3f);

    // Create memory objects for tensor data (src, weights, dst). In this
    // example, NCHW layout is assumed for src, weights and dst.
    auto user_src_mem = memory({src_dims, dt::f32, tag::nchw}, engine);
    auto user_weights_mem = memory({weights_dims, dt::f32, tag::nchw}, engine);
    auto user_dst_mem = memory({dst_dims, dt::f32, tag::nchw}, engine);

    // Create memory descriptors for the primitive. The src and dst tags are
    // set to match the corresponding user memory objects. Setting the weights
    // tag to format_tag::any enables the PReLU primitive to choose the memory
    // layout for an optimized primitive implementation, and that layout may
    // differ from the one provided by the user.
    auto src_md = memory::desc(src_dims, dt::f32, tag::nchw);
    auto weights_md = memory::desc(weights_dims, dt::f32, tag::any);
    auto dst_md = memory::desc(dst_dims, dt::f32, tag::nchw);

    // Write data to the memory objects' handles.
    write_to_dnnl_memory(src_data.data(), user_src_mem);
    write_to_dnnl_memory(weights_data.data(), user_weights_mem);

    // Create primitive descriptor.
    auto prelu_pd = prelu_forward::primitive_desc(
            engine, prop_kind::forward_training, src_md, weights_md, dst_md);

    // For now, assume that the weights memory layout generated
    // by the primitive and the one provided by the user are identical.
    auto prelu_weights_mem = user_weights_mem;

    // Reorder the data in case the weights memory layout generated by
    // the primitive and the one provided by the user are different. In this
    // case, we create an additional memory object with an internal buffer that
    // will hold the reordered data.
    if (prelu_pd.weights_desc() != user_weights_mem.get_desc()) {
        prelu_weights_mem = memory(prelu_pd.weights_desc(), engine);
        reorder(user_weights_mem, prelu_weights_mem)
                .execute(engine_stream, user_weights_mem, prelu_weights_mem);
    }
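
    // No reorder is needed for src and dst: their memory descriptors were
    // created with a fixed nchw tag, so they already match the layouts of the
    // user memory objects.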

    // Create the primitive.
    auto prelu_prim = prelu_forward(prelu_pd);

    // Primitive arguments.
    std::unordered_map<int, memory> prelu_args;
    prelu_args.insert({DNNL_ARG_SRC, user_src_mem});
    prelu_args.insert({DNNL_ARG_WEIGHTS, prelu_weights_mem});
    prelu_args.insert({DNNL_ARG_DST, user_dst_mem});

    // Primitive execution: PReLU.
    prelu_prim.execute(engine_stream, prelu_args);

    // Wait for the computation to finalize.
    engine_stream.wait();
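    // stream::wait() blocks until all primitives submitted to the stream,
    // including the weights reorder above, have completed.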

    // Read data from memory object's handle.
    read_from_dnnl_memory(dst_data.data(), user_dst_mem);
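
    // Optional sanity check, added on top of the original example: recompute
    // PReLU on the host and compare with the primitive's output. Because src
    // and weights share the same full NCHW shape here, the reference is simply
    // dst[i] = src[i] > 0 ? src[i] : src[i] * weights[i]. The 1e-6f tolerance
    // is an arbitrary choice for f32 data; std::runtime_error comes from
    // <stdexcept>.
    for (size_t i = 0; i < dst_data.size(); ++i) {
        const float ref = src_data[i] > 0.f
                ? src_data[i]
                : src_data[i] * weights_data[i];
        if (std::fabs(dst_data[i] - ref) > 1e-6f)
            throw std::runtime_error(
                    "PReLU output does not match the host reference.");
    }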
}

int main(int argc, char **argv) {
    return handle_example_errors(prelu_example, parse_engine_kind(argc, argv));
}