1 | /******************************************************************************* |
2 | * Copyright 2020-2022 Intel Corporation |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | *******************************************************************************/ |
16 | |
17 | /// @example batch_normalization.cpp |
18 | /// > Annotated version: @ref batch_normalization_example_cpp |
19 | /// |
20 | /// @page batch_normalization_example_cpp_short |
21 | /// |
22 | /// This C++ API example demonstrates how to create and execute a |
23 | /// [Batch Normalization](@ref dev_guide_batch_normalization) primitive in |
24 | /// forward training propagation mode. |
25 | /// |
26 | /// Key optimizations included in this example: |
27 | /// - In-place primitive execution; |
28 | /// - Source memory format for an optimized primitive implementation; |
/// - Fused ReLU activation via normalization flags passed to the primitive
///   descriptor (see the formula below).
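///
/// Batch normalization applies the following per-channel transformation
/// (see @ref dev_guide_batch_normalization for the full definition):
///
///     dst(n, c, h, w) = gamma(c) * (src(n, c, h, w) - mean(c))
///             / sqrt(variance(c) + eps) + beta(c)
///
/// where mean and variance are computed over the mini-batch in forward
/// training propagation mode, and the fused ReLU clamps negative results to
/// zero.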
30 | /// |
31 | /// @page batch_normalization_example_cpp Batch Normalization Primitive Example |
32 | /// @copydetails batch_normalization_example_cpp_short |
33 | /// |
34 | /// @include batch_normalization.cpp |
35 | |
36 | #include <algorithm> |
37 | #include <cmath> |
38 | #include <iostream> |
39 | #include <string> |
40 | #include <vector> |
41 | |
42 | #include "example_utils.hpp" |
43 | #include "oneapi/dnnl/dnnl.hpp" |
44 | |
45 | using namespace dnnl; |
46 | |
47 | using tag = memory::format_tag; |
48 | using dt = memory::data_type; |
49 | |
50 | void batch_normalization_example(dnnl::engine::kind engine_kind) { |
51 | |
52 | // Create execution dnnl::engine. |
53 | dnnl::engine engine(engine_kind, 0); |
54 | |
55 | // Create dnnl::stream. |
56 | dnnl::stream engine_stream(engine); |
57 | |
58 | // Tensor dimensions. |
59 | const memory::dim N = 3, // batch size |
60 | IC = 3, // channels |
61 | IH = 227, // tensor height |
62 | IW = 227; // tensor width |
63 | |
    // Source (src) and destination (dst) tensor dimensions.
65 | memory::dims src_dims = {N, IC, IH, IW}; |
66 | |
67 | // Scale/shift tensor dimensions. |
68 | memory::dims scaleshift_dims = {IC}; |
69 | |
70 | // Allocate buffers. |
71 | std::vector<float> src_data(product(src_dims)); |
72 | std::vector<float> scale_data(product(scaleshift_dims)); |
73 | std::vector<float> shift_data(product(scaleshift_dims)); |
74 | |
75 | // Initialize src. |
76 | std::generate(src_data.begin(), src_data.end(), []() { |
77 | static int i = 0; |
78 | return std::cos(i++ / 10.f); |
79 | }); |
80 | |
81 | // Initialize scale. |
82 | std::generate(scale_data.begin(), scale_data.end(), []() { |
83 | static int i = 0; |
84 | return std::sin(i++ * 2.f); |
85 | }); |
86 | |
87 | // Initialize shift. |
88 | std::generate(shift_data.begin(), shift_data.end(), []() { |
89 | static int i = 0; |
90 | return std::tan(float(i++)); |
91 | }); |
92 | |
93 | // Create src and scale/shift memory descriptors and memory objects. |
94 | auto src_md = memory::desc(src_dims, dt::f32, tag::nchw); |
95 | auto dst_md = memory::desc(src_dims, dt::f32, tag::nchw); |
96 | auto scaleshift_md = memory::desc(scaleshift_dims, dt::f32, tag::x); |
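    // NOTE: tag::x denotes the plain format for 1-D tensors; scale and shift
    // are per-channel vectors of IC elements each.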
97 | |
98 | auto src_mem = memory(src_md, engine); |
99 | auto scale_mem = memory(scaleshift_md, engine); |
100 | auto shift_mem = memory(scaleshift_md, engine); |
101 | |
    // Write data to the memory objects' handles.
103 | write_to_dnnl_memory(src_data.data(), src_mem); |
104 | write_to_dnnl_memory(scale_data.data(), scale_mem); |
105 | write_to_dnnl_memory(shift_data.data(), shift_mem); |
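
    // NOTE: write_to_dnnl_memory() is a helper from example_utils.hpp that
    // copies host data into a dnnl::memory object regardless of whether the
    // memory resides on the CPU or on a GPU device.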
106 | |
107 | // Create primitive descriptor. |
108 | auto bnorm_pd = batch_normalization_forward::primitive_desc(engine, |
109 | prop_kind::forward_training, src_md, dst_md, 1.e-10f, |
110 | normalization_flags::use_scale | normalization_flags::use_shift |
111 | | normalization_flags::fuse_norm_relu); |
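    // NOTE: The 1.e-10f argument is the epsilon added to the variance for
    // numerical stability; use_scale and use_shift request the per-channel
    // gamma (scale) and beta (shift) arguments, and fuse_norm_relu fuses a
    // ReLU into the forward pass.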
112 | |
113 | // Create memory objects using memory descriptors created by the primitive |
114 | // descriptor: mean, variance, workspace. |
    // NOTE: Here, the fused ReLU requires a workspace for later use in
    // backward propagation mode.
117 | auto mean_mem = memory(bnorm_pd.mean_desc(), engine); |
118 | auto variance_mem = memory(bnorm_pd.variance_desc(), engine); |
119 | auto workspace_mem = memory(bnorm_pd.workspace_desc(), engine); |
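
    // NOTE: In forward training propagation mode the primitive computes the
    // mean and variance over the mini-batch and writes them to mean_mem and
    // variance_mem, so these memory objects need no initialization here.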
120 | |
121 | // Create the primitive. |
122 | auto bnorm_prim = batch_normalization_forward(bnorm_pd); |
123 | |
124 | // Primitive arguments. Set up in-place execution by assigning src as DST. |
125 | std::unordered_map<int, memory> bnorm_args; |
126 | bnorm_args.insert({DNNL_ARG_SRC, src_mem}); |
127 | bnorm_args.insert({DNNL_ARG_MEAN, mean_mem}); |
128 | bnorm_args.insert({DNNL_ARG_VARIANCE, variance_mem}); |
129 | bnorm_args.insert({DNNL_ARG_SCALE, scale_mem}); |
130 | bnorm_args.insert({DNNL_ARG_SHIFT, shift_mem}); |
131 | bnorm_args.insert({DNNL_ARG_WORKSPACE, workspace_mem}); |
132 | bnorm_args.insert({DNNL_ARG_DST, src_mem}); |
133 | |
134 | // Primitive execution: batch normalization with ReLU. |
135 | bnorm_prim.execute(engine_stream, bnorm_args); |
136 | |
137 | // Wait for the computation to finalize. |
138 | engine_stream.wait(); |
139 | |
140 | // Read data from memory object's handle. |
141 | read_from_dnnl_memory(src_data.data(), src_mem); |
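
    // Illustrative check (an addition to the original example): because a
    // ReLU is fused into the primitive, every value read back from the
    // in-place destination is expected to be non-negative.
    const float min_val
            = *std::min_element(src_data.begin(), src_data.end());
    std::cout << "Minimum output value: " << min_val
              << " (expected to be >= 0 after the fused ReLU)" << std::endl;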
142 | } |
143 | |
144 | int main(int argc, char **argv) { |
145 | return handle_example_errors( |
146 | batch_normalization_example, parse_engine_kind(argc, argv)); |
147 | } |
148 | |