1 | /******************************************************************************* |
2 | * Copyright 2020-2022 Intel Corporation |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | *******************************************************************************/ |
16 | |
17 | /// @example layer_normalization.cpp |
18 | /// > Annotated version: @ref layer_normalization_example_cpp |
19 | /// |
20 | /// @page layer_normalization_example_cpp_short |
21 | /// |
22 | /// This C++ API example demonstrates how to create and execute a |
23 | /// [Layer normalization](@ref dev_guide_layer_normalization) primitive in |
24 | /// forward propagation mode. |
25 | /// |
26 | /// Key optimizations included in this example: |
27 | /// - In-place primitive execution; |
28 | /// - Creation of memory objects using the primitive descriptor. |
29 | /// |
30 | /// @page layer_normalization_example_cpp Layer Normalization Primitive Example |
31 | /// @copydetails layer_normalization_example_cpp_short |
32 | /// |
33 | /// @include layer_normalization.cpp |
34 | |
#include <algorithm>
#include <cmath>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

#include "example_utils.hpp"
#include "oneapi/dnnl/dnnl.hpp"
43 | |
44 | using namespace dnnl; |
45 | |
46 | using tag = memory::format_tag; |
47 | using dt = memory::data_type; |
48 | |
49 | void layer_normalization_example(dnnl::engine::kind engine_kind) { |
50 | |
51 | /// Create execution dnnl::engine. |
52 | dnnl::engine engine(engine_kind, 0); |
53 | |
54 | // Create dnnl::stream. |
55 | dnnl::stream engine_stream(engine); |
56 | |
57 | // Tensor dimensions. |
58 | const memory::dim T = 12, // time steps |
59 | N = 3, // batch |
60 | C = 227; // channels |
61 | |
62 | // Source (src) and destination (dst) tensors dimensions. |
63 | const memory::dims src_dims = {T, N, C}; |
64 | |
65 | // Scale/shift tensor dimensions. |
66 | memory::dims scaleshift_dims = {C}; |
67 | |
68 | // Allocate buffer. |
69 | std::vector<float> src_data(product(src_dims)); |
70 | std::vector<float> scale_data(product(scaleshift_dims)); |
71 | std::vector<float> shift_data(product(scaleshift_dims)); |
72 | |
73 | // Initialize src tensor. |
74 | std::generate(src_data.begin(), src_data.end(), []() { |
75 | static int i = 0; |
76 | return std::cos(i++ / 10.f); |
77 | }); |
78 | |
79 | // Initialize scale. |
80 | std::generate(scale_data.begin(), scale_data.end(), []() { |
81 | static int i = 0; |
82 | return std::sin(i++ * 2.f); |
83 | }); |
84 | |
85 | // Initialize shift. |
86 | std::generate(shift_data.begin(), shift_data.end(), []() { |
87 | static int i = 0; |
88 | return std::tan(float(i++)); |
89 | }); |
90 | |
91 | // Create src memory descriptor and memory objects. |
92 | auto src_md = memory::desc(src_dims, dt::f32, tag::tnc); |
93 | auto dst_md = memory::desc(src_dims, dt::f32, tag::tnc); |
94 | auto scaleshift_md = memory::desc(scaleshift_dims, dt::f32, tag::x); |
95 | |
96 | auto src_mem = memory(src_md, engine); |
97 | auto scale_mem = memory(scaleshift_md, engine); |
98 | auto shift_mem = memory(scaleshift_md, engine); |
99 | |
100 | // Write data to memory object's handle. |
101 | write_to_dnnl_memory(src_data.data(), src_mem); |
102 | write_to_dnnl_memory(scale_data.data(), scale_mem); |
103 | write_to_dnnl_memory(shift_data.data(), shift_mem); |
104 | |
105 | // Create primitive descriptor. |
106 | const float epsilon = 1.e-10f; |
107 | auto lnorm_pd = layer_normalization_forward::primitive_desc(engine, |
108 | prop_kind::forward_training, src_md, dst_md, epsilon, |
109 | normalization_flags::use_scale | normalization_flags::use_shift); |
110 | |
111 | // Use the memory descriptors from the primitive to create memory objects |
112 | // required for the primitive: mean, variance, scale/shift. |
113 | auto mean_mem = memory(lnorm_pd.mean_desc(), engine); |
114 | auto variance_mem = memory(lnorm_pd.variance_desc(), engine); |
115 | |
116 | // Create the primitive. |
117 | auto lnorm_prim = layer_normalization_forward(lnorm_pd); |
118 | |
119 | // Primitive arguments. Set up in-place execution by assigning src as DST. |
120 | std::unordered_map<int, memory> lnorm_args; |
121 | lnorm_args.insert({DNNL_ARG_SRC, src_mem}); |
122 | lnorm_args.insert({DNNL_ARG_MEAN, mean_mem}); |
123 | lnorm_args.insert({DNNL_ARG_VARIANCE, variance_mem}); |
124 | lnorm_args.insert({DNNL_ARG_SCALE, scale_mem}); |
125 | lnorm_args.insert({DNNL_ARG_SHIFT, shift_mem}); |
126 | lnorm_args.insert({DNNL_ARG_DST, src_mem}); |
127 | |
128 | // Primitive execution: layer normalization. |
129 | lnorm_prim.execute(engine_stream, lnorm_args); |
130 | |
131 | // Wait for the computation to finalize. |
132 | engine_stream.wait(); |
133 | |
134 | // Read data from memory object's handle.s |
135 | read_from_dnnl_memory(src_data.data(), src_mem); |
136 | } |
137 | |
138 | int main(int argc, char **argv) { |
139 | return handle_example_errors( |
140 | layer_normalization_example, parse_engine_kind(argc, argv)); |
141 | } |
142 | |