1 | /******************************************************************************* |
2 | * Copyright 2020-2022 Intel Corporation |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | *******************************************************************************/ |
16 | |
17 | /// @example pooling.cpp |
18 | /// > Annotated version: @ref pooling_example_cpp |
19 | /// |
20 | /// @page pooling_example_cpp_short |
21 | /// |
22 | /// This C++ API example demonstrates how to create and execute a |
23 | /// [Pooling](@ref dev_guide_pooling) primitive in forward training propagation |
24 | /// mode. |
25 | /// |
26 | /// @page pooling_example_cpp Pooling Primitive Example |
27 | /// @copydetails pooling_example_cpp_short |
28 | /// |
29 | /// @include pooling.cpp |
30 | |
#include <algorithm>
#include <cmath>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

#include "example_utils.hpp"
#include "oneapi/dnnl/dnnl.hpp"
39 | |
40 | using namespace dnnl; |
41 | |
42 | using tag = memory::format_tag; |
43 | using dt = memory::data_type; |
44 | |
45 | void pooling_example(dnnl::engine::kind engine_kind) { |
46 | |
47 | // Create execution dnnl::engine. |
48 | dnnl::engine engine(engine_kind, 0); |
49 | |
50 | // Create dnnl::stream. |
51 | dnnl::stream engine_stream(engine); |
52 | |
53 | // Tensor dimensions. |
54 | const memory::dim N = 3, // batch size |
55 | IC = 3, // input channels |
56 | IH = 27, // input tensor height |
57 | IW = 27, // input tensor width |
58 | KH = 11, // kernel height |
59 | KW = 11, // kernel width |
60 | PH_L = 0, // height padding: left |
61 | PH_R = 0, // height padding: right |
62 | PW_L = 0, // width padding: left |
63 | PW_R = 0, // width padding: right |
64 | SH = 4, // height-wise stride |
65 | SW = 4, // width-wise stride |
66 | DH = 1, // height-wise dilation |
67 | DW = 1; // width-wise dilation |
68 | |
69 | const memory::dim OH = (IH - ((KH - 1) * DH + KH) + PH_L + PH_R) / SH + 1; |
70 | const memory::dim OW = (IW - ((KW - 1) * DW + KW) + PW_L + PW_R) / SW + 1; |
71 | |
72 | // Source (src) and destination (dst) tensors dimensions. |
73 | memory::dims src_dims = {N, IC, IH, IW}; |
74 | memory::dims dst_dims = {N, IC, OH, OW}; |
75 | |
76 | // Kernel dimensions. |
77 | memory::dims kernel_dims = {KH, KW}; |
78 | |
79 | // Strides, padding dimensions. |
80 | memory::dims strides_dims = {SH, SW}; |
81 | memory::dims padding_dims_l = {PH_L, PW_L}; |
82 | memory::dims padding_dims_r = {PH_R, PW_R}; |
83 | memory::dims dilation = {DH, DW}; |
84 | |
85 | // Allocate buffers. |
86 | std::vector<float> src_data(product(src_dims)); |
87 | std::vector<float> dst_data(product(dst_dims)); |
88 | |
89 | std::generate(src_data.begin(), src_data.end(), []() { |
90 | static int i = 0; |
91 | return std::cos(i++ / 10.f); |
92 | }); |
93 | |
94 | // Create memory descriptors and memory objects for src and dst. |
95 | auto src_md = memory::desc(src_dims, dt::f32, tag::nchw); |
96 | auto src_mem = memory(src_md, engine); |
97 | |
98 | auto dst_md = memory::desc(dst_dims, dt::f32, tag::nchw); |
99 | auto dst_mem = memory(dst_md, engine); |
100 | |
101 | // Write data to memory object's handle. |
102 | write_to_dnnl_memory(src_data.data(), src_mem); |
103 | |
104 | // Create primitive descriptor. |
105 | auto pooling_pd = pooling_forward::primitive_desc(engine, |
106 | prop_kind::forward_training, algorithm::pooling_max, src_md, dst_md, |
107 | strides_dims, kernel_dims, dilation, padding_dims_l, |
108 | padding_dims_r); |
109 | |
110 | // Create workspace memory objects using memory descriptor created by the |
111 | // primitive descriptor. |
112 | // NOTE: Here, the workspace is required to save the indices where maximum |
113 | // was found, and is used in backward pooling to perform upsampling. |
114 | auto workspace_mem = memory(pooling_pd.workspace_desc(), engine); |
115 | |
116 | // Create the primitive. |
117 | auto pooling_prim = pooling_forward(pooling_pd); |
118 | |
119 | // Primitive arguments. Set up in-place execution by assigning src as DST. |
120 | std::unordered_map<int, memory> pooling_args; |
121 | pooling_args.insert({DNNL_ARG_SRC, src_mem}); |
122 | pooling_args.insert({DNNL_ARG_DST, dst_mem}); |
123 | pooling_args.insert({DNNL_ARG_WORKSPACE, workspace_mem}); |
124 | |
125 | // Primitive execution: pooling. |
126 | pooling_prim.execute(engine_stream, pooling_args); |
127 | |
128 | // Wait for the computation to finalize. |
129 | engine_stream.wait(); |
130 | |
131 | // Read data from memory object's handle. |
132 | read_from_dnnl_memory(dst_data.data(), dst_mem); |
133 | } |
134 | |
135 | int main(int argc, char **argv) { |
136 | return handle_example_errors( |
137 | pooling_example, parse_engine_kind(argc, argv)); |
138 | } |
139 | |