1 | /******************************************************************************* |
2 | * Copyright 2020-2022 Intel Corporation |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | *******************************************************************************/ |
16 | |
17 | /// @example softmax.cpp |
18 | /// > Annotated version: @ref softmax_example_cpp |
19 | /// |
20 | /// @page softmax_example_cpp_short |
21 | /// |
22 | /// This C++ API example demonstrates how to create and execute a |
23 | /// [Softmax](@ref dev_guide_softmax) primitive in forward training propagation |
24 | /// mode. |
25 | /// |
26 | /// Key optimizations included in this example: |
27 | /// - In-place primitive execution; |
28 | /// - Softmax along axis 1 (C) for 2D tensors. |
29 | /// |
30 | /// @page softmax_example_cpp Softmax Primitive Example |
31 | /// @copydetails softmax_example_cpp_short |
32 | /// |
33 | /// @include softmax.cpp |
34 | |
#include <algorithm>
#include <cmath>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

#include "example_utils.hpp"
#include "oneapi/dnnl/dnnl.hpp"
43 | |
44 | using namespace dnnl; |
45 | |
46 | using tag = memory::format_tag; |
47 | using dt = memory::data_type; |
48 | |
49 | void softmax_example(dnnl::engine::kind engine_kind) { |
50 | |
51 | // Create execution dnnl::engine. |
52 | dnnl::engine engine(engine_kind, 0); |
53 | |
54 | // Create dnnl::stream. |
55 | dnnl::stream engine_stream(engine); |
56 | |
57 | // Tensor dimensions. |
58 | const memory::dim N = 3, // batch size |
59 | IC = 1000; // channels |
60 | |
61 | // Source (src) and destination (dst) tensors dimensions. |
62 | memory::dims src_dims = {N, IC}; |
63 | |
64 | // Allocate buffer. |
65 | std::vector<float> src_data(product(src_dims)); |
66 | |
67 | std::generate(src_data.begin(), src_data.end(), []() { |
68 | static int i = 0; |
69 | return std::cos(i++ / 10.f); |
70 | }); |
71 | |
72 | // Create src memory descriptor and memory object. |
73 | auto src_md = memory::desc(src_dims, dt::f32, tag::nc); |
74 | auto dst_md = memory::desc(src_dims, dt::f32, tag::nc); |
75 | auto src_mem = memory(src_md, engine); |
76 | |
77 | // Write data to memory object's handle. |
78 | write_to_dnnl_memory(src_data.data(), src_mem); |
79 | |
80 | // Softmax axis. |
81 | const int axis = 1; |
82 | |
83 | // Create primitive descriptor. |
84 | auto softmax_pd = softmax_forward::primitive_desc(engine, |
85 | prop_kind::forward_training, algorithm::softmax_accurate, src_md, |
86 | dst_md, axis); |
87 | |
88 | // Create the primitive. |
89 | auto softmax_prim = softmax_forward(softmax_pd); |
90 | |
91 | // Primitive arguments. Set up in-place execution by assigning src as DST. |
92 | std::unordered_map<int, memory> softmax_args; |
93 | softmax_args.insert({DNNL_ARG_SRC, src_mem}); |
94 | softmax_args.insert({DNNL_ARG_DST, src_mem}); |
95 | |
96 | // Primitive execution. |
97 | softmax_prim.execute(engine_stream, softmax_args); |
98 | |
99 | // Wait for the computation to finalize. |
100 | engine_stream.wait(); |
101 | |
102 | // Read data from memory object's handle. |
103 | read_from_dnnl_memory(src_data.data(), src_mem); |
104 | } |
105 | |
106 | int main(int argc, char **argv) { |
107 | return handle_example_errors( |
108 | softmax_example, parse_engine_kind(argc, argv)); |
109 | } |
110 | |