/*******************************************************************************
* Copyright 2019-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#ifndef CPU_X64_JIT_AVX512_CORE_BF16_CONVOLUTION_HPP
#define CPU_X64_JIT_AVX512_CORE_BF16_CONVOLUTION_HPP

#include "common/c_types_map.hpp"
#include "common/dnnl_thread.hpp"
#include "common/memory_tracking.hpp"
#include "common/primitive.hpp"
#include "common/utils.hpp"

#include "cpu/cpu_convolution_pd.hpp"
#include "cpu/x64/cpu_barrier.hpp"
#include "cpu/x64/cpu_reducer.hpp"

#include "cpu/x64/jit_avx512_core_bf16_conv_kernel.hpp"
#include "cpu/x64/jit_transpose_utils.hpp"

namespace dnnl {
namespace impl {
namespace cpu {
namespace x64 {

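// Direct forward convolution for bf16 data, driven by the avx512_core bf16
// JIT kernel: src and weights are bf16, dst is bf16 or f32, and an optional
// f32 or bf16 bias is supported (see pd_t::init() below).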
struct jit_avx512_core_bf16_convolution_fwd_t : public primitive_t {
    struct pd_t : public cpu_convolution_fwd_pd_t {
        pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
                const typename pd_t::base_class *hint_fwd_pd)
            : cpu_convolution_fwd_pd_t(adesc, attr, hint_fwd_pd), jcp_() {}

        DECLARE_COMMON_PD_T(JIT_IMPL_NAME_HELPER("jit_bf16:", jcp_.isa, ""),
                jit_avx512_core_bf16_convolution_fwd_t);

        status_t init(engine_t *engine) {
            using namespace data_type;
            bool ok = mayiuse(avx512_core) && is_fwd()
                    && set_default_alg_kind(alg_kind::convolution_direct)
                    && (expect_data_types(bf16, bf16, data_type::undef, bf16,
                                data_type::undef)
                            || expect_data_types(bf16, bf16, data_type::undef,
                                    f32, data_type::undef))
                    && IMPLICATION(with_bias(),
                            utils::one_of(weights_md(1)->data_type, f32, bf16))
                    && attr()->has_default_values(
                            primitive_attr_t::skip_mask_t::post_ops,
                            dst_md()->data_type)
                    && !has_zero_dim_memory();
            if (!ok) return status::unimplemented;

            CHECK(jit_avx512_core_bf16_fwd_kernel::init_conf(jcp_, *desc(),
                    src_md_, weights_md_, dst_md_, bias_md_, attr_,
                    dnnl_get_max_threads()));

            auto scratchpad = scratchpad_registry().registrar();
            jit_avx512_core_bf16_fwd_kernel::init_scratchpad(scratchpad, jcp_);

            return status::success;
        }

        jit_conv_conf_t jcp_;
    };

    jit_avx512_core_bf16_convolution_fwd_t(const pd_t *apd)
        : primitive_t(apd) {}

    typedef typename prec_traits<data_type::bf16>::type src_data_t;
    typedef typename prec_traits<data_type::bf16>::type wei_data_t;

    status_t init(engine_t *engine) override {
        CHECK(safe_ptr_assign(kernel_,
                new jit_avx512_core_bf16_fwd_kernel(
                        pd()->jcp_, *pd()->attr(), *pd()->dst_md(0))));
        return kernel_->create_kernel();
    }

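    // Dispatch on the spatial rank of the problem: ndims() == 3, 4, and 5 map
    // to the 1D, 2D, and 3D drivers; other ranks are not implemented.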
    status_t execute(const exec_ctx_t &ctx) const override {
        if (pd()->ndims() == 3)
            execute_forward_1d(ctx);
        else if (pd()->ndims() == 4)
            execute_forward_2d(ctx);
        else if (pd()->ndims() == 5)
            execute_forward_3d(ctx);
        else
            return status::unimplemented;

        if (pd()->wants_zero_pad_dst()) ctx.zero_pad_output(DNNL_ARG_DST);
        return status::success;
    }

private:
    void prepare_padded_bias(const char *&bias,
            const memory_tracking::grantor_t &scratchpad) const;
    void execute_forward_1d(const exec_ctx_t &ctx) const;
    void execute_forward_2d(const exec_ctx_t &ctx) const;
    void execute_forward_3d(const exec_ctx_t &ctx) const;
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }

    std::unique_ptr<jit_avx512_core_bf16_fwd_kernel> kernel_;
};

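// Direct backward-data convolution for bf16: weights and diff_dst are bf16,
// while diff_src is produced in f32 or bf16 (see pd_t::init() below).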
struct jit_avx512_core_bf16_convolution_bwd_data_t : public primitive_t {
    struct pd_t : public cpu_convolution_bwd_data_pd_t {
        pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
                const convolution_fwd_pd_t *hint_fwd_pd)
            : cpu_convolution_bwd_data_pd_t(adesc, attr, hint_fwd_pd), jcp_() {}

        DECLARE_COMMON_PD_T(JIT_IMPL_NAME_HELPER("jit_bf16:", jcp_.isa, ""),
                jit_avx512_core_bf16_convolution_bwd_data_t);

        status_t init(engine_t *engine) {
            using namespace prop_kind;
            bool ok = true && mayiuse(avx512_core) && is_bwd_d()
                    && set_default_alg_kind(alg_kind::convolution_direct)
                    && (expect_data_types(data_type::f32, data_type::bf16,
                                data_type::undef, data_type::bf16,
                                data_type::undef)
                            || expect_data_types(data_type::bf16,
                                    data_type::bf16, data_type::undef,
                                    data_type::bf16, data_type::undef))
                    && attr()->has_default_values() && !has_zero_dim_memory();
            if (!ok) return status::unimplemented;

            status_t status = jit_avx512_core_bf16_bwd_data_kernel::init_conf(
                    jcp_, *desc(), diff_src_md_, weights_md_, diff_dst_md_,
                    dnnl_get_max_threads());
            return status;
        }

        jit_conv_conf_t jcp_;
    };

    jit_avx512_core_bf16_convolution_bwd_data_t(const pd_t *apd)
        : primitive_t(apd) {}

    typedef typename prec_traits<data_type::bf16>::type diff_dst_data_t;
    typedef typename prec_traits<data_type::bf16>::type wei_data_t;

    status_t init(engine_t *engine) override {
        CHECK(safe_ptr_assign(
                kernel_, new jit_avx512_core_bf16_bwd_data_kernel(pd()->jcp_)));
        return kernel_->create_kernel();
    }

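    // Dispatch on the spatial rank: the 1D and 2D cases share one driver,
    // 3D has its own; any other rank trips the assert below.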
    status_t execute(const exec_ctx_t &ctx) const override {
        if (pd()->ndims() < 5)
            execute_backward_data(ctx);
        else if (pd()->ndims() == 5)
            execute_backward_data_3d(ctx);
        else
            assert(!"invalid dimension");

        return status::success;
    }

private:
    void execute_backward_data(const exec_ctx_t &ctx) const;
    void execute_backward_data_3d(const exec_ctx_t &ctx) const;
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
    std::unique_ptr<jit_avx512_core_bf16_bwd_data_kernel> kernel_;
};

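// Direct backward-weights convolution for bf16: src and diff_dst are bf16,
// diff_weights and the optional diff_bias are f32 or bf16. The implementation
// repacks src/diff_dst with the transpose kernels (trans_kernel_,
// trans_dst_kernel_) and reduces per-thread partial results through the f32
// accumulator (acc_ker_).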
struct jit_avx512_core_bf16_convolution_bwd_weights_t : public primitive_t {
    struct pd_t : public cpu_convolution_bwd_weights_pd_t {
        pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
                const convolution_fwd_pd_t *hint_fwd_pd)
            : cpu_convolution_bwd_weights_pd_t(adesc, attr, hint_fwd_pd)
            , jcp_() {}

        DECLARE_COMMON_PD_T(JIT_IMPL_NAME_HELPER("jit_bf16:", jcp_.isa, ""),
                jit_avx512_core_bf16_convolution_bwd_weights_t);

        status_t init(engine_t *engine) {
            bool ok = true && mayiuse(avx512_core) && is_bwd_w()
                    && set_default_alg_kind(alg_kind::convolution_direct)
                    && (expect_data_types(data_type::bf16, data_type::bf16,
                                data_type::undef, data_type::bf16,
                                data_type::undef)
                            || expect_data_types(data_type::bf16,
                                    data_type::f32, data_type::undef,
                                    data_type::bf16, data_type::undef))
                    && IMPLICATION(with_bias(),
                            utils::one_of(diff_bias_md_.data_type,
                                    data_type::f32, data_type::bf16))
                    && attr()->has_default_values() && !has_zero_dim_memory();
            if (!ok) return status::unimplemented;

            status_t status = jit_avx512_core_bf16_conv_bwd_weights_kernel_f32::
                    init_conf(jcp_, *desc(), src_md_, diff_weights_md_,
                            diff_bias_md_, diff_dst_md_,
                            dnnl_get_max_threads());
            if (status != status::success) return status;

            auto scratchpad = scratchpad_registry().registrar();
            jit_avx512_core_bf16_conv_bwd_weights_kernel_f32::init_scratchpad(
                    scratchpad, jcp_);

            return status;
        }

        jit_conv_conf_t jcp_;
    };

    jit_avx512_core_bf16_convolution_bwd_weights_t(const pd_t *apd)
        : primitive_t(apd) {}

    typedef typename prec_traits<data_type::bf16>::type src_data_t;
    typedef typename prec_traits<data_type::bf16>::type diff_dst_data_t;

    status_t init(engine_t *engine) override;

    status_t execute(const exec_ctx_t &ctx) const override {
        execute_backward_weights(ctx);
        return status::success;
    }

private:
    void execute_backward_weights(const exec_ctx_t &ctx) const;
    void prepare_scratchpad_data(const exec_ctx_t &ctx) const;
    struct thread_info_t;
    void compute_diff_weights_2d(const thread_info_t *) const;
    void compute_diff_weights_3d(const thread_info_t *) const;
    void compute_diff_weights(const thread_info_t *) const;
    void reduce_and_convert_diff_weights_and_bias(const thread_info_t *) const;

    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }

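    // Helpers that build the per-thread transposed (tr_) copies of src and
    // diff_dst consumed by the kernel; the *_nxc variants handle the
    // channels-last (nxc) layouts.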
    size_t tr_src_buf_number(const thread_info_t *ti, int g, int ic) const;
    size_t tr_diff_dst_buf_number(const thread_info_t *ti, int g, int oc) const;
    void trans_src(
            src_data_t *tr_src1, const src_data_t *src1, int my_work) const;
    void trans_dst(diff_dst_data_t *tr_diff_dst1,
            const diff_dst_data_t *diff_dst1, int my_work) const;
    void trans_src_nxc(src_data_t *tr_src, const src_data_t *src_base,
            int spatial_start, dim_t spatial_start_offset, int icb_start,
            dim_t chb_stride, int my_work) const;
    void trans_dst_nxc(diff_dst_data_t *tr_diff_dst,
            const diff_dst_data_t *diff_dst_base, int spatial_start,
            dim_t spatial_start_offset, int ocb_start, dim_t chb_stride,
            int my_work) const;

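    // Thread decomposition of the reduction work: total thread count and its
    // split across minibatch, groups, and output/input channel blocks.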
    int nthr_ = 0, nthr_mb_ = 0, nthr_g_ = 0, nthr_oc_b_ = 0, nthr_ic_b_ = 0;

    std::unique_ptr<jit_avx512_core_bf16_conv_bwd_weights_kernel_f32> kernel_;

    std::unique_ptr<cpu_accumulator_1d_t<data_type::f32>> acc_ker_;

    std::unique_ptr<jit_trans_src_t> trans_kernel_;
    std::unique_ptr<jit_trans_dst_t> trans_dst_kernel_;
};

} // namespace x64
} // namespace cpu
} // namespace impl
} // namespace dnnl

#endif

// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s