/*******************************************************************************
* Copyright 2020-2022 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#ifndef CPU_X64_JIT_AVX512_CORE_AMX_1X1_CONVOLUTION_HPP
#define CPU_X64_JIT_AVX512_CORE_AMX_1X1_CONVOLUTION_HPP

#include "common/c_types_map.hpp"
#include "common/dnnl_thread.hpp"
#include "common/memory_tracking.hpp"
#include "common/primitive.hpp"
#include "common/utils.hpp"

#include "cpu/cpu_convolution_pd.hpp"

#include "cpu/x64/amx_tile_configure.hpp"
#include "cpu/x64/jit_avx512_core_amx_1x1_conv_kernel.hpp"

namespace dnnl {
namespace impl {
namespace cpu {
namespace x64 {

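// Forward 1x1 (pointwise) convolution for AVX-512 cores with Intel AMX.
// The supported bf16 and int8 configurations are validated in pd_t::init();
// the tile code itself is emitted by jit_avx512_core_amx_1x1_fwd_kernel_t.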
struct jit_avx512_core_amx_1x1_convolution_fwd_t : public primitive_t {
    struct pd_t : public cpu_convolution_fwd_pd_t {
        pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
                const typename pd_t::base_class *hint_fwd_pd)
            : cpu_convolution_fwd_pd_t(adesc, attr, hint_fwd_pd), jcp_() {}

        DECLARE_COMMON_PD_T(JIT_IMPL_NAME_HELPER("jit_1x1:", jcp_.isa, ""),
                jit_avx512_core_amx_1x1_convolution_fwd_t);

        status_t init(engine_t *engine) {
            using namespace data_type;
            using smask_t = primitive_attr_t::skip_mask_t;
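            // bf16 path: bf16 src/weights with f32 or bf16 dst (and bias,
            // if present); only post-ops may deviate from default attributes.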
            bool is_bf16_convolution
                    = (src_md(0)->data_type == bf16
                              && weights_md(0)->data_type == bf16
                              && utils::one_of(dst_md(0)->data_type, f32, bf16))
                    && IMPLICATION(with_bias(),
                            utils::one_of(weights_md(1)->data_type, f32, bf16))
                    && attr()->has_default_values(smask_t::post_ops);
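            // int8 path: s8/u8 src with s8 weights; dst additionally allows
            // s32, and runtime scales, zero points, and a sum post-op dt are
            // accepted on top of post-ops.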
            bool is_int8_convolution
                    = utils::one_of(src_md(0)->data_type, s8, u8)
                    && weights_md(0)->data_type == s8
                    && utils::one_of(
                            dst_md(0)->data_type, s8, u8, s32, f32, bf16)
                    && IMPLICATION(with_bias(),
                            utils::one_of(
                                    weights_md(1)->data_type, f32, s32, s8, u8))
                    && attr()->has_default_values(smask_t::scales_runtime
                                    | smask_t::post_ops
                                    | smask_t::zero_points_runtime
                                    | smask_t::sum_dt,
                            dst_md(0)->data_type)
                    && attr()->post_ops_.check_sum_consistent_dt(
                            dst_md(0)->data_type);

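            // Only forward direct convolution over one of the two data type
            // configurations above is implemented.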
            bool ok = is_fwd()
                    && set_default_alg_kind(alg_kind::convolution_direct)
                    && (is_bf16_convolution || is_int8_convolution)
                    && !has_zero_dim_memory() && zero_points_ok();
            if (!ok) return status::unimplemented;

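            // init_conf() derives the blocking and threading parameters and
            // fills jcp_ for the kernel generator.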
            CHECK(jit_avx512_core_amx_1x1_fwd_kernel_t::init_conf(jcp_,
                    *desc(), src_md_, weights_md_, dst_md_, bias_md_, attr_,
                    dnnl_get_max_threads()));

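            // Book scratchpad space (e.g. for the padded bias) before the
            // primitive is created.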
            auto scratchpad = scratchpad_registry().registrar();
            jit_avx512_core_amx_1x1_fwd_kernel_t::init_scratchpad(
                    scratchpad, jcp_, *attr());

            return status::success;
        }

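        // Kernel configuration computed in init() and consumed by the
        // generated kernel at execution time.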
        jit_conv_conf_t jcp_;

    protected:
        bool zero_points_ok() const {
            // Only common (per-tensor) zero points are supported, so the
            // src/dst masks must be 0; weights zero points must keep their
            // default (absent) values.
            int mask_src = 0, mask_dst = 0;
            attr()->zero_points_.get(DNNL_ARG_SRC, &mask_src);
            attr()->zero_points_.get(DNNL_ARG_DST, &mask_dst);
            return attr()->zero_points_.has_default_values(DNNL_ARG_WEIGHTS)
                    && mask_src == 0 && mask_dst == 0;
        }
    };

    jit_avx512_core_amx_1x1_convolution_fwd_t(const pd_t *apd)
        : primitive_t(apd) {}

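    // The jitted kernel is generated once at primitive creation; execute()
    // only dispatches into the pre-built code.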
    status_t init(engine_t *engine) override {
        CHECK(safe_ptr_assign(kernel_,
                new jit_avx512_core_amx_1x1_fwd_kernel_t(
                        pd()->jcp_, *pd()->attr(), *pd()->dst_md(0))));
        return kernel_->create_kernel();
    }

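    // Depthwise shapes are not handled by this 1x1 kernel, so they are
    // rejected at execution time.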
    status_t execute(const exec_ctx_t &ctx) const override {
        const auto &_pd = pd();
        if (_pd->jcp_.is_depthwise) return status::unimplemented;
        return execute_forward(ctx);
    }

private:
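    // prepare_padded_bias() is expected to copy the bias into zero-padded
    // scratchpad storage when the output channels are rounded up to the
    // block size (see the matching .cpp file).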
    status_t execute_forward(const exec_ctx_t &ctx) const;
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
    void prepare_padded_bias(const char *&bias,
            const memory_tracking::grantor_t &scratchpad) const;

    std::unique_ptr<jit_avx512_core_amx_1x1_fwd_kernel_t> kernel_;
};

} // namespace x64
} // namespace cpu
} // namespace impl
} // namespace dnnl

#endif

// vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s