/*******************************************************************************
* Copyright 2019-2022 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#ifndef CPU_MATMUL_REF_MATMUL_HPP
#define CPU_MATMUL_REF_MATMUL_HPP

#include <assert.h>
#include <memory> // std::unique_ptr

#include "common/bfloat16.hpp"
#include "common/c_types_map.hpp"
#include "common/primitive.hpp"
#include "common/type_helpers.hpp"
#include "common/utils.hpp"

#include "cpu/platform.hpp"
#include "cpu/primitive_attr_postops.hpp"

#include "cpu/matmul/cpu_matmul_pd.hpp"

namespace dnnl {
namespace impl {
namespace cpu {
namespace matmul {

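// Plain reference matmul implementation ("ref:any"). It is the catch-all
// CPU implementation, selected when no optimized matmul kernel claims the
// problem, and trades performance for generality.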
struct ref_matmul_t : public primitive_t {
    struct pd_t : public cpu_matmul_pd_t {
        using cpu_matmul_pd_t::cpu_matmul_pd_t;

        DECLARE_COMMON_PD_T("ref:any", ref_matmul_t);

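        // Supported data type combinations (enforced in init() below):
        //   src/wei: f32, bf16, or f16; src must match wei;
        //   dst:     f32, or the same type as src;
        //   bias:    optional; f32, or the same type as src.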
        status_t init(engine_t *engine) {
            using namespace data_type;
            using smask_t = primitive_attr_t::skip_mask_t;
            const auto src_type = src_md(0)->data_type;
            const auto wei_type = weights_md(0)->data_type;
            const auto bia_type = weights_md(1)->data_type;
            const auto dst_type = dst_md(0)->data_type;

            // Type checks: src must match wei; dst and bias are either f32
            // or the src type; the platform must support the src type.
            bool ok = utils::one_of(src_type, f32, bf16, f16)
                    && utils::one_of(wei_type, f32, bf16, f16)
                    && utils::one_of(dst_type, f32, bf16, f16)
                    && src_type == wei_type
                    && IMPLICATION(src_type == f32, dst_type == f32)
                    && IMPLICATION(src_type == bf16,
                            utils::one_of(dst_type, f32, bf16))
                    && IMPLICATION(
                            src_type == f16, utils::one_of(dst_type, f32, f16))
                    && IMPLICATION(with_bias(),
                            utils::one_of(bia_type, f32, bf16, f16)
                                    && IMPLICATION(
                                            src_type == f32, bia_type == f32)
                                    && IMPLICATION(src_type == f16,
                                            utils::one_of(bia_type, f32, f16))
                                    && IMPLICATION(src_type == bf16,
                                            utils::one_of(
                                                    bia_type, f32, bf16)))
                    && platform::has_data_type_support(src_type)
                    // Attribute checks: only runtime scales, post-ops, and a
                    // sum post-op data type are accepted, and the sum data
                    // type must be consistent with dst.
                    && attr()->has_default_values(smask_t::scales_runtime
                                    | smask_t::post_ops | smask_t::sum_dt,
                            dst_type)
                    && attr_.post_ops_.check_sum_consistent_dt(dst_type)
                    && attr_scales_ok() && set_default_formats()
                    && attr_.set_default_formats(dst_md(0)) == status::success;
            return ok ? status::success : status::unimplemented;
        }

    private:
        // Scales for f32/bf16 are a way to support alpha multiplication
        // (matmul has no separate alpha parameter).
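        //
        // A hedged usage sketch (illustration only, assuming the oneDNN
        // v3.x C++ API with set_scales_mask): a caller could request a
        // common src scale and per-N weights scales via
        //     dnnl::primitive_attr attr;
        //     attr.set_scales_mask(DNNL_ARG_SRC, /* mask = */ 0);
        //     attr.set_scales_mask(DNNL_ARG_WEIGHTS, 1 << (ndims - 1));
        // where ndims is the number of dst dimensions.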
        bool attr_scales_ok() {
            const std::vector<int> supported_args
                    = {DNNL_ARG_SRC, DNNL_ARG_WEIGHTS, DNNL_ARG_DST};
            bool ok = attr()->scales_.has_default_values(supported_args);
            for (int arg : supported_args) {
                const auto &mask = attr()->scales_.get(arg).mask_;
                // Weights accept a common scale (mask == 0) or per-N scales,
                // i.e. one scale per last dst dimension; src and dst accept
                // a common scale only.
                if (arg == DNNL_ARG_WEIGHTS)
                    ok = ok
                            && (mask == 0
                                    || mask == (1 << (dst_md()->ndims - 1)));
                else
                    ok = ok && (mask == 0);
            }
            return ok;
        }
    };

    ref_matmul_t(const pd_t *apd) : primitive_t(apd) {}

    status_t init(engine_t *engine) override {
        // Build the reference post-ops executor from the attributes once,
        // at primitive creation time.
        ref_post_ops
                = utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
        if (!ref_post_ops) return status::out_of_memory;
        return status::success;
    }
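
    // A hedged sketch (illustration only, assuming the oneDNN v3.x C++ API):
    // the post-ops replayed by ref_post_ops are whatever the caller attached
    // to the primitive attributes, e.g.
    //     dnnl::post_ops po;
    //     po.append_eltwise(dnnl::algorithm::eltwise_relu, 0.f, 0.f);
    //     attr.set_post_ops(po);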

    status_t execute(const exec_ctx_t &ctx) const override {
        return execute_ref(ctx);
    }

private:
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
    // Reference kernel body; defined out of line in the corresponding .cpp.
    status_t execute_ref(const exec_ctx_t &ctx) const;
    std::unique_ptr<ref_post_ops_t> ref_post_ops;
};

} // namespace matmul
} // namespace cpu
} // namespace impl
} // namespace dnnl

#endif