/*******************************************************************************
* Copyright 2019-2022 Intel Corporation
* Copyright 2022 FUJITSU LIMITED
* Copyright 2022 Arm Ltd. and affiliates
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#include "cpu/cpu_engine.hpp"

#include "cpu/ref_deconvolution.hpp"

#if DNNL_X64
#include "cpu/x64/jit_avx512_core_amx_deconvolution.hpp"
#include "cpu/x64/jit_avx512_core_x8s8s32x_1x1_deconvolution.hpp"
#include "cpu/x64/jit_avx512_core_x8s8s32x_deconvolution.hpp"
#include "cpu/x64/jit_brgemm_deconv.hpp"
#include "cpu/x64/jit_uni_x8s8s32x_1x1_deconvolution.hpp"
#include "cpu/x64/jit_uni_x8s8s32x_deconvolution.hpp"
using namespace dnnl::impl::cpu::x64;
#elif DNNL_AARCH64
#include "cpu/aarch64/jit_sve_512_core_x8s8s32x_deconvolution.hpp"
#if DNNL_AARCH64_USE_ACL
#include "cpu/aarch64/acl_deconvolution.hpp"
#endif
using namespace dnnl::impl::cpu::aarch64;
#endif

namespace dnnl {
namespace impl {
namespace cpu {

namespace {
using namespace dnnl::impl::data_type;
using namespace dnnl::impl::prop_kind;

// clang-format off
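// Priority-ordered lists of deconvolution implementations, keyed by
// propagation kind. Earlier entries are preferred at dispatch time; the
// CPU_INSTANCE_* macros compile an entry in only when the corresponding
// ISA / build feature is enabled, so unsupported entries expand to nothing.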
const std::map<pk_impl_key_t, std::vector<impl_list_item_t>> &impl_list_map() {
    static const std::map<pk_impl_key_t, std::vector<impl_list_item_t>> the_map = REG_DECONV_P({
        {{forward}, {
            CPU_INSTANCE_AMX(brgemm_deconvolution_fwd_t<avx512_core_amx_fp16>)
            CPU_INSTANCE_AMX(brgemm_deconvolution_fwd_t<avx512_core_amx>)
            CPU_INSTANCE_AMX(jit_avx512_core_amx_deconvolution_fwd_t)
            CPU_INSTANCE_AVX512(jit_avx512_core_x8s8s32x_1x1_deconvolution_fwd_t)
            CPU_INSTANCE_AVX512(jit_avx512_core_x8s8s32x_deconvolution_fwd_t)
            CPU_INSTANCE_AVX2(jit_uni_x8s8s32x_1x1_deconvolution_fwd_t<avx2>)
            CPU_INSTANCE_AVX2(jit_uni_x8s8s32x_deconvolution_fwd_t<avx2>)
            CPU_INSTANCE_SSE41(jit_uni_x8s8s32x_1x1_deconvolution_fwd_t<sse41>)
            CPU_INSTANCE_SSE41(jit_uni_x8s8s32x_deconvolution_fwd_t<sse41>)
            CPU_INSTANCE_AARCH64(jit_sve_512_core_x8s8s32x_deconvolution_fwd_t)
            CPU_INSTANCE_AARCH64_ACL(acl_deconvolution_fwd_t)
            CPU_INSTANCE(ref_deconvolution_fwd_t)
            nullptr,
        }},
        {{backward_data}, REG_BWD_PK({
            CPU_INSTANCE(ref_deconvolution_bwd_data_t)
            nullptr,
        })},
        {{backward_weights}, REG_BWD_PK({
            CPU_INSTANCE(ref_deconvolution_bwd_weights_t)
            nullptr,
        })},
    });
    return the_map;
}
// clang-format on
} // namespace

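// Returns the implementation list matching the descriptor's propagation kind.
// Both forward_training and forward_inference map to the single forward list;
// an empty, null-terminated list is returned if no entry exists for the key.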
const impl_list_item_t *get_deconvolution_impl_list(
        const deconvolution_desc_t *desc) {
    static const impl_list_item_t empty_list[] = {nullptr};

    const bool is_fwd = utils::one_of(
            desc->prop_kind, forward_training, forward_inference);
    prop_kind_t prop_kind = is_fwd ? forward : desc->prop_kind;

    pk_impl_key_t key {prop_kind};

    const auto impl_list_it = impl_list_map().find(key);
    return impl_list_it != impl_list_map().cend() ? impl_list_it->second.data()
                                                  : empty_list;
}

} // namespace cpu
} // namespace impl
} // namespace dnnl