1 | /******************************************************************************* |
2 | * Copyright 2022 Intel Corporation |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | *******************************************************************************/ |
16 | |
17 | #ifndef GPU_JIT_CONV_EPILOGUE_HPP |
18 | #define GPU_JIT_CONV_EPILOGUE_HPP |
19 | |
20 | #include "gpu/jit/conv/config.hpp" |
21 | #include "gpu/jit/conv/post_ops.hpp" |
22 | #include "gpu/jit/conv/slm_reduce_builder.hpp" |
23 | #include "gpu/jit/ir/gemm_schedule.hpp" |
24 | #include "gpu/jit/ir/ir.hpp" |
25 | |
26 | namespace dnnl { |
27 | namespace impl { |
28 | namespace gpu { |
29 | namespace jit { |
30 | |
/// Builds the epilogue IR statement for a convolution kernel.
///
/// NOTE(review): only the declaration is visible here; the description below
/// is inferred from parameter names/types — confirm against the matching
/// .cpp implementation. Presumably this emits the code that takes the
/// accumulated C tile held in registers (@p c_reg_buf with layout
/// @p c_reg_layout), applies the post-operations described by
/// @p post_op_ctx, and stores the result to destination memory
/// (@p c_mem_buf viewed through @p c_mem_view).
///
/// @param cfg           Convolution configuration describing the problem.
/// @param ir_ctx        IR context used to construct IR objects.
/// @param gemm_schedule GEMM loop schedule of the convolution mapping.
/// @param post_op_ctx   Context holding the post-op chain to apply to C.
/// @param thr_tile      Sub-tile of C assigned to the current thread.
/// @param c_mem_view    View of the C tensor in memory to store into.
/// @param c_reg_layout  Register layout of the accumulated C tile.
/// @param c_mem_buf     Buffer expression for C in memory.
/// @param c_reg_buf     Buffer expression for C in registers.
/// @return              The IR statement implementing the epilogue.
stmt_t create_epilogue_stmt(const conv_config_t &cfg, ir_context_t &ir_ctx,
        const gemm_schedule_t &gemm_schedule,
        const post_op_context_t &post_op_ctx, const tensor_t &thr_tile,
        const view_t &c_mem_view, const layout_t &c_reg_layout,
        const expr_t &c_mem_buf, const expr_t &c_reg_buf);
36 | |
37 | } // namespace jit |
38 | } // namespace gpu |
39 | } // namespace impl |
40 | } // namespace dnnl |
41 | |
42 | #endif |
43 | |