// NOTE(review): This file looks auto-generated — an OpenCL kernel source
// embedded as a C string so it can be JIT-compiled by the OpenCL runtime.
// Prefer editing the original .cl source and regenerating over hand-editing
// the string segments below; every byte of the literals is kernel source.
1 | namespace dnnl { |
2 | namespace impl { |
3 | namespace gpu { |
4 | namespace ocl { |
// Kernel source for a generic tensor-layout reorder. Each line of the
// original .cl file is one raw-string segment ending in "\n"; adjacent
// string literals concatenate into a single null-terminated string.
// The C++ comments interleaved below sit BETWEEN literal segments and do
// not change the resulting string value.
5 | const char *generic_reorder_kernel = R"==(/******************************************************************************* )==" "\n" |
6 | R"==(* Copyright 2021-2022 Intel Corporation )==" "\n" |
7 | R"==(* )==" "\n" |
8 | R"==(* Licensed under the Apache License, Version 2.0 (the "License"); )==" "\n" |
9 | R"==(* you may not use this file except in compliance with the License. )==" "\n" |
10 | R"==(* You may obtain a copy of the License at )==" "\n" |
11 | R"==(* )==" "\n" |
// NOTE(review): the license URL below is truncated to "http:" — presumably
// the generator strips "//"-style comments from the .cl source, which also
// ate the "//..." part of "http://www.apache.org/licenses/LICENSE-2.0".
// Harmless at runtime (it is inside the kernel's /* */ block comment), but
// worth confirming/fixing in the generator rather than here.
12 | R"==(* http: )==" "\n" |
13 | R"==(* )==" "\n" |
14 | R"==(* Unless required by applicable law or agreed to in writing, software )==" "\n" |
15 | R"==(* distributed under the License is distributed on an "AS IS" BASIS, )==" "\n" |
16 | R"==(* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. )==" "\n" |
17 | R"==(* See the License for the specific language governing permissions and )==" "\n" |
18 | R"==(* limitations under the License. )==" "\n" |
19 | R"==(*******************************************************************************/ )==" "\n" |
// Kernel entry point. The many ALL_CAPS identifiers (SRC_OFF, GWS_GET_Dn,
// S_BLK_*, D_BLK_*, REORDER, ...) are macros injected at build time via
// reorder_common.h / compiler -D options; their values are not visible here.
20 | R"==(#include "gpu/ocl/reorder_common.h" )==" "\n" |
21 | R"==(KERNEL_ATTR )==" "\n" |
22 | R"==(__kernel void generic_reorder(__global SRC_DATA_T *restrict src, )==" "\n" |
23 | R"==(__global DST_DATA_T *restrict dst, __global float *restrict src_scales, )==" "\n" |
24 | R"==(__global int *restrict src_zps, __global float *restrict dst_scales, )==" "\n" |
25 | R"==(__global int *restrict dst_zps, float sum_scale, int sum_zp) { )==" "\n" |
26 | R"==(const int src_zp = GET_SRC_ZP(src_zps); )==" "\n" |
27 | R"==(const int dst_zp = GET_DST_ZP(dst_zps); )==" "\n" |
28 | R"==(float src_scale = 1.0f; )==" "\n" |
29 | R"==(float dst_scale = 1.0f; )==" "\n" |
30 | R"==(src += SRC_OFFSET0; )==" "\n" |
31 | R"==(dst += DST_OFFSET0; )==" "\n" |
32 | R"==(#define LOOP_NEST_LEVEL 4 )==" "\n" |
// d[] holds this work-item's base coordinates in up to 6 tensor dims;
// b[] holds the per-iteration block offsets added on top of them.
33 | R"==(const uint sgId = get_sub_group_local_id(); )==" "\n" |
34 | R"==(uint d[6]; )==" "\n" |
35 | R"==(uint b[6] = {0, 0, 0, 0, 0, 0}; )==" "\n" |
36 | R"==(d[0] = GWS_GET_D0(); )==" "\n" |
37 | R"==(d[1] = GWS_GET_D1(); )==" "\n" |
38 | R"==(d[2] = GWS_GET_D2(); )==" "\n" |
39 | R"==(d[3] = GWS_GET_D3(); )==" "\n" |
40 | R"==(d[4] = GWS_GET_D4(); )==" "\n" |
41 | R"==(d[5] = GWS_GET_D5(); )==" "\n" |
42 | R"==(d[VECT_DIM] /= RESCALE_COEFF; )==" "\n" |
// Per-subgroup staging buffer in __local memory: src elements are gathered
// here in phase 1 and read back in destination order in phase 2.
43 | R"==(const uint cache_size_per_sg = D_BLK_SIZE_0 * D_BLK_SIZE_1 * D_BLK_SIZE_2 )==" "\n" |
44 | R"==(* D_BLK_SIZE_3 * VECT_SIZE; )==" "\n" |
45 | R"==(const uint sg_off = get_sub_group_id() * cache_size_per_sg; )==" "\n" |
46 | R"==(__local SRC_DATA_T cache[SG_PER_WG * cache_size_per_sg]; )==" "\n" |
47 | R"==(uint iter[LOOP_NEST_LEVEL] = {0, 0, 0, 0}; )==" "\n" |
// Phase 1: gather a tile of SRC into the cache. Up to 4 nested loops over
// the source blocking (S_BLK_SIZE_*); loops with size 1 are compiled out.
48 | R"==(#if S_BLK_SIZE_3 > 1 )==" "\n" |
49 | R"==(for_(iter[3] = 0; iter[3] < S_BLK_SIZE_3; iter[3]++) )==" "\n" |
50 | R"==(#endif )==" "\n" |
51 | R"==(#if S_BLK_SIZE_2 > 1 )==" "\n" |
52 | R"==(for_(iter[2] = 0; iter[2] < S_BLK_SIZE_2; iter[2]++) )==" "\n" |
53 | R"==(#endif )==" "\n" |
54 | R"==(#if S_BLK_SIZE_1 > 1 )==" "\n" |
55 | R"==(for_(iter[1] = 0; iter[1] < S_BLK_SIZE_1; iter[1]++) )==" "\n" |
56 | R"==(#endif )==" "\n" |
57 | R"==(#if S_BLK_SIZE_0 > 1 )==" "\n" |
58 | R"==(for_(iter[0] = 0; iter[0] < S_BLK_SIZE_0; iter[0]++) )==" "\n" |
59 | R"==(#endif )==" "\n" |
60 | R"==({ )==" "\n" |
61 | R"==(b[0] = 0; )==" "\n" |
62 | R"==(b[1] = 0; )==" "\n" |
63 | R"==(b[2] = 0; )==" "\n" |
64 | R"==(b[3] = 0; )==" "\n" |
65 | R"==(b[4] = 0; )==" "\n" |
66 | R"==(b[5] = 0; )==" "\n" |
// Block-loop contribution to the offset, then the subgroup-lane (sgId)
// contribution decomposed via S_DIV_*/S_MOD_*/S_MUL_* per dimension.
67 | R"==(b[S_BLK_IDX_0] += iter[0] * S_BLK_STEP_0; )==" "\n" |
68 | R"==(b[S_BLK_IDX_1] += iter[1] * S_BLK_STEP_1; )==" "\n" |
69 | R"==(b[S_BLK_IDX_2] += iter[2] * S_BLK_STEP_2; )==" "\n" |
70 | R"==(b[S_BLK_IDX_3] += iter[3] * S_BLK_STEP_3; )==" "\n" |
71 | R"==(#if S_MOD_3 > 1 )==" "\n" |
72 | R"==(b[S_IDX_3] += S_MUL_3 * ((sgId / S_DIV_3) % S_MOD_3); )==" "\n" |
73 | R"==(#endif )==" "\n" |
74 | R"==(#if S_MOD_2 > 1 )==" "\n" |
75 | R"==(b[S_IDX_2] += S_MUL_2 * ((sgId / S_DIV_2) % S_MOD_2); )==" "\n" |
76 | R"==(#endif )==" "\n" |
77 | R"==(#if S_MOD_1 > 1 )==" "\n" |
78 | R"==(b[S_IDX_1] += S_MUL_1 * ((sgId / S_DIV_1) % S_MOD_1); )==" "\n" |
79 | R"==(#endif )==" "\n" |
80 | R"==(#if S_MOD_0 > 1 )==" "\n" |
81 | R"==(b[S_IDX_0] += S_MUL_0 * ((sgId / S_DIV_0) % S_MOD_0); )==" "\n" |
82 | R"==(#endif )==" "\n" |
83 | R"==(const uint src_off = SRC_OFF(d[0] + b[0], d[1] + b[1], d[2] + b[2], )==" "\n" |
84 | R"==(d[3] + b[3], d[4] + b[4], d[5] + b[5]); )==" "\n" |
85 | R"==(uint cache_idx = sg_off + b[5] * CACHE_STRIDE_5 + b[4] * CACHE_STRIDE_4 )==" "\n" |
86 | R"==(+ b[3] * CACHE_STRIDE_3 + b[2] * CACHE_STRIDE_2 )==" "\n" |
87 | R"==(+ b[1] * CACHE_STRIDE_1 + b[0] * CACHE_STRIDE_0; )==" "\n" |
// Out-of-bounds source coordinates read as 0 (zero padding); lanes past
// LIMIT_SSGID skip the cache write entirely.
88 | R"==(const int pad_d0 = d[0] + b[0] >= SRC_D0; )==" "\n" |
89 | R"==(const int pad_d1 = NDIMS > 1 && d[1] + b[1] >= SRC_D1; )==" "\n" |
90 | R"==(const int pad_d2 = NDIMS > 2 && d[2] + b[2] >= SRC_D2; )==" "\n" |
91 | R"==(const int pad_d3 = NDIMS > 3 && d[3] + b[3] >= SRC_D3; )==" "\n" |
92 | R"==(const int pad_d4 = NDIMS > 4 && d[4] + b[4] >= SRC_D4; )==" "\n" |
93 | R"==(const int pad_d5 = NDIMS > 5 && d[5] + b[5] >= SRC_D5; )==" "\n" |
94 | R"==(const bool pad_sgid = sgId >= LIMIT_SSGID; )==" "\n" |
95 | R"==(const int pad )==" "\n" |
96 | R"==(= pad_d0 || pad_d1 || pad_d2 || pad_d3 || pad_d4 || pad_d5; )==" "\n" |
97 | R"==(if (!pad_sgid) { )==" "\n" |
98 | R"==(SRC_DATA_T src_tmp = pad ? 0 : src[src_off]; )==" "\n" |
99 | R"==(cache[cache_idx] = src_tmp; )==" "\n" |
100 | R"==(} )==" "\n" |
101 | R"==(} )==" "\n" |
// Phase 2: walk the same tile in destination blocking order (D_*), read the
// element back from the cache, apply scales / zero-points / sum post-op via
// the REORDER macro, and store to dst. Padded dst coordinates are skipped
// (not written), unlike the zero-filled src reads above.
102 | R"==(for (uint i = 0; i < LOOP_NEST_LEVEL; i++) { )==" "\n" |
103 | R"==(iter[i] = 0; )==" "\n" |
104 | R"==(} )==" "\n" |
105 | R"==(#if D_BLK_SIZE_3 > 1 )==" "\n" |
106 | R"==(for_(iter[3] = 0; iter[3] < D_BLK_SIZE_3; iter[3]++) )==" "\n" |
107 | R"==(#endif )==" "\n" |
108 | R"==(#if D_BLK_SIZE_2 > 1 )==" "\n" |
109 | R"==(for_(iter[2] = 0; iter[2] < D_BLK_SIZE_2; iter[2]++) )==" "\n" |
110 | R"==(#endif )==" "\n" |
111 | R"==(#if D_BLK_SIZE_1 > 1 )==" "\n" |
112 | R"==(for_(iter[1] = 0; iter[1] < D_BLK_SIZE_1; iter[1]++) )==" "\n" |
113 | R"==(#endif )==" "\n" |
114 | R"==(#if D_BLK_SIZE_0 > 1 )==" "\n" |
115 | R"==(for_(iter[0] = 0; iter[0] < D_BLK_SIZE_0; iter[0]++) )==" "\n" |
116 | R"==(#endif )==" "\n" |
117 | R"==({ )==" "\n" |
118 | R"==(b[0] = 0; )==" "\n" |
119 | R"==(b[1] = 0; )==" "\n" |
120 | R"==(b[2] = 0; )==" "\n" |
121 | R"==(b[3] = 0; )==" "\n" |
122 | R"==(b[4] = 0; )==" "\n" |
123 | R"==(b[5] = 0; )==" "\n" |
124 | R"==(b[D_BLK_IDX_0] += iter[0] * D_BLK_STEP_0; )==" "\n" |
125 | R"==(b[D_BLK_IDX_1] += iter[1] * D_BLK_STEP_1; )==" "\n" |
126 | R"==(b[D_BLK_IDX_2] += iter[2] * D_BLK_STEP_2; )==" "\n" |
127 | R"==(b[D_BLK_IDX_3] += iter[3] * D_BLK_STEP_3; )==" "\n" |
128 | R"==(#if D_MOD_3 > 1 )==" "\n" |
129 | R"==(b[D_IDX_3] += D_MUL_3 * ((sgId / D_DIV_3) % D_MOD_3); )==" "\n" |
130 | R"==(#endif )==" "\n" |
131 | R"==(#if D_MOD_2 > 1 )==" "\n" |
132 | R"==(b[D_IDX_2] += D_MUL_2 * ((sgId / D_DIV_2) % D_MOD_2); )==" "\n" |
133 | R"==(#endif )==" "\n" |
134 | R"==(#if D_MOD_1 > 1 )==" "\n" |
135 | R"==(b[D_IDX_1] += D_MUL_1 * ((sgId / D_DIV_1) % D_MOD_1); )==" "\n" |
136 | R"==(#endif )==" "\n" |
137 | R"==(#if D_MOD_0 > 1 )==" "\n" |
138 | R"==(b[D_IDX_0] += D_MUL_0 * ((sgId / D_DIV_0) % D_MOD_0); )==" "\n" |
139 | R"==(#endif )==" "\n" |
140 | R"==(const uint dst_off = DST_OFF(d[0] + b[0], d[1] + b[1], d[2] + b[2], )==" "\n" |
141 | R"==(d[3] + b[3], d[4] + b[4], d[5] + b[5]); )==" "\n" |
142 | R"==(DST_DATA_T dst_tmp; )==" "\n" |
143 | R"==(uint cache_idx = sg_off + b[5] * CACHE_STRIDE_5 + b[4] * CACHE_STRIDE_4 )==" "\n" |
144 | R"==(+ b[3] * CACHE_STRIDE_3 + b[2] * CACHE_STRIDE_2 )==" "\n" |
145 | R"==(+ b[1] * CACHE_STRIDE_1 + b[0] * CACHE_STRIDE_0; )==" "\n" |
146 | R"==(const int pad_d0 = d[0] + b[0] >= DST_PD0; )==" "\n" |
147 | R"==(const int pad_d1 = NDIMS > 1 && d[1] + b[1] >= DST_PD1; )==" "\n" |
148 | R"==(const int pad_d2 = NDIMS > 2 && d[2] + b[2] >= DST_PD2; )==" "\n" |
149 | R"==(const int pad_d3 = NDIMS > 3 && d[3] + b[3] >= DST_PD3; )==" "\n" |
150 | R"==(const int pad_d4 = NDIMS > 4 && d[4] + b[4] >= DST_PD4; )==" "\n" |
151 | R"==(const int pad_d5 = NDIMS > 5 && d[5] + b[5] >= DST_PD5; )==" "\n" |
152 | R"==(const bool pad_sgid = sgId >= LIMIT_DSGID; )==" "\n" |
153 | R"==(const int pad = pad_d0 || pad_d1 || pad_d2 || pad_d3 || pad_d4 || pad_d5 )==" "\n" |
154 | R"==(|| pad_sgid; )==" "\n" |
155 | R"==(if (!pad) { )==" "\n" |
156 | R"==(SRC_DATA_T from_cache = cache[cache_idx]; )==" "\n" |
// dst is pre-read only when the sum post-op needs the existing value.
157 | R"==(#if WITH_SUM_SCALE || WITH_SUM_ZPOINT )==" "\n" |
158 | R"==(dst_tmp = dst[dst_off]; )==" "\n" |
159 | R"==(#endif )==" "\n" |
// Per-element scale lookup; indices past the scale-array bound yield 0.0.
160 | R"==(#if WITH_SRC_SCALE )==" "\n" |
161 | R"==(uint src_scale_idx = SCALE_OFF(SRC, d[0] + b[0], d[1] + b[1], )==" "\n" |
162 | R"==(d[2] + b[2], d[3] + b[3], d[4] + b[4], d[5] + b[5]); )==" "\n" |
163 | R"==(src_scale = src_scale_idx < SRC_NUM_SCALES )==" "\n" |
164 | R"==(? src_scales[src_scale_idx] )==" "\n" |
165 | R"==(: 0.0; )==" "\n" |
166 | R"==(#endif )==" "\n" |
167 | R"==(#if WITH_DST_SCALE )==" "\n" |
168 | R"==(uint dst_scale_idx = SCALE_OFF(DST, d[0] + b[0], d[1] + b[1], )==" "\n" |
169 | R"==(d[2] + b[2], d[3] + b[3], d[4] + b[4], d[5] + b[5]); )==" "\n" |
170 | R"==(dst_scale = dst_scale_idx < DST_NUM_SCALES )==" "\n" |
171 | R"==(? dst_scales[dst_scale_idx] )==" "\n" |
172 | R"==(: 0.0; )==" "\n" |
173 | R"==(#endif )==" "\n" |
174 | R"==(REORDER(dst_tmp, from_cache, src_scale, dst_scale, sum_scale, )==" "\n" |
175 | R"==(src_zp, dst_zp, sum_zp); )==" "\n" |
176 | R"==(dst[dst_off] = dst_tmp; )==" "\n" |
177 | R"==(} )==" "\n" |
178 | R"==(} )==" "\n" |
179 | R"==(} )==" "\n" |
180 | R"==()==" ; |
181 | } |
182 | } |
183 | } |
184 | } |