// Auto-generated embedding of the gen9_binary OpenCL kernel source as a C
// string (concatenated raw string literals, one per kernel source line).
// NOTE(review): this file appears machine-generated from a .cl source; if a
// generator exists upstream, the fix below (GWS_GET_D4/D5 copy-paste bug in
// the PLAIN_TO_ABCD4AXB branch) should also be applied to the .cl original.
namespace dnnl {
namespace impl {
namespace gpu {
namespace ocl {
const char *gen9_binary_kernel = R"==(/*******************************************************************************)==" "\n"
R"==(* Copyright 2020-2022 Intel Corporation)==" "\n"
R"==(*)==" "\n"
R"==(* Licensed under the Apache License, Version 2.0 (the "License");)==" "\n"
R"==(* you may not use this file except in compliance with the License.)==" "\n"
R"==(* You may obtain a copy of the License at)==" "\n"
R"==(*)==" "\n"
R"==(* http://www.apache.org/licenses/LICENSE-2.0)==" "\n"
R"==(*)==" "\n"
R"==(* Unless required by applicable law or agreed to in writing, software)==" "\n"
R"==(* distributed under the License is distributed on an "AS IS" BASIS,)==" "\n"
R"==(* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.)==" "\n"
R"==(* See the License for the specific language governing permissions and)==" "\n"
R"==(* limitations under the License.)==" "\n"
R"==(*******************************************************************************/)==" "\n"
R"==(#include "gpu/ocl/binary_types.h")==" "\n"
R"==(#if IS_PLAIN_LAYOUT)==" "\n"
R"==(KERNEL_ATTR)==" "\n"
R"==(__kernel void gen9_binary(__global SRC0_DATA_T *src0,)==" "\n"
R"==(__global SRC1_DATA_T *src1, __global DST_DATA_T *dst POST_OP_ARGS,)==" "\n"
R"==(__global float *src0_scale, __global float *src1_scale) {)==" "\n"
R"==(int dims0[6] = {0};)==" "\n"
R"==(unsigned mid_dim = GWS_GET_MIXED_DIM();)==" "\n"
R"==(dims0[5] = mid_dim % DST_D5;)==" "\n"
R"==(mid_dim /= DST_D5;)==" "\n"
R"==(dims0[4] = mid_dim % DST_D4;)==" "\n"
R"==(mid_dim /= DST_D4;)==" "\n"
R"==(dims0[3] = mid_dim % DST_D3;)==" "\n"
R"==(mid_dim /= DST_D3;)==" "\n"
R"==(dims0[2] = mid_dim % DST_D2;)==" "\n"
R"==(mid_dim /= DST_D2;)==" "\n"
R"==(dims0[1] = mid_dim % DST_D1;)==" "\n"
R"==(mid_dim /= DST_D1;)==" "\n"
R"==(dims0[0] = mid_dim;)==" "\n"
R"==(int src0_off = SRC0_OFF()==" "\n"
R"==(dims0[0], dims0[1], dims0[2], dims0[3], dims0[4], dims0[5]);)==" "\n"
R"==(src0 += src0_off;)==" "\n"
R"==(int src1_off = SRC1_OFF(dims0[0] * (!BCAST_DIM0), dims0[1] * (!BCAST_DIM1),)==" "\n"
R"==(dims0[2] * (!BCAST_DIM2), dims0[3] * (!BCAST_DIM3),)==" "\n"
R"==(dims0[4] * (!BCAST_DIM4), dims0[5] * (!BCAST_DIM5));)==" "\n"
R"==(src1 += src1_off;)==" "\n"
R"==(int dst_off = DST_OFF()==" "\n"
R"==(dims0[0], dims0[1], dims0[2], dims0[3], dims0[4], dims0[5]);)==" "\n"
R"==(dst += dst_off;)==" "\n"
R"==(#if WITH_SRC0_SCALE)==" "\n"
R"==(#define src0_scale_val src0_scale[0])==" "\n"
R"==(#else)==" "\n"
R"==(#define src0_scale_val 1)==" "\n"
R"==(#endif)==" "\n"
R"==(#if WITH_SRC1_SCALE)==" "\n"
R"==(#define src1_scale_val src1_scale[0])==" "\n"
R"==(#else)==" "\n"
R"==(#define src1_scale_val 1)==" "\n"
R"==(#endif)==" "\n"
R"==(float tmp_src0[NVECT];)==" "\n"
R"==(READ_DATA(NVECT, SRC0, (&src0[0]), (&tmp_src0[0]), src0_scale_val);)==" "\n"
R"==(#if BCAST_AT_INNERMOST_DIM)==" "\n"
R"==(float tmp_src1[1];)==" "\n"
R"==(tmp_src1[0] = src1_scale_val * CONVERT_FLOAT_T(src1[0]);)==" "\n"
R"==(#define SRC1_IDX_MASK 0)==" "\n"
R"==(#else)==" "\n"
R"==(float tmp_src1[NVECT];)==" "\n"
R"==(READ_DATA(NVECT, SRC1, (&src1[0]), (&tmp_src1[0]), src1_scale_val);)==" "\n"
R"==(#define SRC1_IDX_MASK 1)==" "\n"
R"==(#endif)==" "\n"
R"==(float tmp[NVECT];)==" "\n"
R"==(unroll_for(unsigned idx = 0; idx < NVECT; ++idx) {)==" "\n"
R"==(tmp[idx] = get_eltwise_op(tmp_src0[idx], tmp_src1[idx * SRC1_IDX_MASK]);)==" "\n"
R"==(})==" "\n"
R"==(float dst_data[NVECT];)==" "\n"
R"==(#if WITH_SUM)==" "\n"
R"==(READ_DATA(NVECT, DST, (&dst[0]), (&dst_data[0]), 1);)==" "\n"
R"==(#endif)==" "\n"
R"==(dims0[NDIMS - 1] += get_sub_group_local_id();)==" "\n"
R"==(unroll_for(unsigned idx = 0; idx < NVECT; ++idx) {)==" "\n"
R"==(float d_i = tmp[idx];)==" "\n"
R"==(float dst_i = dst_data[idx];)==" "\n"
R"==(APPLY_POST_OPS_SERIAL(d_i, float, dst_i, float, dims0[0], 1, dims0[1],)==" "\n"
R"==(1, dims0[2], 1, dims0[3], 1, dims0[4], 1, dims0[5], 1);)==" "\n"
R"==(tmp[idx] = d_i;)==" "\n"
R"==(dims0[NDIMS - 1] += 16;)==" "\n"
R"==(})==" "\n"
R"==(WRITE_DATA(NVECT, DST, (&tmp[0]), (&dst[0]));)==" "\n"
R"==(})==" "\n"
R"==(#elif PLAIN_TO_ABCD4AXB)==" "\n"
R"==(KERNEL_ATTR)==" "\n"
R"==(__kernel void gen9_binary(__global SRC0_DATA_T *src0,)==" "\n"
R"==(__global SRC1_DATA_T *src1, __global DST_DATA_T *dst POST_OP_ARGS,)==" "\n"
R"==(__global float *src0_scale, __global float *src1_scale) {)==" "\n"
R"==(src0 += SRC0_OFFSET0;)==" "\n"
R"==(src1 += SRC1_OFFSET0;)==" "\n"
R"==(dst += DST_OFFSET0;)==" "\n"
R"==(int sglid = get_sub_group_local_id();)==" "\n"
R"==(const int d0 = GWS_GET_D0();)==" "\n"
R"==(const int d1 = GWS_GET_D1();)==" "\n"
R"==(const int d2 = GWS_GET_D2();)==" "\n"
R"==(const int d3 = GWS_GET_D3();)==" "\n"
R"==(const int d4 = GWS_GET_D4();)==" "\n"
R"==(const int d5 = GWS_GET_D5();)==" "\n"
R"==(const int d0_block = GWS_GET_D0_BLOCK();)==" "\n"
R"==(const int d1_block = GWS_GET_D1_BLOCK();)==" "\n"
R"==(const int d01_block = d0_block * d1_block;)==" "\n"
R"==(SRC0_DATA_T tmp_buf0[d01_block] = {0};)==" "\n"
R"==(SRC1_DATA_T tmp_buf1[d01_block] = {0};)==" "\n"
R"==(DST_DATA_T res_buf[d01_block] = {0};)==" "\n"
R"==(const int d0_inner_block = min(d0_block, SRC0_D0);)==" "\n"
R"==(const int d1_inner_block = min(d1_block, SRC0_D1);)==" "\n"
R"==(for (int d0_inner = 0; d0_inner < d0_inner_block; d0_inner++) {)==" "\n"
R"==(for (int d1_inner = 0; d1_inner < d1_inner_block; d1_inner++) {)==" "\n"
R"==(if (SRC0_D0 % d0_inner_block != 0 && d0 + d0_inner >= SRC0_D0))==" "\n"
R"==(continue;)==" "\n"
R"==(if (SRC0_D1 % d1_inner_block != 0 && d1 + d1_inner >= SRC0_D1))==" "\n"
R"==(continue;)==" "\n"
R"==(int src0_off;)==" "\n"
R"==(int src1_off;)==" "\n"
R"==(if (SRC0_S3_0 == 1) {)==" "\n"
R"==(src0_off = SRC0_OFF(d0 + d0_inner, d1 + d1_inner, d2, d3, 0, 0);)==" "\n"
R"==(tmp_buf0[d0_inner * d1_block + d1_inner])==" "\n"
R"==(= SRC0_BLOCK_READ(&src0[src0_off]);)==" "\n"
R"==(src1_off = SRC1_OFF((d0 + d0_inner) * (!BCAST_DIM0),)==" "\n"
R"==((d1 + d1_inner) * (!BCAST_DIM1), d2 * (!BCAST_DIM2),)==" "\n"
R"==(d3 * (!BCAST_DIM3), 0, 0);)==" "\n"
R"==(} else {)==" "\n"
R"==(src0_off = SRC0_OFF()==" "\n"
R"==(d0 + d0_inner, d1 + d1_inner, d2, d3 + sglid, 0, 0);)==" "\n"
R"==(tmp_buf0[d0_inner * d1_block + d1_inner] = src0[src0_off];)==" "\n"
R"==(src1_off = SRC1_OFF((d0 + d0_inner) * (!BCAST_DIM0),)==" "\n"
R"==((d1 + d1_inner) * (!BCAST_DIM1), d2 * (!BCAST_DIM2),)==" "\n"
R"==((d3 + sglid) * (!BCAST_DIM3), 0, 0);)==" "\n"
R"==(})==" "\n"
R"==(#if BCAST_AT_INNERMOST_DIM == 1)==" "\n"
R"==(tmp_buf1[d0_inner * d1_block + d1_inner] = src1[src1_off];)==" "\n"
R"==(#else)==" "\n"
R"==(tmp_buf1[d0_inner * d1_block + d1_inner])==" "\n"
R"==(= SRC1_BLOCK_READ(&src1[src1_off]);)==" "\n"
R"==(#endif)==" "\n"
R"==(})==" "\n"
R"==(})==" "\n"
R"==(int i = 0;)==" "\n"
R"==(for (int d0_i = 0; d0_i < d0_block; d0_i++) {)==" "\n"
R"==(for (int d1_i = 0; d1_i < d1_block; d1_i++) {)==" "\n"
R"==(float tmp_src0 = CONVERT_FLOAT_T(tmp_buf0[i]);)==" "\n"
R"==(float tmp_src1 = CONVERT_FLOAT_T(tmp_buf1[i]);)==" "\n"
R"==(float res;)==" "\n"
R"==(float dst_data;)==" "\n"
R"==(#if WITH_SRC0_SCALE)==" "\n"
R"==(tmp_src0 = tmp_src0 * src0_scale[0];)==" "\n"
R"==(#endif)==" "\n"
R"==(#if WITH_SRC1_SCALE)==" "\n"
R"==(tmp_src1 = tmp_src1 * src1_scale[0];)==" "\n"
R"==(#endif)==" "\n"
R"==(res = get_eltwise_op(tmp_src0, tmp_src1);)==" "\n"
R"==(APPLY_POST_OPS_SERIAL(res, float, dst_data, float, d0 + d0_i, 1,)==" "\n"
R"==(d1 + d1_i, 1, d2, 1, d3 + sglid, 1, d4, 1, d5, 1);)==" "\n"
R"==(res_buf[i] = TO_DST(res);)==" "\n"
R"==(++i;)==" "\n"
R"==(})==" "\n"
R"==(})==" "\n"
R"==(DST_DATA_T res_all[d01_block][SUB_GROUP_SIZE];)==" "\n"
R"==(for (int i = 0; i < d01_block; i++))==" "\n"
R"==(for (int j = 0; j < SUB_GROUP_SIZE; j++))==" "\n"
R"==(res_all[i][j] = intel_sub_group_shuffle(res_buf[i], j);)==" "\n"
R"==(for (int d = 0; d < SUB_GROUP_SIZE; d += 8) {)==" "\n"
R"==(DST_DATA8_T res_tmp;)==" "\n"
R"==(for (int i = 0; i < 8; i++))==" "\n"
R"==(res_tmp[i] = res_all[sglid][d + i];)==" "\n"
R"==(int dst_off = DST_OFF(d0, d1, d2, d3 + d, 0, 0);)==" "\n"
R"==(DST_BLOCK_WRITE8(&dst[dst_off], res_tmp);)==" "\n"
R"==(})==" "\n"
R"==(})==" "\n"
R"==(#elif IS_XA16B)==" "\n"
R"==(KERNEL_ATTR)==" "\n"
R"==(__kernel void gen9_binary(__global SRC0_DATA_T *src0,)==" "\n"
R"==(__global SRC1_DATA_T *src1, __global DST_DATA_T *dst POST_OP_ARGS,)==" "\n"
R"==(__global float *src0_scale, __global float *src1_scale) {)==" "\n"
R"==(int dims0[6] = {0};)==" "\n"
R"==(dims0[0] = GWS_GET_D0();)==" "\n"
R"==(dims0[1] = GWS_GET_D1();)==" "\n"
R"==(dims0[2] = GWS_GET_D2();)==" "\n"
R"==(dims0[3] = GWS_GET_D3();)==" "\n"
R"==(dims0[4] = GWS_GET_D4();)==" "\n"
R"==(dims0[5] = GWS_GET_D5();)==" "\n"
R"==(int src0_off = SRC0_OFF()==" "\n"
R"==(dims0[0], dims0[1], dims0[2], dims0[3], dims0[4], dims0[5]);)==" "\n"
R"==(int dst_off = DST_OFF()==" "\n"
R"==(dims0[0], dims0[1], dims0[2], dims0[3], dims0[4], dims0[5]);)==" "\n"
R"==(int src1_off = SRC1_OFF()==" "\n"
R"==(dims0[0], dims0[1], dims0[2], dims0[3], dims0[4], dims0[5]);)==" "\n"
R"==(int sub_grp_id = get_sub_group_local_id();)==" "\n"
R"==(for (int channels = 0; channels < SRC0_PD1; channels += GWS_LWS0_DEFAULT) {)==" "\n"
R"==(float8 d = 0;)==" "\n"
R"==(float8 dst_data;)==" "\n"
R"==(__global SRC1_DATA_T *t_src1 = src1 + src1_off;)==" "\n"
R"==(__global DST_DATA_T *t_dst = dst + dst_off;)==" "\n"
R"==(__global SRC0_DATA_T *t_src0 = src0 + src0_off;)==" "\n"
R"==(if ((SRC0_D1 % SUB_GROUP_SIZE != 0))==" "\n"
R"==(&& (dims0[1] + sub_grp_id) >= SRC0_D1) {)==" "\n"
R"==(d = 0;)==" "\n"
R"==(} else {)==" "\n"
R"==(float8 tmp_src0 = CONVERT_FLOAT8_T(SRC0_BLOCK_READ8(&t_src0[0]));)==" "\n"
R"==(float8 tmp_src1 = CONVERT_FLOAT8_T(SRC1_BLOCK_READ8(&t_src1[0]));)==" "\n"
R"==(#if WITH_SRC0_SCALE)==" "\n"
R"==(tmp_src0 = tmp_src0 * src0_scale[0];)==" "\n"
R"==(#endif)==" "\n"
R"==(#if WITH_SRC1_SCALE)==" "\n"
R"==(tmp_src1 = tmp_src1 * src1_scale[0];)==" "\n"
R"==(#endif)==" "\n"
R"==(d = get_eltwise_op(tmp_src0, tmp_src1);)==" "\n"
R"==(#if WITH_SUM)==" "\n"
R"==(dst_data = CONVERT_FLOAT8_T(DST_BLOCK_READ8(&t_dst[0]));)==" "\n"
R"==(#endif)==" "\n"
R"==(const int po_mb = dims0[0];)==" "\n"
R"==(const int po_oc = dims0[1] + sub_grp_id;)==" "\n"
R"==(for (int lcl_mb = 0; lcl_mb < NVECT; ++lcl_mb) {)==" "\n"
R"==(if (po_mb + lcl_mb >= SRC0_D0) {)==" "\n"
R"==(d[lcl_mb] = 0;)==" "\n"
R"==(} else {)==" "\n"
R"==(float d_i = d[lcl_mb];)==" "\n"
R"==(float dst_i = dst_data[lcl_mb];)==" "\n"
R"==(APPLY_POST_OPS_SERIAL(d_i, float, dst_i, float,)==" "\n"
R"==(po_mb + lcl_mb, 1, po_oc, 1, dims0[2], 1, dims0[3],)==" "\n"
R"==(1, dims0[4], 1, dims0[5], 1);)==" "\n"
R"==(d[lcl_mb] = d_i;)==" "\n"
R"==(})==" "\n"
R"==(})==" "\n"
R"==(})==" "\n"
R"==(DST_BLOCK_WRITE8(&t_dst[0], TO_DST8(d));)==" "\n"
R"==(src0_off += MB_BLOCK * SUB_GROUP_SIZE * SRC0_PD2 * SRC0_PD3 * SRC0_PD4)==" "\n"
R"==(* SRC0_PD5;)==" "\n"
R"==(src1_off += MB_BLOCK * SUB_GROUP_SIZE * SRC1_PD2 * SRC1_PD3 * SRC1_PD4)==" "\n"
R"==(* SRC1_PD5;)==" "\n"
R"==(dst_off += MB_BLOCK * SUB_GROUP_SIZE * DST_PD2 * DST_PD3 * DST_PD4)==" "\n"
R"==(* DST_PD5;)==" "\n"
R"==(})==" "\n"
R"==(})==" "\n"
R"==(#else)==" "\n"
R"==(KERNEL_ATTR)==" "\n"
R"==(__kernel void gen9_binary(__global SRC0_DATA_T *src0,)==" "\n"
R"==(__global SRC1_DATA_T *src1, __global DST_DATA_T *dst POST_OP_ARGS,)==" "\n"
R"==(__global float *src0_scale, __global float *src1_scale) {)==" "\n"
R"==(int dims0[6] = {0};)==" "\n"
R"==(dims0[0] = GWS_GET_D0();)==" "\n"
R"==(dims0[1] = GWS_GET_D1();)==" "\n"
R"==(dims0[2] = GWS_GET_D2();)==" "\n"
R"==(dims0[3] = GWS_GET_D3();)==" "\n"
R"==(dims0[4] = GWS_GET_D4();)==" "\n"
R"==(dims0[5] = GWS_GET_D5();)==" "\n"
R"==(int src0_off = SRC0_OFF()==" "\n"
R"==(dims0[0], dims0[1], dims0[2], dims0[3], dims0[4], dims0[5]);)==" "\n"
R"==(src0 += src0_off;)==" "\n"
R"==(int dst_off = DST_OFF()==" "\n"
R"==(dims0[0], dims0[1], dims0[2], dims0[3], dims0[4], dims0[5]);)==" "\n"
R"==(dst += dst_off;)==" "\n"
R"==(int src1_off = SRC1_OFF(dims0[0] * (!BCAST_DIM0), dims0[1] * (!BCAST_DIM1),)==" "\n"
R"==(dims0[2] * (!BCAST_DIM2), dims0[3] * (!BCAST_DIM3),)==" "\n"
R"==(dims0[4] * (!BCAST_DIM4), dims0[5] * (!BCAST_DIM5));)==" "\n"
R"==(src1 += src1_off;)==" "\n"
R"==(#if NVECT == 1)==" "\n"
R"==(float d = 0;)==" "\n"
R"==(float dst_data;)==" "\n"
R"==(float tmp_src0 = CONVERT_FLOAT_T(SRC0_BLOCK_READ(&src0[0]));)==" "\n"
R"==(#elif NVECT == 2)==" "\n"
R"==(float2 d = 0;)==" "\n"
R"==(float2 dst_data;)==" "\n"
R"==(float2 tmp_src0 = CONVERT_FLOAT2_T(SRC0_BLOCK_READ2(&src0[0]));)==" "\n"
R"==(#elif NVECT == 4)==" "\n"
R"==(float4 d = 0;)==" "\n"
R"==(float4 dst_data;)==" "\n"
R"==(float4 tmp_src0 = CONVERT_FLOAT4_T(SRC0_BLOCK_READ4(&src0[0]));)==" "\n"
R"==(#elif NVECT == 8)==" "\n"
R"==(float8 d = 0;)==" "\n"
R"==(float8 dst_data;)==" "\n"
R"==(float8 tmp_src0 = CONVERT_FLOAT8_T(SRC0_BLOCK_READ8(&src0[0]));)==" "\n"
R"==(#endif)==" "\n"
R"==(#if BCAST_DIM1)==" "\n"
R"==(float tmp_src1 = CONVERT_FLOAT_T(src1[0]);)==" "\n"
R"==(#else)==" "\n"
R"==(#if BCAST_AT_INNERMOST_DIM == 1 || NVECT == 1)==" "\n"
R"==(float tmp_src1 = CONVERT_FLOAT_T(SRC1_BLOCK_READ(&src1[0]));)==" "\n"
R"==(#elif NVECT == 2)==" "\n"
R"==(float2 tmp_src1 = CONVERT_FLOAT2_T(SRC1_BLOCK_READ2(&src1[0]));)==" "\n"
R"==(#elif NVECT == 4)==" "\n"
R"==(float4 tmp_src1 = CONVERT_FLOAT4_T(SRC1_BLOCK_READ4(&src1[0]));)==" "\n"
R"==(#elif NVECT == 8)==" "\n"
R"==(float8 tmp_src1 = CONVERT_FLOAT8_T(SRC1_BLOCK_READ8(&src1[0]));)==" "\n"
R"==(#endif)==" "\n"
R"==(#endif)==" "\n"
R"==(#if WITH_SRC0_SCALE)==" "\n"
R"==(tmp_src0 = tmp_src0 * src0_scale[0];)==" "\n"
R"==(#endif)==" "\n"
R"==(#if WITH_SRC1_SCALE)==" "\n"
R"==(tmp_src1 = tmp_src1 * src1_scale[0];)==" "\n"
R"==(#endif)==" "\n"
R"==(d = get_eltwise_op(tmp_src0, tmp_src1);)==" "\n"
R"==(#if WITH_SUM)==" "\n"
R"==(#if NVECT == 1)==" "\n"
R"==(dst_data = CONVERT_FLOAT_T(DST_BLOCK_READ(&dst[0]));)==" "\n"
R"==(#elif NVECT == 2)==" "\n"
R"==(dst_data = CONVERT_FLOAT2_T(DST_BLOCK_READ2(&dst[0]));)==" "\n"
R"==(#elif NVECT == 4)==" "\n"
R"==(dst_data = CONVERT_FLOAT4_T(DST_BLOCK_READ4(&dst[0]));)==" "\n"
R"==(#elif NVECT == 8)==" "\n"
R"==(dst_data = CONVERT_FLOAT8_T(DST_BLOCK_READ8(&dst[0]));)==" "\n"
R"==(#endif)==" "\n"
R"==(#endif)==" "\n"
R"==(const int po_mb = dims0[0];)==" "\n"
R"==(const int po_oc = dims0[1] + get_sub_group_local_id();)==" "\n"
R"==(#if NVECT == 1)==" "\n"
R"==(APPLY_POST_OPS_SERIAL(d, float, dst_data, float, po_mb, 1, po_oc, 1,)==" "\n"
R"==(dims0[2], 1, dims0[3], 1, dims0[4], 1, dims0[5], 1);)==" "\n"
R"==(#else)==" "\n"
R"==(for (int vidx = 0; vidx < NVECT; ++vidx) {)==" "\n"
R"==(float d_i = d[vidx];)==" "\n"
R"==(float dst_i = dst_data[vidx];)==" "\n"
R"==(APPLY_POST_OPS_SERIAL(d_i, float, dst_i, float, po_mb, 1, po_oc, 1,)==" "\n"
R"==(dims0[2], 1, dims0[3], 1, dims0[4], 1, dims0[5], 1);)==" "\n"
R"==(d[vidx] = d_i;)==" "\n"
R"==(++dims0[NDIMS - 1];)==" "\n"
R"==(})==" "\n"
R"==(#endif)==" "\n"
R"==(#if NVECT == 1)==" "\n"
R"==(DST_BLOCK_WRITE(&dst[0], TO_DST(d));)==" "\n"
R"==(#elif NVECT == 2)==" "\n"
R"==(DST_BLOCK_WRITE2(&dst[0], TO_DST2(d));)==" "\n"
R"==(#elif NVECT == 4)==" "\n"
R"==(DST_BLOCK_WRITE4(&dst[0], TO_DST4(d));)==" "\n"
R"==(#elif NVECT == 8)==" "\n"
R"==(DST_BLOCK_WRITE8(&dst[0], TO_DST8(d));)==" "\n"
R"==(#endif)==" "\n"
R"==(})==" "\n"
R"==(#endif)==" "\n"
R"==()==";
} // namespace ocl
} // namespace gpu
} // namespace impl
} // namespace dnnl