namespace dnnl {
namespace impl {
namespace gpu {
namespace ocl {
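// Embedded OpenCL source for the gen9_sum kernel: one raw string literal per
// kernel line, joined with explicit "\n" separators.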
const char *gen9_sum_kernel = R"==(/******************************************************************************* )==""\n"
R"==(* Copyright 2020-2021 Intel Corporation )==""\n"
R"==(* )==""\n"
R"==(* Licensed under the Apache License, Version 2.0 (the "License"); )==""\n"
R"==(* you may not use this file except in compliance with the License. )==""\n"
R"==(* You may obtain a copy of the License at )==""\n"
R"==(* )==""\n"
12R"==(* http: )==""\n"
13R"==(* )==""\n"
14R"==(* Unless required by applicable law or agreed to in writing, software )==""\n"
15R"==(* distributed under the License is distributed on an "AS IS" BASIS, )==""\n"
16R"==(* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. )==""\n"
17R"==(* See the License for the specific language governing permissions and )==""\n"
18R"==(* limitations under the License. )==""\n"
19R"==(*******************************************************************************/ )==""\n"
20R"==(#if DST_DT_S8 )==""\n"
21R"==(#define DST_BLOCK_READ8(src) \ )==""\n"
22R"==(as_char8(intel_sub_group_block_read_uc8((const __global uchar *)(src))) )==""\n"
23R"==(#define DST_BLOCK_WRITE8(dst, val) \ )==""\n"
24R"==(intel_sub_group_block_write_uc8((__global uchar *)(dst), as_uchar8(val)) )==""\n"
25R"==(#endif )==""\n"
26R"==(#if DST_DT_U8 )==""\n"
27R"==(#define DST_BLOCK_READ8(src) \ )==""\n"
28R"==(as_uchar8(intel_sub_group_block_read_uc8((const __global uchar *)(src))) )==""\n"
29R"==(#define DST_BLOCK_WRITE8(dst, val) \ )==""\n"
30R"==(intel_sub_group_block_write_uc8((__global uchar *)(dst), as_uchar8(val)) )==""\n"
31R"==(#endif )==""\n"
32R"==(#if DST_DT_F16 )==""\n"
33R"==(#define DST_BLOCK_READ8(src) \ )==""\n"
34R"==(as_half8(intel_sub_group_block_read_us8((const __global ushort *)(src))) )==""\n"
35R"==(#define DST_BLOCK_WRITE8(dst, val) \ )==""\n"
36R"==(intel_sub_group_block_write_us8((__global ushort *)(dst), as_ushort8(val)) )==""\n"
37R"==(#endif )==""\n"
38R"==(#if DST_DT_S32 )==""\n"
39R"==(#define DST_BLOCK_READ8(src) \ )==""\n"
40R"==(as_int8(intel_sub_group_block_read8((const __global uint *)(src))) )==""\n"
41R"==(#define DST_BLOCK_WRITE8(dst, val) \ )==""\n"
42R"==(intel_sub_group_block_write8((__global uint *)(dst), as_uint8(val)) )==""\n"
43R"==(#endif )==""\n"
44R"==(#if DST_DT_F32 )==""\n"
45R"==(#define DST_BLOCK_READ8(src) \ )==""\n"
46R"==(as_float8(intel_sub_group_block_read8((const __global uint *)(src))) )==""\n"
47R"==(#define DST_BLOCK_WRITE8(dst, val) \ )==""\n"
48R"==(intel_sub_group_block_write8((__global uint *)(dst), as_uint8(val)) )==""\n"
49R"==(#endif )==""\n"
50R"==(#if DST_DT_BF16 )==""\n"
51R"==(#define DST_BLOCK_READ8(src) \ )==""\n"
52R"==(as_ushort8(intel_sub_group_block_read_us8((const __global ushort *)(src))) )==""\n"
53R"==(#define DST_BLOCK_WRITE8(dst, val) \ )==""\n"
54R"==(intel_sub_group_block_write_us8((__global ushort *)(dst), as_ushort8(val)) )==""\n"
55R"==(#endif )==""\n"
56R"==(#include "gpu/ocl/ocl_types.h" )==""\n"
57R"==(float8 get_values(__global SRC_DATA_T *src, ptrdiff_t offset) { )==""\n"
58R"==(float8 val; )==""\n"
59R"==(const uint max_sub_group_size = get_max_sub_group_size(); )==""\n"
60R"==(__global BLOCK_DATA_T *read_pos = (__global BLOCK_DATA_T *)src + offset; )==""\n"
61R"==(if (offset + VECT_DT_N * max_sub_group_size < N_ELEMS) { )==""\n"
62R"==(val = CONVERT_FLOAT8_T(AS_DATA8_T(BLOCK_READ8(read_pos))); )==""\n"
63R"==(} else { )==""\n"
64R"==(const uint sub_group_local_id = get_sub_group_local_id(); )==""\n"
65R"==(uint pos = offset + sub_group_local_id; )==""\n"
66R"==(for (uint i = 0; pos < N_ELEMS && i < VECT_DT_N; i++) { )==""\n"
67R"==(val[i] = CONVERT_FLOAT_T(src[pos]); )==""\n"
68R"==(pos += max_sub_group_size; )==""\n"
69R"==(} )==""\n"
70R"==(} )==""\n"
71R"==(return val; )==""\n"
72R"==(} )==""\n"
73R"==(__kernel void gen9_sum(__global SRC_DATA_T *input0, __global SRC_DATA_T *input1, )==""\n"
74R"==(__global SRC_DATA_T *input2, __global SRC_DATA_T *input3, )==""\n"
75R"==(__global SRC_DATA_T *input4, __global SRC_DATA_T *input5, )==""\n"
76R"==(__global SRC_DATA_T *input6, __global SRC_DATA_T *input7, )==""\n"
77R"==(__global SRC_DATA_T *input8, __global SRC_DATA_T *input9, )==""\n"
78R"==(__global SRC_DATA_T *input10, __global SRC_DATA_T *input11, )==""\n"
79R"==(__global SRC_DATA_T *input12, __global SRC_DATA_T *input13, )==""\n"
80R"==(__global SRC_DATA_T *input14, __global SRC_DATA_T *input15, )==""\n"
81R"==(__global DST_DATA_T *output, __global float *scales) { )==""\n"
82R"==(const uint group_id = get_group_id(0); )==""\n"
83R"==(const uint group_size = get_local_size(0); )==""\n"
84R"==(const uint sub_group_id = get_sub_group_id(); )==""\n"
85R"==(const uint max_sub_group_size = get_max_sub_group_size(); )==""\n"
86R"==(const uint sub_group_local_id = get_sub_group_local_id(); )==""\n"
87R"==(ptrdiff_t offset )==""\n"
88R"==(= (group_id * group_size + sub_group_id * max_sub_group_size) )==""\n"
89R"==(* VECT_DT_N; )==""\n"
90R"==(__global BLOCK_DATA_T *write_pos = (__global BLOCK_DATA_T *)output + offset; )==""\n"
91R"==(int id = 0; )==""\n"
92R"==(float8 sum = 0; )==""\n"
93R"==(if (id < N_INPUTS) sum += get_values(input0, offset) * scales[id++]; )==""\n"
94R"==(if (id < N_INPUTS) sum += get_values(input1, offset) * scales[id++]; )==""\n"
95R"==(if (id < N_INPUTS) sum += get_values(input2, offset) * scales[id++]; )==""\n"
96R"==(if (id < N_INPUTS) sum += get_values(input3, offset) * scales[id++]; )==""\n"
97R"==(if (id < N_INPUTS) sum += get_values(input4, offset) * scales[id++]; )==""\n"
98R"==(if (id < N_INPUTS) sum += get_values(input5, offset) * scales[id++]; )==""\n"
99R"==(if (id < N_INPUTS) sum += get_values(input6, offset) * scales[id++]; )==""\n"
100R"==(if (id < N_INPUTS) sum += get_values(input7, offset) * scales[id++]; )==""\n"
101R"==(if (id < N_INPUTS) sum += get_values(input8, offset) * scales[id++]; )==""\n"
102R"==(if (id < N_INPUTS) sum += get_values(input9, offset) * scales[id++]; )==""\n"
103R"==(if (id < N_INPUTS) sum += get_values(input10, offset) * scales[id++]; )==""\n"
104R"==(if (id < N_INPUTS) sum += get_values(input11, offset) * scales[id++]; )==""\n"
105R"==(if (id < N_INPUTS) sum += get_values(input12, offset) * scales[id++]; )==""\n"
106R"==(if (id < N_INPUTS) sum += get_values(input13, offset) * scales[id++]; )==""\n"
107R"==(if (id < N_INPUTS) sum += get_values(input14, offset) * scales[id++]; )==""\n"
108R"==(if (id < N_INPUTS) sum += get_values(input15, offset) * scales[id++]; )==""\n"
109R"==(if (offset + VECT_DT_N * max_sub_group_size < N_ELEMS) { )==""\n"
110R"==(DST_BLOCK_WRITE8(write_pos, TO_DST8(sum)); )==""\n"
111R"==(} else { )==""\n"
112R"==(uint pos = offset + sub_group_local_id; )==""\n"
113R"==(for (uint i = 0; pos < N_ELEMS && i < VECT_DT_N; i++) { )==""\n"
114R"==(output[pos] = TO_DST(sum[i]); )==""\n"
115R"==(pos += max_sub_group_size; )==""\n"
116R"==(} )==""\n"
117R"==(} )==""\n"
118R"==(} )==""\n"
119R"==()==";
} // namespace ocl
} // namespace gpu
} // namespace impl
} // namespace dnnl