// Copyright 2017 The Gemmlowp Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// simd_wrappers_sse.h: SSE SIMD wrappers

#ifndef GEMMLOWP_INTERNAL_SIMD_WRAPPERS_SSE_H_
#define GEMMLOWP_INTERNAL_SIMD_WRAPPERS_SSE_H_

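// <smmintrin.h> is the SSE4.1 intrinsics header; the _mm_extract_epi32,
// _mm_min_epi32 and _mm_max_epi32 intrinsics used below require SSE4.1.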
#include <smmintrin.h>

namespace gemmlowp {

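// Note: this header is not meant to be included directly; it is included
// from internal/simd_wrappers.h, which declares the RegisterType, RegBlock
// and LoadContiguousImpl templates specialized below.

// Unlike NEON, SSE exposes a single 128-bit integer vector type, __m128i;
// the three aliases below name the same type and serve only as documentation.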
using Int32x4 = __m128i;
using Int16x8 = __m128i;
using Uint8x16 = __m128i;

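// Each RegisterType specialization picks the widest register that ScalarCount
// scalars can fill: a full 128-bit vector when enough scalars are available,
// a plain scalar otherwise. For uint8, an intermediate std::uint32_t case
// packs 4 to 15 bytes without occupying a whole vector register.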
template <int ScalarCount>
struct RegisterType<std::int32_t, ScalarCount> {
  using Type =
      typename std::conditional<ScalarCount >= 4, Int32x4, std::int32_t>::type;
};

template <int ScalarCount>
struct RegisterType<std::int16_t, ScalarCount> {
  using Type =
      typename std::conditional<ScalarCount >= 8, Int16x8, std::int16_t>::type;
};

template <int ScalarCount>
struct RegisterType<std::uint8_t, ScalarCount> {
  using Type = typename std::conditional<
      ScalarCount >= 16, Uint8x16,
      typename std::conditional<ScalarCount >= 4, std::uint32_t,
                                std::uint8_t>::type>::type;
};

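// All loads and stores use the unaligned _mm_loadu_si128/_mm_storeu_si128
// intrinsics, so callers need not guarantee 16-byte alignment.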
inline Int32x4 LoadInt32x4(const std::int32_t* src) {
  return _mm_loadu_si128(reinterpret_cast<const Int32x4*>(src));
}

inline Int16x8 LoadInt16x8(const std::int16_t* src) {
  return _mm_loadu_si128(reinterpret_cast<const Int16x8*>(src));
}

inline void StoreInt32x4(std::int32_t* dst, Int32x4 value) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), value);
}

inline void StoreInt16x8(std::int16_t* dst, Int16x8 value) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), value);
}

inline Uint8x16 LoadUint8x16(const std::uint8_t* src) {
  return _mm_loadu_si128(reinterpret_cast<const Uint8x16*>(src));
}

inline void StoreUint8x16(std::uint8_t* dst, Uint8x16 value) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), value);
}

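// Extracts the 32-bit lane at compile-time index Lane, e.g. GetLane<2>(v)
// returns the third lane.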
template <int Lane>
std::int32_t GetLane(Int32x4 value) {
  return _mm_extract_epi32(value, Lane);
}

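// Broadcasts the lane at compile-time index Lane to all four lanes, using
// _mm_shuffle_epi32 with a replicated shuffle index.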
template <int Lane>
Int32x4 DupLane(Int32x4 value) {
  return _mm_shuffle_epi32(value, _MM_SHUFFLE(Lane, Lane, Lane, Lane));
}

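// The scalar overloads below broadcast the scalar with Dup. The vector-vector
// Add, Mul and Dup for __m128i are not defined in this file; they are provided
// by gemmlowp's fixedpoint headers (fixedpoint_sse.h).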
inline Int32x4 Mul(Int32x4 a, std::int32_t b) {
  return Mul(a, Dup<Int32x4>(b));
}

inline Int32x4 Min(Int32x4 a, Int32x4 b) { return _mm_min_epi32(a, b); }

inline Int32x4 Max(Int32x4 a, Int32x4 b) { return _mm_max_epi32(a, b); }

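// Fixed-point multiply: returns the high 32 bits of 2*a*b in each lane, with
// rounding and saturation, matching the semantics of NEON's vqrdmulhq_s32.
// The vector-vector overload likewise comes from fixedpoint_sse.h.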
inline Int32x4 SaturatingRoundingDoublingHighMul(Int32x4 a, std::int32_t b) {
  return SaturatingRoundingDoublingHighMul(a, Dup<Int32x4>(b));
}

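// Multiplies every lane of a by lane Lane of b.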
template <int Lane>
Int32x4 MulByRhsLane(Int32x4 a, Int32x4 b) {
  return Mul(a, DupLane<Lane>(b));
}

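// Multiply-accumulate helpers: *acc += lhs * rhs, elementwise.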
inline void MulAdd(Int32x4 lhs, Int32x4 rhs, Int32x4* acc) {
  *acc = Add(*acc, Mul(lhs, rhs));
}

inline void MulAdd(Int32x4 lhs, std::int32_t rhs, Int32x4* acc) {
  *acc = Add(*acc, Mul(lhs, rhs));
}

template <int Lane>
inline void MulAddByRhsLane(Int32x4 lhs, Int32x4 rhs, Int32x4* acc) {
  *acc = Add(*acc, MulByRhsLane<Lane>(lhs, rhs));
}

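// The LoadContiguousImpl specializations load an 8x8 register block from
// contiguous memory: 64 uint8 values fill four 16-byte vectors, 64 int32
// values fill sixteen, and 64 int16 values fill eight.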
template <>
struct LoadContiguousImpl<RegBlockUint8<8, 8>> {
  static RegBlockUint8<8, 8> Run(const std::uint8_t* src) {
    RegBlockUint8<8, 8> result;
    for (int i = 0; i < 4; i++) {
      result.buf.reg[i] = LoadUint8x16(src + 16 * i);
    }
    return result;
  }
};

template <>
struct LoadContiguousImpl<RegBlockInt32<8, 8>> {
  static RegBlockInt32<8, 8> Run(const std::int32_t* src) {
    RegBlockInt32<8, 8> result;
    for (int i = 0; i < 16; i++) {
      result.buf.reg[i] = LoadInt32x4(src + 4 * i);
    }
    return result;
  }
};

template <>
struct LoadContiguousImpl<RegBlockInt16<8, 8>> {
  static RegBlockInt16<8, 8> Run(const std::int16_t* src) {
    RegBlockInt16<8, 8> result;
    for (int i = 0; i < 8; i++) {
      result.buf.reg[i] = LoadInt16x8(src + 8 * i);
    }
    return result;
  }
};

}  // end namespace gemmlowp

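// simd_wrappers_common_neon_sse.h implements the RegBlock operations shared
// between the NEON and SSE paths; it is included last because it builds on
// the types and primitives defined above.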
#include "simd_wrappers_common_neon_sse.h"

#endif  // GEMMLOWP_INTERNAL_SIMD_WRAPPERS_SSE_H_