1 | // Copyright 2005 Google Inc. All Rights Reserved. |
2 | // |
3 | // Redistribution and use in source and binary forms, with or without |
4 | // modification, are permitted provided that the following conditions are |
5 | // met: |
6 | // |
7 | // * Redistributions of source code must retain the above copyright |
8 | // notice, this list of conditions and the following disclaimer. |
9 | // * Redistributions in binary form must reproduce the above |
10 | // copyright notice, this list of conditions and the following disclaimer |
11 | // in the documentation and/or other materials provided with the |
12 | // distribution. |
13 | // * Neither the name of Google Inc. nor the names of its |
14 | // contributors may be used to endorse or promote products derived from |
15 | // this software without specific prior written permission. |
16 | // |
17 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
18 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
19 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
20 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
21 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
22 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
23 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
24 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
25 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
27 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | |
29 | #include "snappy-internal.h" |
30 | #include "snappy-sinksource.h" |
31 | #include "snappy.h" |
32 | |
33 | #if !defined(SNAPPY_HAVE_BMI2) |
34 | // __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2 |
35 | // specifically, but it does define __AVX2__ when AVX2 support is available. |
36 | // Fortunately, AVX2 was introduced in Haswell, just like BMI2. |
37 | // |
// BMI2 is not defined as a subset of AVX2 (unlike SSSE3 and AVX). So,
39 | // GCC and Clang can build code with AVX2 enabled but BMI2 disabled, in which |
40 | // case issuing BMI2 instructions results in a compiler error. |
41 | #if defined(__BMI2__) || (defined(_MSC_VER) && defined(__AVX2__)) |
42 | #define SNAPPY_HAVE_BMI2 1 |
43 | #else |
44 | #define SNAPPY_HAVE_BMI2 0 |
45 | #endif |
46 | #endif // !defined(SNAPPY_HAVE_BMI2) |
47 | |
48 | #if SNAPPY_HAVE_BMI2 |
// Please do not replace with <x86intrin.h> or with headers that assume more
// advanced SSE versions without checking with all the OWNERS.
51 | #include <immintrin.h> |
52 | #endif |
53 | |
54 | #include <algorithm> |
55 | #include <array> |
56 | #include <cstddef> |
57 | #include <cstdint> |
58 | #include <cstdio> |
59 | #include <cstring> |
60 | #include <string> |
61 | #include <utility> |
62 | #include <vector> |
63 | |
64 | namespace snappy { |
65 | |
66 | namespace { |
67 | |
// The number of slop bytes writers use for unconditional copies.
69 | constexpr int kSlopBytes = 64; |
70 | |
71 | using internal::char_table; |
72 | using internal::COPY_1_BYTE_OFFSET; |
73 | using internal::COPY_2_BYTE_OFFSET; |
74 | using internal::COPY_4_BYTE_OFFSET; |
75 | using internal::kMaximumTagLength; |
76 | using internal::LITERAL; |
77 | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
78 | using internal::V128; |
79 | using internal::V128_Load; |
80 | using internal::V128_LoadU; |
81 | using internal::V128_Shuffle; |
82 | using internal::V128_StoreU; |
83 | using internal::V128_DupChar; |
84 | #endif |
85 | |
// We translate the information encoded in a tag through a lookup table to a
// format that requires fewer instructions to decode. Effectively we store
// the length minus the tag part of the offset. The least significant byte
// thus stores the length, while length - offset is given by
// entry - ExtractOffset(type). The nice thing is that the subtraction
// immediately sets the flags for the necessary check that offset >= length,
// which folds the cmp into the sub. We engineer long literals and copy-4 tags
// to always fail this check, so their presence doesn't affect the fast path.
// To prevent literals from triggering the guard against offset < length
// (offset does not apply to literals), the table gives them a spurious offset
// of 256.
97 | inline constexpr int16_t MakeEntry(int16_t len, int16_t offset) { |
98 | return len - (offset << 8); |
99 | } |
100 | |
101 | inline constexpr int16_t LengthMinusOffset(int data, int type) { |
102 | return type == 3 ? 0xFF // copy-4 (or type == 3) |
103 | : type == 2 ? MakeEntry(data + 1, 0) // copy-2 |
104 | : type == 1 ? MakeEntry((data & 7) + 4, data >> 3) // copy-1 |
105 | : data < 60 ? MakeEntry(data + 1, 1) // note spurious offset. |
106 | : 0xFF; // long literal |
107 | } |
108 | |
109 | inline constexpr int16_t LengthMinusOffset(uint8_t tag) { |
110 | return LengthMinusOffset(tag >> 2, tag & 3); |
111 | } |
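// A few illustrative values of the encoding above, checked at compile time
// (documentation only; derived directly from the definitions above): a copy-1
// tag 0x0D encodes length 7 with zero upper offset bits, a literal tag 0x08
// (length 3) carries the spurious offset of 256, and any copy-4 tag maps to
// 0xFF so it always fails the fast-path check.
static_assert(LengthMinusOffset(0x0D) == 7, "copy-1: len 7, high offset 0");
static_assert(LengthMinusOffset(0x08) == 3 - 256, "literal of length 3");
static_assert(LengthMinusOffset(0x03) == 0xFF, "copy-4 never takes fast path");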
112 | |
113 | template <size_t... Ints> |
114 | struct index_sequence {}; |
115 | |
116 | template <std::size_t N, size_t... Is> |
117 | struct make_index_sequence : make_index_sequence<N - 1, N - 1, Is...> {}; |
118 | |
119 | template <size_t... Is> |
120 | struct make_index_sequence<0, Is...> : index_sequence<Is...> {}; |
121 | |
122 | template <size_t... seq> |
123 | constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) { |
124 | return std::array<int16_t, 256>{LengthMinusOffset(seq)...}; |
125 | } |
126 | |
127 | alignas(64) const std::array<int16_t, 256> kLengthMinusOffset = |
128 | MakeTable(make_index_sequence<256>{}); |
129 | |
130 | // Any hash function will produce a valid compressed bitstream, but a good |
131 | // hash function reduces the number of collisions and thus yields better |
132 | // compression for compressible input, and more speed for incompressible |
133 | // input. Of course, it doesn't hurt if the hash function is reasonably fast |
134 | // either, as it gets called a lot. |
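// A typical probe in CompressFragment below looks roughly like
//   uint32_t hash = HashBytes(LittleEndian::Load32(ip), mask);
//   candidate = base_ip + table[hash];
//   table[hash] = ip - base_ip;
// where mask == table_size - 1 and table_size is a power of two.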
135 | inline uint32_t HashBytes(uint32_t bytes, uint32_t mask) { |
136 | constexpr uint32_t kMagic = 0x1e35a7bd; |
137 | return ((kMagic * bytes) >> (32 - kMaxHashTableBits)) & mask; |
138 | } |
139 | |
140 | } // namespace |
141 | |
142 | size_t MaxCompressedLength(size_t source_bytes) { |
143 | // Compressed data can be defined as: |
144 | // compressed := item* literal* |
145 | // item := literal* copy |
146 | // |
147 | // The trailing literal sequence has a space blowup of at most 62/60 |
148 | // since a literal of length 60 needs one tag byte + one extra byte |
149 | // for length information. |
150 | // |
151 | // Item blowup is trickier to measure. Suppose the "copy" op copies |
152 | // 4 bytes of data. Because of a special check in the encoding code, |
153 | // we produce a 4-byte copy only if the offset is < 65536. Therefore |
154 | // the copy op takes 3 bytes to encode, and this type of item leads |
155 | // to at most the 62/60 blowup for representing literals. |
156 | // |
157 | // Suppose the "copy" op copies 5 bytes of data. If the offset is big |
158 | // enough, it will take 5 bytes to encode the copy op. Therefore the |
159 | // worst case here is a one-byte literal followed by a five-byte copy. |
160 | // I.e., 6 bytes of input turn into 7 bytes of "compressed" data. |
161 | // |
162 | // This last factor dominates the blowup, so the final estimate is: |
163 | return 32 + source_bytes + source_bytes / 6; |
164 | } |
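// For example, MaxCompressedLength(1000000) == 32 + 1000000 + 1000000 / 6
// == 1166698, so a 1,000,000-byte block needs at most 1,166,698 output bytes.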
165 | |
166 | namespace { |
167 | |
168 | void UnalignedCopy64(const void* src, void* dst) { |
169 | char tmp[8]; |
170 | std::memcpy(tmp, src, 8); |
171 | std::memcpy(dst, tmp, 8); |
172 | } |
173 | |
174 | void UnalignedCopy128(const void* src, void* dst) { |
175 | // std::memcpy() gets vectorized when the appropriate compiler options are |
176 | // used. For example, x86 compilers targeting SSE2+ will optimize to an SSE2 |
177 | // load and store. |
178 | char tmp[16]; |
179 | std::memcpy(tmp, src, 16); |
180 | std::memcpy(dst, tmp, 16); |
181 | } |
182 | |
183 | template <bool use_16bytes_chunk> |
184 | inline void ConditionalUnalignedCopy128(const char* src, char* dst) { |
185 | if (use_16bytes_chunk) { |
186 | UnalignedCopy128(src, dst); |
187 | } else { |
188 | UnalignedCopy64(src, dst); |
189 | UnalignedCopy64(src + 8, dst + 8); |
190 | } |
191 | } |
192 | |
// Copy [src, src+(op_limit-op)) to [op, op_limit) a byte at a time. Used
194 | // for handling COPY operations where the input and output regions may overlap. |
195 | // For example, suppose: |
196 | // src == "ab" |
197 | // op == src + 2 |
198 | // op_limit == op + 20 |
199 | // After IncrementalCopySlow(src, op, op_limit), the result will have eleven |
200 | // copies of "ab" |
201 | // ababababababababababab |
202 | // Note that this does not match the semantics of either std::memcpy() or |
203 | // std::memmove(). |
204 | inline char* IncrementalCopySlow(const char* src, char* op, |
205 | char* const op_limit) { |
206 | // TODO: Remove pragma when LLVM is aware this |
207 | // function is only called in cold regions and when cold regions don't get |
208 | // vectorized or unrolled. |
209 | #ifdef __clang__ |
210 | #pragma clang loop unroll(disable) |
211 | #endif |
212 | while (op < op_limit) { |
213 | *op++ = *src++; |
214 | } |
215 | return op_limit; |
216 | } |
217 | |
218 | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
219 | |
220 | // Computes the bytes for shuffle control mask (please read comments on |
221 | // 'pattern_generation_masks' as well) for the given index_offset and |
222 | // pattern_size. For example, when the 'offset' is 6, it will generate a |
223 | // repeating pattern of size 6. So, the first 16 byte indexes will correspond to |
224 | // the pattern-bytes {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3} and the |
225 | // next 16 byte indexes will correspond to the pattern-bytes {4, 5, 0, 1, 2, 3, |
226 | // 4, 5, 0, 1, 2, 3, 4, 5, 0, 1}. These byte index sequences are generated by |
227 | // calling MakePatternMaskBytes(0, 6, index_sequence<16>()) and |
228 | // MakePatternMaskBytes(16, 6, index_sequence<16>()) respectively. |
229 | template <size_t... indexes> |
230 | inline constexpr std::array<char, sizeof...(indexes)> MakePatternMaskBytes( |
231 | int index_offset, int pattern_size, index_sequence<indexes...>) { |
232 | return {static_cast<char>((index_offset + indexes) % pattern_size)...}; |
233 | } |
234 | |
235 | // Computes the shuffle control mask bytes array for given pattern-sizes and |
236 | // returns an array. |
237 | template <size_t... pattern_sizes_minus_one> |
238 | inline constexpr std::array<std::array<char, sizeof(V128)>, |
239 | sizeof...(pattern_sizes_minus_one)> |
240 | MakePatternMaskBytesTable(int index_offset, |
241 | index_sequence<pattern_sizes_minus_one...>) { |
242 | return { |
243 | MakePatternMaskBytes(index_offset, pattern_sizes_minus_one + 1, |
244 | make_index_sequence</*indexes=*/sizeof(V128)>())...}; |
245 | } |
246 | |
247 | // This is an array of shuffle control masks that can be used as the source |
248 | // operand for PSHUFB to permute the contents of the destination XMM register |
249 | // into a repeating byte pattern. |
250 | alignas(16) constexpr std::array<std::array<char, sizeof(V128)>, |
251 | 16> pattern_generation_masks = |
252 | MakePatternMaskBytesTable( |
253 | /*index_offset=*/0, |
254 | /*pattern_sizes_minus_one=*/make_index_sequence<16>()); |
255 | |
256 | // Similar to 'pattern_generation_masks', this table is used to "rotate" the |
257 | // pattern so that we can copy the *next 16 bytes* consistent with the pattern. |
258 | // Basically, pattern_reshuffle_masks is a continuation of |
// pattern_generation_masks. It follows that pattern_reshuffle_masks is the
// same as pattern_generation_masks for offsets 1, 2, 4, 8 and 16.
261 | alignas(16) constexpr std::array<std::array<char, sizeof(V128)>, |
262 | 16> pattern_reshuffle_masks = |
263 | MakePatternMaskBytesTable( |
264 | /*index_offset=*/16, |
265 | /*pattern_sizes_minus_one=*/make_index_sequence<16>()); |
266 | |
267 | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
268 | static inline V128 LoadPattern(const char* src, const size_t pattern_size) { |
269 | V128 generation_mask = V128_Load(reinterpret_cast<const V128*>( |
270 | pattern_generation_masks[pattern_size - 1].data())); |
271 | // Uninitialized bytes are masked out by the shuffle mask. |
272 | // TODO: remove annotation and macro defs once MSan is fixed. |
273 | SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size); |
274 | return V128_Shuffle(V128_LoadU(reinterpret_cast<const V128*>(src)), |
275 | generation_mask); |
276 | } |
277 | |
278 | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
279 | static inline std::pair<V128 /* pattern */, V128 /* reshuffle_mask */> |
280 | LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) { |
281 | V128 pattern = LoadPattern(src, pattern_size); |
282 | |
  // This mask will generate the next 16 bytes in-place. Doing so enables us to
  // write the data with at most 4 V128_StoreU operations.
285 | // |
286 | // For example, suppose pattern is: abcdefabcdefabcd |
287 | // Shuffling with this mask will generate: efabcdefabcdefab |
288 | // Shuffling again will generate: cdefabcdefabcdef |
289 | V128 reshuffle_mask = V128_Load(reinterpret_cast<const V128*>( |
290 | pattern_reshuffle_masks[pattern_size - 1].data())); |
291 | return {pattern, reshuffle_mask}; |
292 | } |
293 | |
294 | #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
295 | |
296 | // Fallback for when we need to copy while extending the pattern, for example |
297 | // copying 10 bytes from 3 positions back abc -> abcabcabcabca. |
298 | // |
299 | // REQUIRES: [dst - offset, dst + 64) is a valid address range. |
300 | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
301 | static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) { |
302 | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
303 | if (SNAPPY_PREDICT_TRUE(offset <= 16)) { |
304 | switch (offset) { |
305 | case 0: |
306 | return false; |
307 | case 1: { |
308 | // TODO: Ideally we should memset, move back once the |
309 | // codegen issues are fixed. |
310 | V128 pattern = V128_DupChar(dst[-1]); |
311 | for (int i = 0; i < 4; i++) { |
312 | V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern); |
313 | } |
314 | return true; |
315 | } |
316 | case 2: |
317 | case 4: |
318 | case 8: |
319 | case 16: { |
320 | V128 pattern = LoadPattern(dst - offset, offset); |
321 | for (int i = 0; i < 4; i++) { |
322 | V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern); |
323 | } |
324 | return true; |
325 | } |
326 | default: { |
327 | auto pattern_and_reshuffle_mask = |
328 | LoadPatternAndReshuffleMask(dst - offset, offset); |
329 | V128 pattern = pattern_and_reshuffle_mask.first; |
330 | V128 reshuffle_mask = pattern_and_reshuffle_mask.second; |
331 | for (int i = 0; i < 4; i++) { |
332 | V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern); |
333 | pattern = V128_Shuffle(pattern, reshuffle_mask); |
334 | } |
335 | return true; |
336 | } |
337 | } |
338 | } |
339 | #else |
340 | if (SNAPPY_PREDICT_TRUE(offset < 16)) { |
341 | if (SNAPPY_PREDICT_FALSE(offset == 0)) return false; |
342 | // Extend the pattern to the first 16 bytes. |
343 | // The simpler formulation of `dst[i - offset]` induces undefined behavior. |
344 | for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i]; |
345 | // Find a multiple of pattern >= 16. |
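    // For example, for offset == 3 this picks (16 / 3 + 1) * 3 == 18, so each
    // later 16-byte chunk copies from 18 bytes back: far enough that source
    // and destination of each memcpy no longer overlap, yet still a multiple
    // of the original pattern, so the repeating 3-byte sequence stays aligned.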
346 | static std::array<uint8_t, 16> pattern_sizes = []() { |
347 | std::array<uint8_t, 16> res; |
348 | for (int i = 1; i < 16; i++) res[i] = (16 / i + 1) * i; |
349 | return res; |
350 | }(); |
351 | offset = pattern_sizes[offset]; |
352 | for (int i = 1; i < 4; i++) { |
353 | std::memcpy(dst + i * 16, dst + i * 16 - offset, 16); |
354 | } |
355 | return true; |
356 | } |
357 | #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
358 | |
359 | // Very rare. |
360 | for (int i = 0; i < 4; i++) { |
361 | std::memcpy(dst + i * 16, dst + i * 16 - offset, 16); |
362 | } |
363 | return true; |
364 | } |
365 | |
366 | // Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than |
367 | // IncrementalCopySlow. buf_limit is the address past the end of the writable |
368 | // region of the buffer. |
369 | inline char* IncrementalCopy(const char* src, char* op, char* const op_limit, |
370 | char* const buf_limit) { |
371 | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
372 | constexpr int big_pattern_size_lower_bound = 16; |
373 | #else |
374 | constexpr int big_pattern_size_lower_bound = 8; |
375 | #endif |
376 | |
377 | // Terminology: |
378 | // |
379 | // slop = buf_limit - op |
380 | // pat = op - src |
381 | // len = op_limit - op |
382 | assert(src < op); |
383 | assert(op < op_limit); |
384 | assert(op_limit <= buf_limit); |
385 | // NOTE: The copy tags use 3 or 6 bits to store the copy length, so len <= 64. |
386 | assert(op_limit - op <= 64); |
387 | // NOTE: In practice the compressor always emits len >= 4, so it is ok to |
388 | // assume that to optimize this function, but this is not guaranteed by the |
389 | // compression format, so we have to also handle len < 4 in case the input |
390 | // does not satisfy these conditions. |
391 | |
392 | size_t pattern_size = op - src; |
393 | // The cases are split into different branches to allow the branch predictor, |
394 | // FDO, and static prediction hints to work better. For each input we list the |
395 | // ratio of invocations that match each condition. |
396 | // |
397 | // input slop < 16 pat < 8 len > 16 |
398 | // ------------------------------------------ |
399 | // html|html4|cp 0% 1.01% 27.73% |
400 | // urls 0% 0.88% 14.79% |
401 | // jpg 0% 64.29% 7.14% |
402 | // pdf 0% 2.56% 58.06% |
403 | // txt[1-4] 0% 0.23% 0.97% |
404 | // pb 0% 0.96% 13.88% |
405 | // bin 0.01% 22.27% 41.17% |
406 | // |
407 | // It is very rare that we don't have enough slop for doing block copies. It |
408 | // is also rare that we need to expand a pattern. Small patterns are common |
409 | // for incompressible formats and for those we are plenty fast already. |
410 | // Lengths are normally not greater than 16 but they vary depending on the |
411 | // input. In general if we always predict len <= 16 it would be an ok |
412 | // prediction. |
413 | // |
414 | // In order to be fast we want a pattern >= 16 bytes (or 8 bytes in non-SSE) |
415 | // and an unrolled loop copying 1x 16 bytes (or 2x 8 bytes in non-SSE) at a |
416 | // time. |
417 | |
418 | // Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE) |
419 | // bytes. |
420 | if (pattern_size < big_pattern_size_lower_bound) { |
421 | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
    // Load the first eight bytes into a 128-bit XMM register, then use PSHUFB
423 | // to permute the register's contents in-place into a repeating sequence of |
424 | // the first "pattern_size" bytes. |
425 | // For example, suppose: |
426 | // src == "abc" |
    //    op        == src + 3
428 | // After V128_Shuffle(), "pattern" will have five copies of "abc" |
429 | // followed by one byte of slop: abcabcabcabcabca. |
430 | // |
431 | // The non-SSE fallback implementation suffers from store-forwarding stalls |
432 | // because its loads and stores partly overlap. By expanding the pattern |
433 | // in-place, we avoid the penalty. |
434 | |
435 | // Typically, the op_limit is the gating factor so try to simplify the loop |
436 | // based on that. |
437 | if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) { |
438 | auto pattern_and_reshuffle_mask = |
439 | LoadPatternAndReshuffleMask(src, pattern_size); |
440 | V128 pattern = pattern_and_reshuffle_mask.first; |
441 | V128 reshuffle_mask = pattern_and_reshuffle_mask.second; |
442 | |
443 | // There is at least one, and at most four 16-byte blocks. Writing four |
      // conditionals instead of a loop allows FDO to lay out the code with
445 | // respect to the actual probabilities of each length. |
446 | // TODO: Replace with loop with trip count hint. |
447 | V128_StoreU(reinterpret_cast<V128*>(op), pattern); |
448 | |
449 | if (op + 16 < op_limit) { |
450 | pattern = V128_Shuffle(pattern, reshuffle_mask); |
451 | V128_StoreU(reinterpret_cast<V128*>(op + 16), pattern); |
452 | } |
453 | if (op + 32 < op_limit) { |
454 | pattern = V128_Shuffle(pattern, reshuffle_mask); |
455 | V128_StoreU(reinterpret_cast<V128*>(op + 32), pattern); |
456 | } |
457 | if (op + 48 < op_limit) { |
458 | pattern = V128_Shuffle(pattern, reshuffle_mask); |
459 | V128_StoreU(reinterpret_cast<V128*>(op + 48), pattern); |
460 | } |
461 | return op_limit; |
462 | } |
463 | char* const op_end = buf_limit - 15; |
464 | if (SNAPPY_PREDICT_TRUE(op < op_end)) { |
465 | auto pattern_and_reshuffle_mask = |
466 | LoadPatternAndReshuffleMask(src, pattern_size); |
467 | V128 pattern = pattern_and_reshuffle_mask.first; |
468 | V128 reshuffle_mask = pattern_and_reshuffle_mask.second; |
469 | |
470 | // This code path is relatively cold however so we save code size |
471 | // by avoiding unrolling and vectorizing. |
472 | // |
      // TODO: Remove pragma when cold regions don't get
474 | // vectorized or unrolled. |
475 | #ifdef __clang__ |
476 | #pragma clang loop unroll(disable) |
477 | #endif |
478 | do { |
479 | V128_StoreU(reinterpret_cast<V128*>(op), pattern); |
480 | pattern = V128_Shuffle(pattern, reshuffle_mask); |
481 | op += 16; |
482 | } while (SNAPPY_PREDICT_TRUE(op < op_end)); |
483 | } |
484 | return IncrementalCopySlow(op - pattern_size, op, op_limit); |
485 | #else // !SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
486 | // If plenty of buffer space remains, expand the pattern to at least 8 |
487 | // bytes. The way the following loop is written, we need 8 bytes of buffer |
488 | // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10 |
489 | // bytes if pattern_size is 2. Precisely encoding that is probably not |
490 | // worthwhile; instead, invoke the slow path if we cannot write 11 bytes |
491 | // (because 11 are required in the worst case). |
492 | if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) { |
493 | while (pattern_size < 8) { |
494 | UnalignedCopy64(src, op); |
495 | op += pattern_size; |
496 | pattern_size *= 2; |
497 | } |
498 | if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit; |
499 | } else { |
500 | return IncrementalCopySlow(src, op, op_limit); |
501 | } |
502 | #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
503 | } |
504 | assert(pattern_size >= big_pattern_size_lower_bound); |
505 | constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16; |
506 | |
507 | // Copy 1x 16 bytes (or 2x 8 bytes in non-SSE) at a time. Because op - src can |
508 | // be < 16 in non-SSE, a single UnalignedCopy128 might overwrite data in op. |
509 | // UnalignedCopy64 is safe because expanding the pattern to at least 8 bytes |
510 | // guarantees that op - src >= 8. |
511 | // |
512 | // Typically, the op_limit is the gating factor so try to simplify the loop |
513 | // based on that. |
514 | if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) { |
515 | // There is at least one, and at most four 16-byte blocks. Writing four |
    // conditionals instead of a loop allows FDO to lay out the code with respect
517 | // to the actual probabilities of each length. |
518 | // TODO: Replace with loop with trip count hint. |
519 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op); |
520 | if (op + 16 < op_limit) { |
521 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 16, op + 16); |
522 | } |
523 | if (op + 32 < op_limit) { |
524 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 32, op + 32); |
525 | } |
526 | if (op + 48 < op_limit) { |
527 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 48, op + 48); |
528 | } |
529 | return op_limit; |
530 | } |
531 | |
532 | // Fall back to doing as much as we can with the available slop in the |
533 | // buffer. This code path is relatively cold however so we save code size by |
534 | // avoiding unrolling and vectorizing. |
535 | // |
  // TODO: Remove pragma when cold regions don't get vectorized
537 | // or unrolled. |
538 | #ifdef __clang__ |
539 | #pragma clang loop unroll(disable) |
540 | #endif |
541 | for (char* op_end = buf_limit - 16; op < op_end; op += 16, src += 16) { |
542 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op); |
543 | } |
544 | if (op >= op_limit) return op_limit; |
545 | |
546 | // We only take this branch if we didn't have enough slop and we can do a |
547 | // single 8 byte copy. |
548 | if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) { |
549 | UnalignedCopy64(src, op); |
550 | src += 8; |
551 | op += 8; |
552 | } |
553 | return IncrementalCopySlow(src, op, op_limit); |
554 | } |
555 | |
556 | } // namespace |
557 | |
558 | template <bool allow_fast_path> |
559 | static inline char* EmitLiteral(char* op, const char* literal, int len) { |
560 | // The vast majority of copies are below 16 bytes, for which a |
561 | // call to std::memcpy() is overkill. This fast path can sometimes |
562 | // copy up to 15 bytes too much, but that is okay in the |
  // main loop, since we have some margin on both sides:
564 | // |
565 | // - The input will always have kInputMarginBytes = 15 extra |
566 | // available bytes, as long as we're in the main loop, and |
567 | // if not, allow_fast_path = false. |
568 | // - The output will always have 32 spare bytes (see |
569 | // MaxCompressedLength). |
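  // Two illustrative encodings: a 7-byte literal emits the single tag byte
  // LITERAL | (6 << 2) followed by the 7 bytes themselves; a 100-byte literal
  // has n == 99, which fits in one extra byte (count == 1), so it emits
  // LITERAL | (60 << 2), the length byte 99, and then the 100 bytes.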
570 | assert(len > 0); // Zero-length literals are disallowed |
571 | int n = len - 1; |
572 | if (allow_fast_path && len <= 16) { |
573 | // Fits in tag byte |
574 | *op++ = LITERAL | (n << 2); |
575 | |
576 | UnalignedCopy128(literal, op); |
577 | return op + len; |
578 | } |
579 | |
580 | if (n < 60) { |
581 | // Fits in tag byte |
582 | *op++ = LITERAL | (n << 2); |
583 | } else { |
584 | int count = (Bits::Log2Floor(n) >> 3) + 1; |
585 | assert(count >= 1); |
586 | assert(count <= 4); |
587 | *op++ = LITERAL | ((59 + count) << 2); |
588 | // Encode in upcoming bytes. |
589 | // Write 4 bytes, though we may care about only 1 of them. The output buffer |
    // is guaranteed to have at least 3 more bytes left as 'len >= 61' holds
591 | // here and there is a std::memcpy() of size 'len' below. |
592 | LittleEndian::Store32(op, n); |
593 | op += count; |
594 | } |
595 | std::memcpy(op, literal, len); |
596 | return op + len; |
597 | } |
598 | |
599 | template <bool len_less_than_12> |
600 | static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) { |
601 | assert(len <= 64); |
602 | assert(len >= 4); |
603 | assert(offset < 65536); |
604 | assert(len_less_than_12 == (len < 12)); |
605 | |
606 | if (len_less_than_12) { |
607 | uint32_t u = (len << 2) + (offset << 8); |
608 | uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0); |
609 | uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2); |
    // It turns out that offset < 2048 is a difficult-to-predict branch;
    // `perf record` shows it has the highest percentage of branch misses in
    // benchmarks. This produces branch-free code, and the data dependency
    // chain that bottlenecks the throughput is so long that a few extra
    // instructions are completely free (IPC << 6 because of data deps).
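    // For example, with len == 7 and offset == 100 the copy-1 variant is
    // chosen: the low byte of u + copy1 works out to the tag
    // COPY_1_BYTE_OFFSET | ((7 - 4) << 2) | ((100 >> 8) << 5), and the next
    // byte is 100 & 0xff, exactly what a straightforward copy-1 emitter
    // would write.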
615 | u += offset < 2048 ? copy1 : copy2; |
616 | LittleEndian::Store32(op, u); |
617 | op += offset < 2048 ? 2 : 3; |
618 | } else { |
619 | // Write 4 bytes, though we only care about 3 of them. The output buffer |
620 | // is required to have some slack, so the extra byte won't overrun it. |
621 | uint32_t u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8); |
622 | LittleEndian::Store32(op, u); |
623 | op += 3; |
624 | } |
625 | return op; |
626 | } |
627 | |
628 | template <bool len_less_than_12> |
629 | static inline char* EmitCopy(char* op, size_t offset, size_t len) { |
630 | assert(len_less_than_12 == (len < 12)); |
631 | if (len_less_than_12) { |
632 | return EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len); |
633 | } else { |
634 | // A special case for len <= 64 might help, but so far measurements suggest |
635 | // it's in the noise. |
636 | |
637 | // Emit 64 byte copies but make sure to keep at least four bytes reserved. |
638 | while (SNAPPY_PREDICT_FALSE(len >= 68)) { |
639 | op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 64); |
640 | len -= 64; |
641 | } |
642 | |
643 | // One or two copies will now finish the job. |
644 | if (len > 64) { |
645 | op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 60); |
646 | len -= 60; |
647 | } |
648 | |
649 | // Emit remainder. |
650 | if (len < 12) { |
651 | op = EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len); |
652 | } else { |
653 | op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len); |
654 | } |
655 | return op; |
656 | } |
657 | } |
658 | |
659 | bool GetUncompressedLength(const char* start, size_t n, size_t* result) { |
660 | uint32_t v = 0; |
661 | const char* limit = start + n; |
662 | if (Varint::Parse32WithLimit(start, limit, &v) != NULL) { |
663 | *result = v; |
664 | return true; |
665 | } else { |
666 | return false; |
667 | } |
668 | } |
669 | |
670 | namespace { |
671 | uint32_t CalculateTableSize(uint32_t input_size) { |
672 | static_assert( |
673 | kMaxHashTableSize >= kMinHashTableSize, |
      "kMaxHashTableSize should be greater than or equal to kMinHashTableSize.");
675 | if (input_size > kMaxHashTableSize) { |
676 | return kMaxHashTableSize; |
677 | } |
678 | if (input_size < kMinHashTableSize) { |
679 | return kMinHashTableSize; |
680 | } |
681 | // This is equivalent to Log2Ceiling(input_size), assuming input_size > 1. |
682 | // 2 << Log2Floor(x - 1) is equivalent to 1 << (1 + Log2Floor(x - 1)). |
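  // For example, an input_size of 5000 (assuming it lies between the min and
  // max table sizes) yields 2 << Log2Floor(4999) == 2 << 12 == 8192, the
  // smallest power of two >= 5000.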
683 | return 2u << Bits::Log2Floor(input_size - 1); |
684 | } |
685 | } // namespace |
686 | |
687 | namespace internal { |
688 | WorkingMemory::WorkingMemory(size_t input_size) { |
689 | const size_t max_fragment_size = std::min(input_size, kBlockSize); |
690 | const size_t table_size = CalculateTableSize(max_fragment_size); |
691 | size_ = table_size * sizeof(*table_) + max_fragment_size + |
692 | MaxCompressedLength(max_fragment_size); |
693 | mem_ = std::allocator<char>().allocate(size_); |
694 | table_ = reinterpret_cast<uint16_t*>(mem_); |
695 | input_ = mem_ + table_size * sizeof(*table_); |
696 | output_ = input_ + max_fragment_size; |
697 | } |
698 | |
699 | WorkingMemory::~WorkingMemory() { |
700 | std::allocator<char>().deallocate(mem_, size_); |
701 | } |
702 | |
703 | uint16_t* WorkingMemory::GetHashTable(size_t fragment_size, |
704 | int* table_size) const { |
705 | const size_t htsize = CalculateTableSize(fragment_size); |
706 | memset(table_, 0, htsize * sizeof(*table_)); |
707 | *table_size = htsize; |
708 | return table_; |
709 | } |
710 | } // end namespace internal |
711 | |
712 | // Flat array compression that does not emit the "uncompressed length" |
713 | // prefix. Compresses "input" string to the "*op" buffer. |
714 | // |
715 | // REQUIRES: "input" is at most "kBlockSize" bytes long. |
716 | // REQUIRES: "op" points to an array of memory that is at least |
717 | // "MaxCompressedLength(input.size())" in size. |
718 | // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. |
719 | // REQUIRES: "table_size" is a power of two |
720 | // |
721 | // Returns an "end" pointer into "op" buffer. |
722 | // "end - op" is the compressed size of "input". |
723 | namespace internal { |
724 | char* CompressFragment(const char* input, size_t input_size, char* op, |
725 | uint16_t* table, const int table_size) { |
726 | // "ip" is the input pointer, and "op" is the output pointer. |
727 | const char* ip = input; |
728 | assert(input_size <= kBlockSize); |
729 | assert((table_size & (table_size - 1)) == 0); // table must be power of two |
730 | const uint32_t mask = table_size - 1; |
731 | const char* ip_end = input + input_size; |
732 | const char* base_ip = ip; |
733 | |
734 | const size_t kInputMarginBytes = 15; |
735 | if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) { |
736 | const char* ip_limit = input + input_size - kInputMarginBytes; |
737 | |
738 | for (uint32_t preload = LittleEndian::Load32(ip + 1);;) { |
739 | // Bytes in [next_emit, ip) will be emitted as literal bytes. Or |
740 | // [next_emit, ip_end) after the main loop. |
741 | const char* next_emit = ip++; |
742 | uint64_t data = LittleEndian::Load64(ip); |
743 | // The body of this loop calls EmitLiteral once and then EmitCopy one or |
744 | // more times. (The exception is that when we're close to exhausting |
745 | // the input we goto emit_remainder.) |
746 | // |
747 | // In the first iteration of this loop we're just starting, so |
748 | // there's nothing to copy, so calling EmitLiteral once is |
749 | // necessary. And we only start a new iteration when the |
750 | // current iteration has determined that a call to EmitLiteral will |
751 | // precede the next call to EmitCopy (if any). |
752 | // |
753 | // Step 1: Scan forward in the input looking for a 4-byte-long match. |
754 | // If we get close to exhausting the input then goto emit_remainder. |
755 | // |
756 | // Heuristic match skipping: If 32 bytes are scanned with no matches |
757 | // found, start looking only at every other byte. If 32 more bytes are |
758 | // scanned (or skipped), look at every third byte, etc.. When a match is |
759 | // found, immediately go back to looking at every byte. This is a small |
760 | // loss (~5% performance, ~0.1% density) for compressible data due to more |
761 | // bookkeeping, but for non-compressible data (such as JPEG) it's a huge |
762 | // win since the compressor quickly "realizes" the data is incompressible |
763 | // and doesn't bother looking for matches everywhere. |
764 | // |
765 | // The "skip" variable keeps track of how many bytes there are since the |
      // last match; dividing it by 32 (i.e., right-shifting by five) gives the
767 | // number of bytes to move ahead for each iteration. |
768 | uint32_t skip = 32; |
769 | |
770 | const char* candidate; |
771 | if (ip_limit - ip >= 16) { |
772 | auto delta = ip - base_ip; |
773 | for (int j = 0; j < 4; ++j) { |
774 | for (int k = 0; k < 4; ++k) { |
775 | int i = 4 * j + k; |
776 | // These for-loops are meant to be unrolled. So we can freely |
777 | // special case the first iteration to use the value already |
778 | // loaded in preload. |
779 | uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data); |
780 | assert(dword == LittleEndian::Load32(ip + i)); |
781 | uint32_t hash = HashBytes(dword, mask); |
782 | candidate = base_ip + table[hash]; |
783 | assert(candidate >= base_ip); |
784 | assert(candidate < ip + i); |
785 | table[hash] = delta + i; |
786 | if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) { |
787 | *op = LITERAL | (i << 2); |
788 | UnalignedCopy128(next_emit, op + 1); |
789 | ip += i; |
790 | op = op + i + 2; |
791 | goto emit_match; |
792 | } |
793 | data >>= 8; |
794 | } |
795 | data = LittleEndian::Load64(ip + 4 * j + 4); |
796 | } |
797 | ip += 16; |
798 | skip += 16; |
799 | } |
800 | while (true) { |
801 | assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip)); |
802 | uint32_t hash = HashBytes(data, mask); |
803 | uint32_t bytes_between_hash_lookups = skip >> 5; |
804 | skip += bytes_between_hash_lookups; |
805 | const char* next_ip = ip + bytes_between_hash_lookups; |
806 | if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) { |
807 | ip = next_emit; |
808 | goto emit_remainder; |
809 | } |
810 | candidate = base_ip + table[hash]; |
811 | assert(candidate >= base_ip); |
812 | assert(candidate < ip); |
813 | |
814 | table[hash] = ip - base_ip; |
815 | if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) == |
816 | LittleEndian::Load32(candidate))) { |
817 | break; |
818 | } |
819 | data = LittleEndian::Load32(next_ip); |
820 | ip = next_ip; |
821 | } |
822 | |
823 | // Step 2: A 4-byte match has been found. We'll later see if more |
824 | // than 4 bytes match. But, prior to the match, input |
825 | // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes." |
826 | assert(next_emit + 16 <= ip_end); |
827 | op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit); |
828 | |
829 | // Step 3: Call EmitCopy, and then see if another EmitCopy could |
830 | // be our next move. Repeat until we find no match for the |
831 | // input immediately after what was consumed by the last EmitCopy call. |
832 | // |
833 | // If we exit this loop normally then we need to call EmitLiteral next, |
834 | // though we don't yet know how big the literal will be. We handle that |
835 | // by proceeding to the next iteration of the main loop. We also can exit |
836 | // this loop via goto if we get close to exhausting the input. |
837 | emit_match: |
838 | do { |
839 | // We have a 4-byte match at ip, and no need to emit any |
840 | // "literal bytes" prior to ip. |
841 | const char* base = ip; |
842 | std::pair<size_t, bool> p = |
843 | FindMatchLength(candidate + 4, ip + 4, ip_end, &data); |
844 | size_t matched = 4 + p.first; |
845 | ip += matched; |
846 | size_t offset = base - candidate; |
847 | assert(0 == memcmp(base, candidate, matched)); |
848 | if (p.second) { |
849 | op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched); |
850 | } else { |
851 | op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched); |
852 | } |
853 | if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) { |
854 | goto emit_remainder; |
855 | } |
856 | // Expect 5 bytes to match |
857 | assert((data & 0xFFFFFFFFFF) == |
858 | (LittleEndian::Load64(ip) & 0xFFFFFFFFFF)); |
859 | // We are now looking for a 4-byte match again. We read |
        // table[Hash(ip, mask)] for that. To improve compression,
861 | // we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)]. |
862 | table[HashBytes(LittleEndian::Load32(ip - 1), mask)] = ip - base_ip - 1; |
863 | uint32_t hash = HashBytes(data, mask); |
864 | candidate = base_ip + table[hash]; |
865 | table[hash] = ip - base_ip; |
866 | // Measurements on the benchmarks have shown the following probabilities |
867 | // for the loop to exit (ie. avg. number of iterations is reciprocal). |
868 | // BM_Flat/6 txt1 p = 0.3-0.4 |
869 | // BM_Flat/7 txt2 p = 0.35 |
870 | // BM_Flat/8 txt3 p = 0.3-0.4 |
871 | // BM_Flat/9 txt3 p = 0.34-0.4 |
872 | // BM_Flat/10 pb p = 0.4 |
873 | // BM_Flat/11 gaviota p = 0.1 |
874 | // BM_Flat/12 cp p = 0.5 |
875 | // BM_Flat/13 c p = 0.3 |
876 | } while (static_cast<uint32_t>(data) == LittleEndian::Load32(candidate)); |
877 | // Because the least significant 5 bytes matched, we can utilize data |
878 | // for the next iteration. |
879 | preload = data >> 8; |
880 | } |
881 | } |
882 | |
883 | emit_remainder: |
884 | // Emit the remaining bytes as a literal |
885 | if (ip < ip_end) { |
886 | op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip); |
887 | } |
888 | |
889 | return op; |
890 | } |
891 | } // end namespace internal |
892 | |
// Called back at every compression call to trace parameters and sizes.
894 | static inline void Report(const char *algorithm, size_t compressed_size, |
895 | size_t uncompressed_size) { |
896 | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
897 | (void)algorithm; |
898 | (void)compressed_size; |
899 | (void)uncompressed_size; |
900 | } |
901 | |
902 | // Signature of output types needed by decompression code. |
903 | // The decompression code is templatized on a type that obeys this |
904 | // signature so that we do not pay virtual function call overhead in |
905 | // the middle of a tight decompression loop. |
906 | // |
907 | // class DecompressionWriter { |
908 | // public: |
909 | // // Called before decompression |
910 | // void SetExpectedLength(size_t length); |
911 | // |
912 | // // For performance a writer may choose to donate the cursor variable to the |
913 | // // decompression function. The decompression will inject it in all its |
914 | // // function calls to the writer. Keeping the important output cursor as a |
915 | // // function local stack variable allows the compiler to keep it in |
916 | // // register, which greatly aids performance by avoiding loads and stores of |
917 | // // this variable in the fast path loop iterations. |
918 | // T GetOutputPtr() const; |
919 | // |
920 | // // At end of decompression the loop donates the ownership of the cursor |
921 | // // variable back to the writer by calling this function. |
922 | // void SetOutputPtr(T op); |
923 | // |
924 | // // Called after decompression |
925 | // bool CheckLength() const; |
926 | // |
927 | // // Called repeatedly during decompression |
//   // Each function gets a pointer to op (the output pointer), which the writer
929 | // // can use and update. Note it's important that these functions get fully |
930 | // // inlined so that no actual address of the local variable needs to be |
931 | // // taken. |
932 | // bool Append(const char* ip, size_t length, T* op); |
933 | // bool AppendFromSelf(uint32_t offset, size_t length, T* op); |
934 | // |
935 | // // The rules for how TryFastAppend differs from Append are somewhat |
936 | // // convoluted: |
937 | // // |
938 | // // - TryFastAppend is allowed to decline (return false) at any |
939 | // // time, for any reason -- just "return false" would be |
940 | // // a perfectly legal implementation of TryFastAppend. |
941 | // // The intention is for TryFastAppend to allow a fast path |
942 | // // in the common case of a small append. |
943 | // // - TryFastAppend is allowed to read up to <available> bytes |
944 | // // from the input buffer, whereas Append is allowed to read |
945 | // // <length>. However, if it returns true, it must leave |
946 | // // at least five (kMaximumTagLength) bytes in the input buffer |
947 | // // afterwards, so that there is always enough space to read the |
948 | // // next tag without checking for a refill. |
//   //  - TryFastAppend must always decline (return false)
950 | // // if <length> is 61 or more, as in this case the literal length is not |
951 | // // decoded fully. In practice, this should not be a big problem, |
952 | // // as it is unlikely that one would implement a fast path accepting |
953 | // // this much data. |
954 | // // |
955 | // bool TryFastAppend(const char* ip, size_t available, size_t length, T* op); |
956 | // }; |
957 | |
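// Keeps only the low n bytes of v; for example, ExtractLowBytes(0x12345678, 2)
// returns 0x5678.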
static inline uint32_t ExtractLowBytes(const uint32_t& v, int n) {
959 | assert(n >= 0); |
960 | assert(n <= 4); |
961 | #if SNAPPY_HAVE_BMI2 |
962 | return _bzhi_u32(v, 8 * n); |
963 | #else |
964 | // This needs to be wider than uint32_t otherwise `mask << 32` will be |
965 | // undefined. |
966 | uint64_t mask = 0xffffffff; |
967 | return v & ~(mask << (8 * n)); |
968 | #endif |
969 | } |
970 | |
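// Returns true iff (value << shift) does not fit in 32 bits. For example,
// LeftShiftOverflows(0x80, 25) is true because bit 7 would be shifted out to
// bit position 32, while LeftShiftOverflows(0x40, 25) is false.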
971 | static inline bool LeftShiftOverflows(uint8_t value, uint32_t shift) { |
972 | assert(shift < 32); |
973 | static const uint8_t masks[] = { |
974 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |
975 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |
976 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |
977 | 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe}; |
978 | return (value & masks[shift]) != 0; |
979 | } |
980 | |
981 | inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) { |
982 | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
983 | (void)dst; |
984 | return offset != 0; |
985 | } |
986 | |
987 | // Copies between size bytes and 64 bytes from src to dest. size cannot exceed |
988 | // 64. More than size bytes, but never exceeding 64, might be copied if doing |
989 | // so gives better performance. [src, src + size) must not overlap with |
990 | // [dst, dst + size), but [src, src + 64) may overlap with [dst, dst + 64). |
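// For example, MemCopy64(dst, src, 40) first moves a 32-byte block and then,
// since 40 > 32, moves the next 32 bytes as well, touching 64 output bytes in
// total, which the contract above explicitly permits.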
991 | void MemCopy64(char* dst, const void* src, size_t size) { |
992 | // Always copy this many bytes, test if we need to copy more. |
993 | constexpr int kShortMemCopy = 32; |
994 | // We're always allowed to copy 64 bytes, so if we exceed kShortMemCopy just |
995 | // copy 64 rather than the exact amount. |
996 | constexpr int kLongMemCopy = 64; |
997 | |
998 | assert(size <= kLongMemCopy); |
999 | assert(std::less_equal<const void*>()(static_cast<const char*>(src) + size, |
1000 | dst) || |
1001 | std::less_equal<const void*>()(dst + size, src)); |
1002 | |
1003 | // We know that src and dst are at least size bytes apart. However, because we |
1004 | // might copy more than size bytes the copy still might overlap past size. |
1005 | // E.g. if src and dst appear consecutively in memory (src + size == dst). |
1006 | std::memmove(dst, src, kShortMemCopy); |
1007 | // Profiling shows that nearly all copies are short. |
1008 | if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) { |
1009 | std::memmove(dst + kShortMemCopy, |
1010 | static_cast<const uint8_t*>(src) + kShortMemCopy, |
1011 | kLongMemCopy - kShortMemCopy); |
1012 | } |
1013 | } |
1014 | |
1015 | void MemCopy64(ptrdiff_t dst, const void* src, size_t size) { |
1016 | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
1017 | (void)dst; |
1018 | (void)src; |
1019 | (void)size; |
1020 | } |
1021 | |
1022 | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
1023 | inline size_t AdvanceToNextTagARMOptimized(const uint8_t** ip_p, size_t* tag) { |
1024 | const uint8_t*& ip = *ip_p; |
1025 | // This section is crucial for the throughput of the decompression loop. |
1026 | // The latency of an iteration is fundamentally constrained by the |
1027 | // following data chain on ip. |
1028 | // ip -> c = Load(ip) -> delta1 = (c & 3) -> ip += delta1 or delta2 |
1029 | // delta2 = ((c >> 2) + 1) ip++ |
  // This is different from the x86 optimization because ARM has a conditional
  // add instruction (csinc), which removes several register moves.
1032 | const size_t tag_type = *tag & 3; |
1033 | const bool is_literal = (tag_type == 0); |
1034 | if (is_literal) { |
1035 | size_t next_literal_tag = (*tag >> 2) + 1; |
1036 | *tag = ip[next_literal_tag]; |
1037 | ip += next_literal_tag + 1; |
1038 | } else { |
1039 | *tag = ip[tag_type]; |
1040 | ip += tag_type + 1; |
1041 | } |
1042 | return tag_type; |
1043 | } |
1044 | |
1045 | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
1046 | inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) { |
1047 | const uint8_t*& ip = *ip_p; |
1048 | // This section is crucial for the throughput of the decompression loop. |
1049 | // The latency of an iteration is fundamentally constrained by the |
1050 | // following data chain on ip. |
1051 | // ip -> c = Load(ip) -> ip1 = ip + 1 + (c & 3) -> ip = ip1 or ip2 |
1052 | // ip2 = ip + 2 + (c >> 2) |
1053 | // This amounts to 8 cycles. |
1054 | // 5 (load) + 1 (c & 3) + 1 (lea ip1, [ip + (c & 3) + 1]) + 1 (cmov) |
1055 | size_t literal_len = *tag >> 2; |
1056 | size_t tag_type = *tag; |
1057 | bool is_literal; |
1058 | #if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__) |
1059 | // TODO clang misses the fact that the (c & 3) already correctly |
1060 | // sets the zero flag. |
1061 | asm("and $3, %k[tag_type]\n\t" |
1062 | : [tag_type] "+r" (tag_type), "=@ccz" (is_literal)); |
1063 | #else |
1064 | tag_type &= 3; |
1065 | is_literal = (tag_type == 0); |
1066 | #endif |
1067 | // TODO |
  // This code is subtle. Loading the values first and then doing the cmov has
  // lower latency than cmov'ing ip and then loading. However, clang would move
  // the loads in an optimization phase; volatile prevents this transformation.
1071 | // Note that we have enough slop bytes (64) that the loads are always valid. |
1072 | size_t tag_literal = |
1073 | static_cast<const volatile uint8_t*>(ip)[1 + literal_len]; |
1074 | size_t tag_copy = static_cast<const volatile uint8_t*>(ip)[tag_type]; |
1075 | *tag = is_literal ? tag_literal : tag_copy; |
1076 | const uint8_t* ip_copy = ip + 1 + tag_type; |
1077 | const uint8_t* ip_literal = ip + 2 + literal_len; |
1078 | ip = is_literal ? ip_literal : ip_copy; |
1079 | #if defined(__GNUC__) && defined(__x86_64__) |
  // TODO Clang is "optimizing" zero-extension (a totally free
  // operation); this means that after the cmov of tag, it emits another movzb
  // tag, byte(tag). It really matters as it's on the core chain. This dummy
  // asm persuades clang to do the zero-extension at the load (where it's
  // automatic), removing the expensive movzb.
1085 | asm("" ::"r" (tag_copy)); |
1086 | #endif |
1087 | return tag_type; |
1088 | } |
1089 | |
// Extracts the offset for copy-1 and copy-2; returns 0 for literals and copy-4.
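// For example, ExtractOffset(0x12345678, 1) == 0x78 (the single extra offset
// byte of a copy-1) and ExtractOffset(0x12345678, 2) == 0x5678 (the 16-bit
// offset of a copy-2), while tag types 0 and 3 always yield 0.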
inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) {
1092 | // For x86 non-static storage works better. For ARM static storage is better. |
1093 | // TODO: Once the array is recognized as a register, improve the |
1094 | // readability for x86. |
1095 | #if defined(__x86_64__) |
  constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
1097 | uint16_t result; |
1098 | memcpy(&result, |
1099 | reinterpret_cast<const char*>(&kExtractMasksCombined) + 2 * tag_type, |
1100 | sizeof(result)); |
1101 | return val & result; |
1102 | #elif defined(__aarch64__) |
1103 | constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull; |
1104 | return val & static_cast<uint32_t>( |
1105 | (kExtractMasksCombined >> (tag_type * 16)) & 0xFFFF); |
1106 | #else |
1107 | static constexpr uint32_t kExtractMasks[4] = {0, 0xFF, 0xFFFF, 0}; |
1108 | return val & kExtractMasks[tag_type]; |
1109 | #endif |
}
1111 | |
1112 | // Core decompression loop, when there is enough data available. |
1113 | // Decompresses the input buffer [ip, ip_limit) into the output buffer |
// [op, op_limit_min_slop). Returns when we get too close to the end of the
// input buffer, when we exceed op_limit_min_slop, or when an exceptional
// tag is encountered (a literal of length > 60, or a copy-4).
1117 | // Returns {ip, op} at the points it stopped decoding. |
1118 | // TODO This function probably does not need to be inlined, as it |
1119 | // should decode large chunks at a time. This allows runtime dispatch to |
1120 | // implementations based on CPU capability (BMI2 / perhaps 32 / 64 byte memcpy). |
1121 | template <typename T> |
1122 | std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless( |
1123 | const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base, |
1124 | ptrdiff_t op_limit_min_slop) { |
1125 | // We unroll the inner loop twice so we need twice the spare room. |
1126 | op_limit_min_slop -= kSlopBytes; |
1127 | if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) { |
1128 | const uint8_t* const ip_limit_min_slop = ip_limit - 2 * kSlopBytes - 1; |
1129 | ip++; |
1130 | // ip points just past the tag and we are touching at maximum kSlopBytes |
1131 | // in an iteration. |
1132 | size_t tag = ip[-1]; |
1133 | #if defined(__clang__) && defined(__aarch64__) |
1134 | // Workaround for https://bugs.llvm.org/show_bug.cgi?id=51317 |
    // when loading 1 byte, clang for aarch64 doesn't realize that it (ldrb)
1136 | // comes with free zero-extension, so clang generates another |
    // 'and xn, xm, 0xff' before it uses that as the offset. This 'and' is
1138 | // redundant and can be removed by adding this dummy asm, which gives |
1139 | // clang a hint that we're doing the zero-extension at the load. |
1140 | asm("" ::"r" (tag)); |
1141 | #endif |
1142 | do { |
      // The throughput is limited by instructions; unrolling the inner loop
      // twice reduces the number of instructions checking limits and also
      // leads to fewer mov's.
1146 | for (int i = 0; i < 2; i++) { |
1147 | const uint8_t* old_ip = ip; |
1148 | assert(tag == ip[-1]); |
1149 | // For literals tag_type = 0, hence we will always obtain 0 from |
        // ExtractOffset. For literals, offset will thus be kLiteralOffset.
1151 | ptrdiff_t len_min_offset = kLengthMinusOffset[tag]; |
1152 | #if defined(__aarch64__) |
1153 | size_t tag_type = AdvanceToNextTagARMOptimized(&ip, &tag); |
1154 | #else |
1155 | size_t tag_type = AdvanceToNextTagX86Optimized(&ip, &tag); |
1156 | #endif |
1157 | uint32_t next = LittleEndian::Load32(old_ip); |
1158 | size_t len = len_min_offset & 0xFF; |
1159 | len_min_offset -= ExtractOffset(next, tag_type); |
1160 | if (SNAPPY_PREDICT_FALSE(len_min_offset > 0)) { |
1161 | if (SNAPPY_PREDICT_FALSE(len & 0x80)) { |
1162 | // Exceptional case (long literal or copy 4). |
            // Actually doing the copy here would negatively impact the main
            // loop because the compiler incorrectly allocates a register for
1165 | // this fallback. Hence we just break. |
1166 | break_loop: |
1167 | ip = old_ip; |
1168 | goto exit; |
1169 | } |
1170 | // Only copy-1 or copy-2 tags can get here. |
1171 | assert(tag_type == 1 || tag_type == 2); |
1172 | std::ptrdiff_t delta = op + len_min_offset - len; |
1173 | // Guard against copies before the buffer start. |
1174 | if (SNAPPY_PREDICT_FALSE(delta < 0 || |
1175 | !Copy64BytesWithPatternExtension( |
1176 | op_base + op, len - len_min_offset))) { |
1177 | goto break_loop; |
1178 | } |
1179 | op += len; |
1180 | continue; |
1181 | } |
1182 | std::ptrdiff_t delta = op + len_min_offset - len; |
1183 | if (SNAPPY_PREDICT_FALSE(delta < 0)) { |
          // Due to the spurious offset of 256 that literals carry, this will trigger
1185 | // at the start of a block when op is still smaller than 256. |
1186 | if (tag_type != 0) goto break_loop; |
1187 | MemCopy64(op_base + op, old_ip, len); |
1188 | op += len; |
1189 | continue; |
1190 | } |
1191 | |
1192 | // For copies we need to copy from op_base + delta, for literals |
1193 | // we need to copy from ip instead of from the stream. |
1194 | const void* from = |
1195 | tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip; |
1196 | MemCopy64(op_base + op, from, len); |
1197 | op += len; |
1198 | } |
1199 | } while (ip < ip_limit_min_slop && op < op_limit_min_slop); |
1200 | exit: |
1201 | ip--; |
1202 | assert(ip <= ip_limit); |
1203 | } |
1204 | return {ip, op}; |
1205 | } |
1206 | |
1207 | // Helper class for decompression |
1208 | class SnappyDecompressor { |
1209 | private: |
1210 | Source* reader_; // Underlying source of bytes to decompress |
1211 | const char* ip_; // Points to next buffered byte |
1212 | const char* ip_limit_; // Points just past buffered bytes |
  // If ip < ip_limit_min_maxtaglen_ it's safe to read kMaximumTagLength from
  // the buffer.
1215 | const char* ip_limit_min_maxtaglen_; |
1216 | uint32_t peeked_; // Bytes peeked from reader (need to skip) |
1217 | bool eof_; // Hit end of input without an error? |
1218 | char scratch_[kMaximumTagLength]; // See RefillTag(). |
1219 | |
1220 | // Ensure that all of the tag metadata for the next tag is available |
1221 | // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even |
1222 | // if (ip_limit_ - ip_ < 5). |
1223 | // |
1224 | // Returns true on success, false on error or end of input. |
1225 | bool RefillTag(); |
1226 | |
1227 | void ResetLimit(const char* ip) { |
1228 | ip_limit_min_maxtaglen_ = |
1229 | ip_limit_ - std::min<ptrdiff_t>(ip_limit_ - ip, kMaximumTagLength - 1); |
1230 | } |
1231 | |
1232 | public: |
1233 | explicit SnappyDecompressor(Source* reader) |
1234 | : reader_(reader), ip_(NULL), ip_limit_(NULL), peeked_(0), eof_(false) {} |
1235 | |
1236 | ~SnappyDecompressor() { |
1237 | // Advance past any bytes we peeked at from the reader |
1238 | reader_->Skip(peeked_); |
1239 | } |
1240 | |
1241 | // Returns true iff we have hit the end of the input without an error. |
1242 | bool eof() const { return eof_; } |
1243 | |
1244 | // Read the uncompressed length stored at the start of the compressed data. |
1245 | // On success, stores the length in *result and returns true. |
1246 | // On failure, returns false. |
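  // For example, an uncompressed length of 300 is stored as the two varint
  // bytes 0xAC 0x02 (seven payload bits per byte, least significant group
  // first), which the loop below reassembles as 44 + (2 << 7) == 300.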
1247 | bool ReadUncompressedLength(uint32_t* result) { |
1248 | assert(ip_ == NULL); // Must not have read anything yet |
1249 | // Length is encoded in 1..5 bytes |
1250 | *result = 0; |
1251 | uint32_t shift = 0; |
1252 | while (true) { |
1253 | if (shift >= 32) return false; |
1254 | size_t n; |
1255 | const char* ip = reader_->Peek(&n); |
1256 | if (n == 0) return false; |
1257 | const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip)); |
1258 | reader_->Skip(1); |
1259 | uint32_t val = c & 0x7f; |
1260 | if (LeftShiftOverflows(static_cast<uint8_t>(val), shift)) return false; |
1261 | *result |= val << shift; |
1262 | if (c < 128) { |
1263 | break; |
1264 | } |
1265 | shift += 7; |
1266 | } |
1267 | return true; |
1268 | } |
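// Editor's note: the preamble read above is a little-endian base-128 varint,
// 7 payload bits per byte with the high bit as a continuation flag. A worked
// example (illustrative, not part of the original source): an uncompressed
// length of 2097150 (0x1FFFFE) is stored as the three bytes 0xFE 0xFF 0x7F:
//   0xFE -> payload 0x7E at shift 0, continuation bit set
//   0xFF -> payload 0x7F at shift 7, continuation bit set
//   0x7F -> payload 0x7F at shift 14, high bit clear, so decoding stops
//   result = 0x7E | (0x7F << 7) | (0x7F << 14) = 0x1FFFFE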
1269 | |
1270 | // Process the next item found in the input. |
1271 | // Returns true if successful, false on error or end of input. |
1272 | template <class Writer> |
1273 | #if defined(__GNUC__) && defined(__x86_64__) |
1274 | __attribute__((aligned(32))) |
1275 | #endif |
1276 | void |
1277 | DecompressAllTags(Writer* writer) { |
1278 | const char* ip = ip_; |
1279 | ResetLimit(ip); |
1280 | auto op = writer->GetOutputPtr(); |
1281 | // We could have put this refill fragment only at the beginning of the loop. |
1282 | // However, duplicating it at the end of each branch gives the compiler more |
1283 | // scope to optimize the <ip_limit_ - ip> expression based on the local |
1284 | // context, which overall increases speed. |
1285 | #define MAYBE_REFILL() \ |
1286 | if (SNAPPY_PREDICT_FALSE(ip >= ip_limit_min_maxtaglen_)) { \ |
1287 | ip_ = ip; \ |
1288 | if (SNAPPY_PREDICT_FALSE(!RefillTag())) goto exit; \ |
1289 | ip = ip_; \ |
1290 | ResetLimit(ip); \ |
1291 | } \ |
1292 | preload = static_cast<uint8_t>(*ip) |
1293 | |
1294 | // At the start of the for loop below the least significant byte of preload |
1295 | // contains the tag. |
1296 | uint32_t preload; |
1297 | MAYBE_REFILL(); |
1298 | for (;;) { |
1299 | { |
1300 | ptrdiff_t op_limit_min_slop; |
1301 | auto op_base = writer->GetBase(&op_limit_min_slop); |
1302 | if (op_base) { |
1303 | auto res = |
1304 | DecompressBranchless(reinterpret_cast<const uint8_t*>(ip), |
1305 | reinterpret_cast<const uint8_t*>(ip_limit_), |
1306 | op - op_base, op_base, op_limit_min_slop); |
1307 | ip = reinterpret_cast<const char*>(res.first); |
1308 | op = op_base + res.second; |
1309 | MAYBE_REFILL(); |
1310 | } |
1311 | } |
1312 | const uint8_t c = static_cast<uint8_t>(preload); |
1313 | ip++; |
1314 | |
1315 | // Ratio of iterations that have LITERAL vs non-LITERAL for different |
1316 | // inputs. |
1317 | // |
1318 | // input LITERAL NON_LITERAL |
1319 | // ----------------------------------- |
1320 | // html|html4|cp 23% 77% |
1321 | // urls 36% 64% |
1322 | // jpg 47% 53% |
1323 | // pdf 19% 81% |
1324 | // txt[1-4] 25% 75% |
1325 | // pb 24% 76% |
1326 | // bin 24% 76% |
1327 | if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) { |
1328 | size_t literal_length = (c >> 2) + 1u; |
1329 | if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length, &op)) { |
1330 | assert(literal_length < 61); |
1331 | ip += literal_length; |
1332 | // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend() |
1333 | // will not return true unless there's already at least five spare |
1334 | // bytes in addition to the literal. |
1335 | preload = static_cast<uint8_t>(*ip); |
1336 | continue; |
1337 | } |
1338 | if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) { |
1339 | // Long literal. |
1340 | const size_t literal_length_length = literal_length - 60; |
1341 | literal_length = |
1342 | ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) + |
1343 | 1; |
1344 | ip += literal_length_length; |
1345 | } |
1346 | |
1347 | size_t avail = ip_limit_ - ip; |
1348 | while (avail < literal_length) { |
1349 | if (!writer->Append(ip, avail, &op)) goto exit; |
1350 | literal_length -= avail; |
1351 | reader_->Skip(peeked_); |
1352 | size_t n; |
1353 | ip = reader_->Peek(&n); |
1354 | avail = n; |
1355 | peeked_ = avail; |
1356 | if (avail == 0) goto exit; |
1357 | ip_limit_ = ip + avail; |
1358 | ResetLimit(ip); |
1359 | } |
1360 | if (!writer->Append(ip, literal_length, &op)) goto exit; |
1361 | ip += literal_length; |
1362 | MAYBE_REFILL(); |
1363 | } else { |
1364 | if (SNAPPY_PREDICT_FALSE((c & 3) == COPY_4_BYTE_OFFSET)) { |
1365 | const size_t copy_offset = LittleEndian::Load32(ip); |
1366 | const size_t length = (c >> 2) + 1; |
1367 | ip += 4; |
1368 | |
1369 | if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit; |
1370 | } else { |
1371 | const ptrdiff_t entry = kLengthMinusOffset[c]; |
1372 | preload = LittleEndian::Load32(ip); |
1373 | const uint32_t trailer = ExtractLowBytes(preload, c & 3); |
1374 | const uint32_t length = entry & 0xff; |
1375 | assert(length > 0); |
1376 | |
1377 | // copy_offset/256 is encoded in bits 8..10. By just fetching |
1378 | // those bits, we get copy_offset (since the bit-field starts at |
1379 | // bit 8). |
1380 | const uint32_t copy_offset = trailer - entry + length; |
1381 | if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit; |
1382 | |
1383 | ip += (c & 3); |
1384 | // By using the result of the previous load we reduce the critical |
1385 | // dependency chain of ip to 4 cycles. |
1386 | preload >>= (c & 3) * 8; |
1387 | if (ip < ip_limit_min_maxtaglen_) continue; |
1388 | } |
1389 | MAYBE_REFILL(); |
1390 | } |
1391 | } |
1392 | #undef MAYBE_REFILL |
1393 | exit: |
1394 | writer->SetOutputPtr(op); |
1395 | } |
1396 | }; |
1397 | |
1398 | constexpr uint32_t CalculateNeeded(uint8_t tag) { |
1399 | return ((tag & 3) == 0 && tag >= (60 * 4)) |
1400 | ? (tag >> 2) - 58 |
1401 | : (0x05030201 >> ((tag * 8) & 31)) & 0xFF; |
1402 | } |
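// Editor's worked example of the expression above (illustrative, not from the
// original source). "Needed" counts the tag byte itself plus any extra bytes:
//   tag & 3 == 0, tag <  240 (short literal) -> byte 0 of 0x05030201 == 1
//   tag & 3 == 1 (copy, 1-byte offset)       -> byte 1 of 0x05030201 == 2
//   tag & 3 == 2 (copy, 2-byte offset)       -> byte 2 of 0x05030201 == 3
//   tag & 3 == 3 (copy, 4-byte offset)       -> byte 3 of 0x05030201 == 5
//   tag & 3 == 0, tag >= 240 (long literal)  -> (tag >> 2) - 58, i.e. 2..5
// For instance, CalculateNeeded(0xF0) == (0xF0 >> 2) - 58 == 2: the tag byte
// plus one literal-length byte.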
1403 | |
1404 | #if __cplusplus >= 201402L |
1405 | constexpr bool VerifyCalculateNeeded() { |
for (int i = 0; i < 256; i++) {
1407 | if (CalculateNeeded(i) != (char_table[i] >> 11) + 1) return false; |
1408 | } |
1409 | return true; |
1410 | } |
1411 | |
1412 | // Make sure CalculateNeeded is correct by verifying it against the established |
1413 | // table encoding the number of added bytes needed. |
static_assert(VerifyCalculateNeeded(), "");
1415 | #endif // c++14 |
1416 | |
1417 | bool SnappyDecompressor::RefillTag() { |
1418 | const char* ip = ip_; |
1419 | if (ip == ip_limit_) { |
1420 | // Fetch a new fragment from the reader |
1421 | reader_->Skip(peeked_); // All peeked bytes are used up |
1422 | size_t n; |
1423 | ip = reader_->Peek(&n); |
1424 | peeked_ = n; |
1425 | eof_ = (n == 0); |
1426 | if (eof_) return false; |
1427 | ip_limit_ = ip + n; |
1428 | } |
1429 | |
1430 | // Read the tag character |
1431 | assert(ip < ip_limit_); |
1432 | const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip)); |
// At this point make sure that the data for the next tag is consecutive.
//   For copy 1 this means the next 2 bytes (tag and 1-byte offset).
//   For copy 2, the next 3 bytes (tag and 2-byte offset).
//   For copy 4, the next 5 bytes (tag and 4-byte offset).
// For all small literals we only need 1 byte, but for literals 60...63 the
// length is encoded in 1...4 extra bytes.
1439 | const uint32_t needed = CalculateNeeded(c); |
1440 | assert(needed <= sizeof(scratch_)); |
1441 | |
1442 | // Read more bytes from reader if needed |
1443 | uint32_t nbuf = ip_limit_ - ip; |
1444 | if (nbuf < needed) { |
1445 | // Stitch together bytes from ip and reader to form the word |
1446 | // contents. We store the needed bytes in "scratch_". They |
1447 | // will be consumed immediately by the caller since we do not |
1448 | // read more than we need. |
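// (Editor's illustration: if, say, a 5-byte copy-4 tag straddles two
// fragments with only 2 bytes left in the current one, those 2 bytes are
// moved into scratch_, 3 more bytes are appended from the reader below, and
// ip_/ip_limit_ are then pointed at scratch_ so the caller sees the whole
// tag contiguously.)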
1449 | std::memmove(scratch_, ip, nbuf); |
1450 | reader_->Skip(peeked_); // All peeked bytes are used up |
1451 | peeked_ = 0; |
1452 | while (nbuf < needed) { |
1453 | size_t length; |
1454 | const char* src = reader_->Peek(&length); |
1455 | if (length == 0) return false; |
1456 | uint32_t to_add = std::min<uint32_t>(needed - nbuf, length); |
1457 | std::memcpy(scratch_ + nbuf, src, to_add); |
1458 | nbuf += to_add; |
1459 | reader_->Skip(to_add); |
1460 | } |
1461 | assert(nbuf == needed); |
1462 | ip_ = scratch_; |
1463 | ip_limit_ = scratch_ + needed; |
1464 | } else if (nbuf < kMaximumTagLength) { |
1465 | // Have enough bytes, but move into scratch_ so that we do not |
1466 | // read past end of input |
1467 | std::memmove(scratch_, ip, nbuf); |
1468 | reader_->Skip(peeked_); // All peeked bytes are used up |
1469 | peeked_ = 0; |
1470 | ip_ = scratch_; |
1471 | ip_limit_ = scratch_ + nbuf; |
1472 | } else { |
1473 | // Pass pointer to buffer returned by reader_. |
1474 | ip_ = ip; |
1475 | } |
1476 | return true; |
1477 | } |
1478 | |
1479 | template <typename Writer> |
1480 | static bool InternalUncompress(Source* r, Writer* writer) { |
1481 | // Read the uncompressed length from the front of the compressed input |
1482 | SnappyDecompressor decompressor(r); |
1483 | uint32_t uncompressed_len = 0; |
1484 | if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false; |
1485 | |
1486 | return InternalUncompressAllTags(&decompressor, writer, r->Available(), |
1487 | uncompressed_len); |
1488 | } |
1489 | |
1490 | template <typename Writer> |
1491 | static bool InternalUncompressAllTags(SnappyDecompressor* decompressor, |
1492 | Writer* writer, uint32_t compressed_len, |
1493 | uint32_t uncompressed_len) { |
Report("snappy_uncompress", compressed_len, uncompressed_len);
1495 | |
1496 | writer->SetExpectedLength(uncompressed_len); |
1497 | |
1498 | // Process the entire input |
1499 | decompressor->DecompressAllTags(writer); |
1500 | writer->Flush(); |
1501 | return (decompressor->eof() && writer->CheckLength()); |
1502 | } |
1503 | |
1504 | bool GetUncompressedLength(Source* source, uint32_t* result) { |
1505 | SnappyDecompressor decompressor(source); |
1506 | return decompressor.ReadUncompressedLength(result); |
1507 | } |
1508 | |
1509 | size_t Compress(Source* reader, Sink* writer) { |
1510 | size_t written = 0; |
1511 | size_t N = reader->Available(); |
1512 | const size_t uncompressed_size = N; |
1513 | char ulength[Varint::kMax32]; |
1514 | char* p = Varint::Encode32(ulength, N); |
1515 | writer->Append(ulength, p - ulength); |
1516 | written += (p - ulength); |
1517 | |
1518 | internal::WorkingMemory wmem(N); |
1519 | |
1520 | while (N > 0) { |
1521 | // Get next block to compress (without copying if possible) |
1522 | size_t fragment_size; |
1523 | const char* fragment = reader->Peek(&fragment_size); |
1524 | assert(fragment_size != 0); // premature end of input |
1525 | const size_t num_to_read = std::min(N, kBlockSize); |
1526 | size_t bytes_read = fragment_size; |
1527 | |
1528 | size_t pending_advance = 0; |
1529 | if (bytes_read >= num_to_read) { |
1530 | // Buffer returned by reader is large enough |
1531 | pending_advance = num_to_read; |
1532 | fragment_size = num_to_read; |
1533 | } else { |
1534 | char* scratch = wmem.GetScratchInput(); |
1535 | std::memcpy(scratch, fragment, bytes_read); |
1536 | reader->Skip(bytes_read); |
1537 | |
1538 | while (bytes_read < num_to_read) { |
1539 | fragment = reader->Peek(&fragment_size); |
1540 | size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read); |
1541 | std::memcpy(scratch + bytes_read, fragment, n); |
1542 | bytes_read += n; |
1543 | reader->Skip(n); |
1544 | } |
1545 | assert(bytes_read == num_to_read); |
1546 | fragment = scratch; |
1547 | fragment_size = num_to_read; |
1548 | } |
1549 | assert(fragment_size == num_to_read); |
1550 | |
1551 | // Get encoding table for compression |
1552 | int table_size; |
1553 | uint16_t* table = wmem.GetHashTable(num_to_read, &table_size); |
1554 | |
1555 | // Compress input_fragment and append to dest |
1556 | const int max_output = MaxCompressedLength(num_to_read); |
1557 | |
1558 | // Need a scratch buffer for the output, in case the byte sink doesn't |
1559 | // have room for us directly. |
1560 | |
1561 | // Since we encode kBlockSize regions followed by a region |
1562 | // which is <= kBlockSize in length, a previously allocated |
1563 | // scratch_output[] region is big enough for this iteration. |
1564 | char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput()); |
1565 | char* end = internal::CompressFragment(fragment, fragment_size, dest, table, |
1566 | table_size); |
1567 | writer->Append(dest, end - dest); |
1568 | written += (end - dest); |
1569 | |
1570 | N -= num_to_read; |
1571 | reader->Skip(pending_advance); |
1572 | } |
1573 | |
Report("snappy_compress", written, uncompressed_size);
1575 | |
1576 | return written; |
1577 | } |
1578 | |
1579 | // ----------------------------------------------------------------------- |
1580 | // IOVec interfaces |
1581 | // ----------------------------------------------------------------------- |
1582 | |
1583 | // A `Source` implementation that yields the contents of an `iovec` array. Note |
1584 | // that `total_size` is the total number of bytes to be read from the elements |
1585 | // of `iov` (_not_ the total number of elements in `iov`). |
1586 | class SnappyIOVecReader : public Source { |
1587 | public: |
1588 | SnappyIOVecReader(const struct iovec* iov, size_t total_size) |
1589 | : curr_iov_(iov), |
1590 | curr_pos_(total_size > 0 ? reinterpret_cast<const char*>(iov->iov_base) |
1591 | : nullptr), |
1592 | curr_size_remaining_(total_size > 0 ? iov->iov_len : 0), |
1593 | total_size_remaining_(total_size) { |
1594 | // Skip empty leading `iovec`s. |
1595 | if (total_size > 0 && curr_size_remaining_ == 0) Advance(); |
1596 | } |
1597 | |
1598 | ~SnappyIOVecReader() = default; |
1599 | |
1600 | size_t Available() const { return total_size_remaining_; } |
1601 | |
1602 | const char* Peek(size_t* len) { |
1603 | *len = curr_size_remaining_; |
1604 | return curr_pos_; |
1605 | } |
1606 | |
1607 | void Skip(size_t n) { |
1608 | while (n >= curr_size_remaining_ && n > 0) { |
1609 | n -= curr_size_remaining_; |
1610 | Advance(); |
1611 | } |
1612 | curr_size_remaining_ -= n; |
1613 | total_size_remaining_ -= n; |
1614 | curr_pos_ += n; |
1615 | } |
1616 | |
1617 | private: |
1618 | // Advances to the next nonempty `iovec` and updates related variables. |
1619 | void Advance() { |
1620 | do { |
1621 | assert(total_size_remaining_ >= curr_size_remaining_); |
1622 | total_size_remaining_ -= curr_size_remaining_; |
1623 | if (total_size_remaining_ == 0) { |
1624 | curr_pos_ = nullptr; |
1625 | curr_size_remaining_ = 0; |
1626 | return; |
1627 | } |
1628 | ++curr_iov_; |
1629 | curr_pos_ = reinterpret_cast<const char*>(curr_iov_->iov_base); |
1630 | curr_size_remaining_ = curr_iov_->iov_len; |
1631 | } while (curr_size_remaining_ == 0); |
1632 | } |
1633 | |
1634 | // The `iovec` currently being read. |
1635 | const struct iovec* curr_iov_; |
1636 | // The location in `curr_iov_` currently being read. |
1637 | const char* curr_pos_; |
1638 | // The amount of unread data in `curr_iov_`. |
1639 | size_t curr_size_remaining_; |
1640 | // The amount of unread data in the entire input array. |
1641 | size_t total_size_remaining_; |
1642 | }; |
1643 | |
1644 | // A type that writes to an iovec. |
1645 | // Note that this is not a "ByteSink", but a type that matches the |
1646 | // Writer template argument to SnappyDecompressor::DecompressAllTags(). |
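// For reference, a "Writer" in this sense provides roughly the following
// interface (editor's summary; the authoritative requirements are simply
// whatever DecompressAllTags()/DecompressBranchless() call):
//
//   void SetExpectedLength(size_t len);
//   bool CheckLength() const;
//   bool Append(const char* ip, size_t len, T* op);
//   bool TryFastAppend(const char* ip, size_t available, size_t len, T* op);
//   bool AppendFromSelf(size_t offset, size_t len, T* op);
//   void Flush();
//   T GetOutputPtr();
//   T GetBase(ptrdiff_t* op_limit_min_slop);
//   void SetOutputPtr(T op);
//
// where T is char* for the pointer-based writers and size_t for the
// validating writer further below.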
1647 | class SnappyIOVecWriter { |
1648 | private: |
1649 | // output_iov_end_ is set to iov + count and used to determine when |
1650 | // the end of the iovs is reached. |
1651 | const struct iovec* output_iov_end_; |
1652 | |
1653 | #if !defined(NDEBUG) |
1654 | const struct iovec* output_iov_; |
1655 | #endif // !defined(NDEBUG) |
1656 | |
1657 | // Current iov that is being written into. |
1658 | const struct iovec* curr_iov_; |
1659 | |
1660 | // Pointer to current iov's write location. |
1661 | char* curr_iov_output_; |
1662 | |
1663 | // Remaining bytes to write into curr_iov_output. |
1664 | size_t curr_iov_remaining_; |
1665 | |
1666 | // Total bytes decompressed into output_iov_ so far. |
1667 | size_t total_written_; |
1668 | |
1669 | // Maximum number of bytes that will be decompressed into output_iov_. |
1670 | size_t output_limit_; |
1671 | |
1672 | static inline char* GetIOVecPointer(const struct iovec* iov, size_t offset) { |
1673 | return reinterpret_cast<char*>(iov->iov_base) + offset; |
1674 | } |
1675 | |
1676 | public: |
1677 | // Does not take ownership of iov. iov must be valid during the |
1678 | // entire lifetime of the SnappyIOVecWriter. |
1679 | inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count) |
1680 | : output_iov_end_(iov + iov_count), |
1681 | #if !defined(NDEBUG) |
1682 | output_iov_(iov), |
1683 | #endif // !defined(NDEBUG) |
1684 | curr_iov_(iov), |
1685 | curr_iov_output_(iov_count ? reinterpret_cast<char*>(iov->iov_base) |
1686 | : nullptr), |
1687 | curr_iov_remaining_(iov_count ? iov->iov_len : 0), |
1688 | total_written_(0), |
1689 | output_limit_(-1) { |
1690 | } |
1691 | |
1692 | inline void SetExpectedLength(size_t len) { output_limit_ = len; } |
1693 | |
1694 | inline bool CheckLength() const { return total_written_ == output_limit_; } |
1695 | |
1696 | inline bool Append(const char* ip, size_t len, char**) { |
1697 | if (total_written_ + len > output_limit_) { |
1698 | return false; |
1699 | } |
1700 | |
1701 | return AppendNoCheck(ip, len); |
1702 | } |
1703 | |
1704 | char* GetOutputPtr() { return nullptr; } |
1705 | char* GetBase(ptrdiff_t*) { return nullptr; } |
1706 | void SetOutputPtr(char* op) { |
1707 | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
1708 | (void)op; |
1709 | } |
1710 | |
1711 | inline bool AppendNoCheck(const char* ip, size_t len) { |
1712 | while (len > 0) { |
1713 | if (curr_iov_remaining_ == 0) { |
1714 | // This iovec is full. Go to the next one. |
1715 | if (curr_iov_ + 1 >= output_iov_end_) { |
1716 | return false; |
1717 | } |
1718 | ++curr_iov_; |
1719 | curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base); |
1720 | curr_iov_remaining_ = curr_iov_->iov_len; |
1721 | } |
1722 | |
1723 | const size_t to_write = std::min(len, curr_iov_remaining_); |
1724 | std::memcpy(curr_iov_output_, ip, to_write); |
1725 | curr_iov_output_ += to_write; |
1726 | curr_iov_remaining_ -= to_write; |
1727 | total_written_ += to_write; |
1728 | ip += to_write; |
1729 | len -= to_write; |
1730 | } |
1731 | |
1732 | return true; |
1733 | } |
1734 | |
1735 | inline bool TryFastAppend(const char* ip, size_t available, size_t len, |
1736 | char**) { |
1737 | const size_t space_left = output_limit_ - total_written_; |
1738 | if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 && |
1739 | curr_iov_remaining_ >= 16) { |
1740 | // Fast path, used for the majority (about 95%) of invocations. |
1741 | UnalignedCopy128(ip, curr_iov_output_); |
1742 | curr_iov_output_ += len; |
1743 | curr_iov_remaining_ -= len; |
1744 | total_written_ += len; |
1745 | return true; |
1746 | } |
1747 | |
1748 | return false; |
1749 | } |
1750 | |
1751 | inline bool AppendFromSelf(size_t offset, size_t len, char**) { |
1752 | // See SnappyArrayWriter::AppendFromSelf for an explanation of |
1753 | // the "offset - 1u" trick. |
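// (Editor's note: offset is unsigned, so offset == 0 wraps around to a huge
// value; the single comparison below therefore rejects both offset == 0 and
// offset > total_written_.)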
1754 | if (offset - 1u >= total_written_) { |
1755 | return false; |
1756 | } |
1757 | const size_t space_left = output_limit_ - total_written_; |
1758 | if (len > space_left) { |
1759 | return false; |
1760 | } |
1761 | |
1762 | // Locate the iovec from which we need to start the copy. |
1763 | const iovec* from_iov = curr_iov_; |
1764 | size_t from_iov_offset = curr_iov_->iov_len - curr_iov_remaining_; |
1765 | while (offset > 0) { |
1766 | if (from_iov_offset >= offset) { |
1767 | from_iov_offset -= offset; |
1768 | break; |
1769 | } |
1770 | |
1771 | offset -= from_iov_offset; |
1772 | --from_iov; |
1773 | #if !defined(NDEBUG) |
1774 | assert(from_iov >= output_iov_); |
1775 | #endif // !defined(NDEBUG) |
1776 | from_iov_offset = from_iov->iov_len; |
1777 | } |
1778 | |
// Copy <len> bytes starting from the iovec pointed to by from_iov to the
// current iovec.
1781 | while (len > 0) { |
1782 | assert(from_iov <= curr_iov_); |
1783 | if (from_iov != curr_iov_) { |
1784 | const size_t to_copy = |
1785 | std::min(from_iov->iov_len - from_iov_offset, len); |
1786 | AppendNoCheck(GetIOVecPointer(from_iov, from_iov_offset), to_copy); |
1787 | len -= to_copy; |
1788 | if (len > 0) { |
1789 | ++from_iov; |
1790 | from_iov_offset = 0; |
1791 | } |
1792 | } else { |
1793 | size_t to_copy = curr_iov_remaining_; |
1794 | if (to_copy == 0) { |
1795 | // This iovec is full. Go to the next one. |
1796 | if (curr_iov_ + 1 >= output_iov_end_) { |
1797 | return false; |
1798 | } |
1799 | ++curr_iov_; |
1800 | curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base); |
1801 | curr_iov_remaining_ = curr_iov_->iov_len; |
1802 | continue; |
1803 | } |
1804 | if (to_copy > len) { |
1805 | to_copy = len; |
1806 | } |
1807 | assert(to_copy > 0); |
1808 | |
1809 | IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset), |
1810 | curr_iov_output_, curr_iov_output_ + to_copy, |
1811 | curr_iov_output_ + curr_iov_remaining_); |
1812 | curr_iov_output_ += to_copy; |
1813 | curr_iov_remaining_ -= to_copy; |
1814 | from_iov_offset += to_copy; |
1815 | total_written_ += to_copy; |
1816 | len -= to_copy; |
1817 | } |
1818 | } |
1819 | |
1820 | return true; |
1821 | } |
1822 | |
1823 | inline void Flush() {} |
1824 | }; |
1825 | |
1826 | bool RawUncompressToIOVec(const char* compressed, size_t compressed_length, |
1827 | const struct iovec* iov, size_t iov_cnt) { |
1828 | ByteArraySource reader(compressed, compressed_length); |
1829 | return RawUncompressToIOVec(&reader, iov, iov_cnt); |
1830 | } |
1831 | |
1832 | bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov, |
1833 | size_t iov_cnt) { |
1834 | SnappyIOVecWriter output(iov, iov_cnt); |
1835 | return InternalUncompress(compressed, &output); |
1836 | } |
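// Illustrative usage sketch (editor's note; the buffers and lengths are
// hypothetical):
//
//   struct iovec iov[2];
//   iov[0].iov_base = buf0; iov[0].iov_len = sizeof(buf0);
//   iov[1].iov_base = buf1; iov[1].iov_len = sizeof(buf1);
//   // Succeeds only if the uncompressed data fits entirely in iov.
//   bool ok = snappy::RawUncompressToIOVec(compressed, compressed_length,
//                                          iov, 2);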
1837 | |
1838 | // ----------------------------------------------------------------------- |
1839 | // Flat array interfaces |
1840 | // ----------------------------------------------------------------------- |
1841 | |
1842 | // A type that writes to a flat array. |
1843 | // Note that this is not a "ByteSink", but a type that matches the |
1844 | // Writer template argument to SnappyDecompressor::DecompressAllTags(). |
1845 | class SnappyArrayWriter { |
1846 | private: |
1847 | char* base_; |
1848 | char* op_; |
1849 | char* op_limit_; |
1850 | // If op < op_limit_min_slop_ then it's safe to unconditionally write |
1851 | // kSlopBytes starting at op. |
1852 | char* op_limit_min_slop_; |
1853 | |
1854 | public: |
1855 | inline explicit SnappyArrayWriter(char* dst) |
1856 | : base_(dst), |
1857 | op_(dst), |
1858 | op_limit_(dst), |
1859 | op_limit_min_slop_(dst) {} // Safe default see invariant. |
1860 | |
1861 | inline void SetExpectedLength(size_t len) { |
1862 | op_limit_ = op_ + len; |
1863 | // Prevent pointer from being past the buffer. |
1864 | op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, len); |
1865 | } |
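// (Editor's note: with len >= kSlopBytes - 1 this gives
// op_limit_min_slop_ == op_limit_ - (kSlopBytes - 1), so op_ <
// op_limit_min_slop_ implies op_ + kSlopBytes <= op_limit_, i.e. a full
// kSlopBytes store at op_ stays within the expected output. For shorter
// outputs op_limit_min_slop_ collapses to base_ and the slop fast path is
// simply never taken.)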
1866 | |
1867 | inline bool CheckLength() const { return op_ == op_limit_; } |
1868 | |
1869 | char* GetOutputPtr() { return op_; } |
1870 | char* GetBase(ptrdiff_t* op_limit_min_slop) { |
1871 | *op_limit_min_slop = op_limit_min_slop_ - base_; |
1872 | return base_; |
1873 | } |
1874 | void SetOutputPtr(char* op) { op_ = op; } |
1875 | |
1876 | inline bool Append(const char* ip, size_t len, char** op_p) { |
1877 | char* op = *op_p; |
1878 | const size_t space_left = op_limit_ - op; |
1879 | if (space_left < len) return false; |
1880 | std::memcpy(op, ip, len); |
1881 | *op_p = op + len; |
1882 | return true; |
1883 | } |
1884 | |
1885 | inline bool TryFastAppend(const char* ip, size_t available, size_t len, |
1886 | char** op_p) { |
1887 | char* op = *op_p; |
1888 | const size_t space_left = op_limit_ - op; |
1889 | if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) { |
1890 | // Fast path, used for the majority (about 95%) of invocations. |
1891 | UnalignedCopy128(ip, op); |
1892 | *op_p = op + len; |
1893 | return true; |
1894 | } else { |
1895 | return false; |
1896 | } |
1897 | } |
1898 | |
1899 | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
1900 | inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) { |
1901 | assert(len > 0); |
1902 | char* const op = *op_p; |
1903 | assert(op >= base_); |
1904 | char* const op_end = op + len; |
1905 | |
1906 | // Check if we try to append from before the start of the buffer. |
1907 | if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - base_) < offset)) |
1908 | return false; |
1909 | |
1910 | if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) || |
1911 | op >= op_limit_min_slop_ || offset < len)) { |
1912 | if (op_end > op_limit_ || offset == 0) return false; |
1913 | *op_p = IncrementalCopy(op - offset, op, op_end, op_limit_); |
1914 | return true; |
1915 | } |
1916 | std::memmove(op, op - offset, kSlopBytes); |
1917 | *op_p = op_end; |
1918 | return true; |
1919 | } |
1920 | inline size_t Produced() const { |
1921 | assert(op_ >= base_); |
1922 | return op_ - base_; |
1923 | } |
1924 | inline void Flush() {} |
1925 | }; |
1926 | |
1927 | bool RawUncompress(const char* compressed, size_t compressed_length, |
1928 | char* uncompressed) { |
1929 | ByteArraySource reader(compressed, compressed_length); |
1930 | return RawUncompress(&reader, uncompressed); |
1931 | } |
1932 | |
1933 | bool RawUncompress(Source* compressed, char* uncompressed) { |
1934 | SnappyArrayWriter output(uncompressed); |
1935 | return InternalUncompress(compressed, &output); |
1936 | } |
1937 | |
1938 | bool Uncompress(const char* compressed, size_t compressed_length, |
1939 | std::string* uncompressed) { |
1940 | size_t ulength; |
1941 | if (!GetUncompressedLength(compressed, compressed_length, &ulength)) { |
1942 | return false; |
1943 | } |
1944 | // On 32-bit builds: max_size() < kuint32max. Check for that instead |
1945 | // of crashing (e.g., consider externally specified compressed data). |
1946 | if (ulength > uncompressed->max_size()) { |
1947 | return false; |
1948 | } |
1949 | STLStringResizeUninitialized(uncompressed, ulength); |
1950 | return RawUncompress(compressed, compressed_length, |
1951 | string_as_array(uncompressed)); |
1952 | } |
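// Illustrative round trip (editor's note; `original` is a hypothetical input
// string):
//
//   std::string packed, unpacked;
//   snappy::Compress(original.data(), original.size(), &packed);
//   assert(snappy::IsValidCompressedBuffer(packed.data(), packed.size()));
//   snappy::Uncompress(packed.data(), packed.size(), &unpacked);
//   assert(unpacked == original);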
1953 | |
1954 | // A Writer that drops everything on the floor and just does validation |
1955 | class SnappyDecompressionValidator { |
1956 | private: |
1957 | size_t expected_; |
1958 | size_t produced_; |
1959 | |
1960 | public: |
1961 | inline SnappyDecompressionValidator() : expected_(0), produced_(0) {} |
1962 | inline void SetExpectedLength(size_t len) { expected_ = len; } |
1963 | size_t GetOutputPtr() { return produced_; } |
1964 | size_t GetBase(ptrdiff_t* op_limit_min_slop) { |
1965 | *op_limit_min_slop = std::numeric_limits<ptrdiff_t>::max() - kSlopBytes + 1; |
1966 | return 1; |
1967 | } |
1968 | void SetOutputPtr(size_t op) { produced_ = op; } |
1969 | inline bool CheckLength() const { return expected_ == produced_; } |
1970 | inline bool Append(const char* ip, size_t len, size_t* produced) { |
1971 | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
1972 | (void)ip; |
1973 | |
1974 | *produced += len; |
1975 | return *produced <= expected_; |
1976 | } |
1977 | inline bool TryFastAppend(const char* ip, size_t available, size_t length, |
1978 | size_t* produced) { |
1979 | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
1980 | (void)ip; |
1981 | (void)available; |
1982 | (void)length; |
1983 | (void)produced; |
1984 | |
1985 | return false; |
1986 | } |
1987 | inline bool AppendFromSelf(size_t offset, size_t len, size_t* produced) { |
1988 | // See SnappyArrayWriter::AppendFromSelf for an explanation of |
1989 | // the "offset - 1u" trick. |
1990 | if (*produced <= offset - 1u) return false; |
1991 | *produced += len; |
1992 | return *produced <= expected_; |
1993 | } |
1994 | inline void Flush() {} |
1995 | }; |
1996 | |
1997 | bool IsValidCompressedBuffer(const char* compressed, size_t compressed_length) { |
1998 | ByteArraySource reader(compressed, compressed_length); |
1999 | SnappyDecompressionValidator writer; |
2000 | return InternalUncompress(&reader, &writer); |
2001 | } |
2002 | |
2003 | bool IsValidCompressed(Source* compressed) { |
2004 | SnappyDecompressionValidator writer; |
2005 | return InternalUncompress(compressed, &writer); |
2006 | } |
2007 | |
2008 | void RawCompress(const char* input, size_t input_length, char* compressed, |
2009 | size_t* compressed_length) { |
2010 | ByteArraySource reader(input, input_length); |
2011 | UncheckedByteArraySink writer(compressed); |
2012 | Compress(&reader, &writer); |
2013 | |
2014 | // Compute how many bytes were added |
2015 | *compressed_length = (writer.CurrentDestination() - compressed); |
2016 | } |
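// Illustrative usage sketch (editor's note; `input` and `input_length` are
// hypothetical):
//
//   std::vector<char> output(snappy::MaxCompressedLength(input_length));
//   size_t compressed_length;
//   snappy::RawCompress(input, input_length, output.data(),
//                       &compressed_length);
//   output.resize(compressed_length);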
2017 | |
2018 | void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length, |
2019 | char* compressed, size_t* compressed_length) { |
2020 | SnappyIOVecReader reader(iov, uncompressed_length); |
2021 | UncheckedByteArraySink writer(compressed); |
2022 | Compress(&reader, &writer); |
2023 | |
2024 | // Compute how many bytes were added. |
2025 | *compressed_length = writer.CurrentDestination() - compressed; |
2026 | } |
2027 | |
2028 | size_t Compress(const char* input, size_t input_length, |
2029 | std::string* compressed) { |
2030 | // Pre-grow the buffer to the max length of the compressed output |
2031 | STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length)); |
2032 | |
2033 | size_t compressed_length; |
2034 | RawCompress(input, input_length, string_as_array(compressed), |
2035 | &compressed_length); |
2036 | compressed->erase(compressed_length); |
2037 | return compressed_length; |
2038 | } |
2039 | |
2040 | size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt, |
2041 | std::string* compressed) { |
2042 | // Compute the number of bytes to be compressed. |
2043 | size_t uncompressed_length = 0; |
2044 | for (size_t i = 0; i < iov_cnt; ++i) { |
2045 | uncompressed_length += iov[i].iov_len; |
2046 | } |
2047 | |
2048 | // Pre-grow the buffer to the max length of the compressed output. |
2049 | STLStringResizeUninitialized(compressed, MaxCompressedLength( |
2050 | uncompressed_length)); |
2051 | |
2052 | size_t compressed_length; |
2053 | RawCompressFromIOVec(iov, uncompressed_length, string_as_array(compressed), |
2054 | &compressed_length); |
2055 | compressed->erase(compressed_length); |
2056 | return compressed_length; |
2057 | } |
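// Illustrative usage sketch (editor's note; `header`/`body` and their lengths
// are hypothetical):
//
//   struct iovec iov[2];
//   iov[0].iov_base = header; iov[0].iov_len = header_len;
//   iov[1].iov_base = body;   iov[1].iov_len = body_len;
//   std::string packed;
//   snappy::CompressFromIOVec(iov, 2, &packed);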
2058 | |
2059 | // ----------------------------------------------------------------------- |
2060 | // Sink interface |
2061 | // ----------------------------------------------------------------------- |
2062 | |
2063 | // A type that decompresses into a Sink. The template parameter |
2064 | // Allocator must export one method "char* Allocate(int size);", which |
2065 | // allocates a buffer of "size" and appends that to the destination. |
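// In addition, the allocator must provide "void Flush(size_t size);", which
// is invoked once at the end of decompression (see Flush() below). A minimal
// sketch of the expected shape (editor's illustration; SnappySinkAllocator
// further down is the implementation actually used here):
//
//   class MyAllocator {
//    public:
//     char* Allocate(int size);  // allocate a block and keep track of it
//     void Flush(size_t size);   // hand the first `size` bytes to the dest
//   };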
2066 | template <typename Allocator> |
2067 | class SnappyScatteredWriter { |
2068 | Allocator allocator_; |
2069 | |
2070 | // We need random access into the data generated so far. Therefore |
2071 | // we keep track of all of the generated data as an array of blocks. |
2072 | // All of the blocks except the last have length kBlockSize. |
2073 | std::vector<char*> blocks_; |
2074 | size_t expected_; |
2075 | |
2076 | // Total size of all fully generated blocks so far |
2077 | size_t full_size_; |
2078 | |
2079 | // Pointer into current output block |
2080 | char* op_base_; // Base of output block |
2081 | char* op_ptr_; // Pointer to next unfilled byte in block |
2082 | char* op_limit_; // Pointer just past block |
2083 | // If op < op_limit_min_slop_ then it's safe to unconditionally write |
2084 | // kSlopBytes starting at op. |
2085 | char* op_limit_min_slop_; |
2086 | |
2087 | inline size_t Size() const { return full_size_ + (op_ptr_ - op_base_); } |
2088 | |
2089 | bool SlowAppend(const char* ip, size_t len); |
2090 | bool SlowAppendFromSelf(size_t offset, size_t len); |
2091 | |
2092 | public: |
2093 | inline explicit SnappyScatteredWriter(const Allocator& allocator) |
2094 | : allocator_(allocator), |
2095 | full_size_(0), |
2096 | op_base_(NULL), |
2097 | op_ptr_(NULL), |
2098 | op_limit_(NULL), |
2099 | op_limit_min_slop_(NULL) {} |
2100 | char* GetOutputPtr() { return op_ptr_; } |
2101 | char* GetBase(ptrdiff_t* op_limit_min_slop) { |
2102 | *op_limit_min_slop = op_limit_min_slop_ - op_base_; |
2103 | return op_base_; |
2104 | } |
2105 | void SetOutputPtr(char* op) { op_ptr_ = op; } |
2106 | |
2107 | inline void SetExpectedLength(size_t len) { |
2108 | assert(blocks_.empty()); |
2109 | expected_ = len; |
2110 | } |
2111 | |
2112 | inline bool CheckLength() const { return Size() == expected_; } |
2113 | |
2114 | // Return the number of bytes actually uncompressed so far |
2115 | inline size_t Produced() const { return Size(); } |
2116 | |
2117 | inline bool Append(const char* ip, size_t len, char** op_p) { |
2118 | char* op = *op_p; |
2119 | size_t avail = op_limit_ - op; |
2120 | if (len <= avail) { |
2121 | // Fast path |
2122 | std::memcpy(op, ip, len); |
2123 | *op_p = op + len; |
2124 | return true; |
2125 | } else { |
2126 | op_ptr_ = op; |
2127 | bool res = SlowAppend(ip, len); |
2128 | *op_p = op_ptr_; |
2129 | return res; |
2130 | } |
2131 | } |
2132 | |
2133 | inline bool TryFastAppend(const char* ip, size_t available, size_t length, |
2134 | char** op_p) { |
2135 | char* op = *op_p; |
2136 | const int space_left = op_limit_ - op; |
2137 | if (length <= 16 && available >= 16 + kMaximumTagLength && |
2138 | space_left >= 16) { |
2139 | // Fast path, used for the majority (about 95%) of invocations. |
2140 | UnalignedCopy128(ip, op); |
2141 | *op_p = op + length; |
2142 | return true; |
2143 | } else { |
2144 | return false; |
2145 | } |
2146 | } |
2147 | |
2148 | inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) { |
2149 | char* op = *op_p; |
2150 | assert(op >= op_base_); |
2151 | // Check if we try to append from before the start of the buffer. |
2152 | if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) || |
2153 | static_cast<size_t>(op - op_base_) < offset || |
2154 | op >= op_limit_min_slop_ || offset < len)) { |
2155 | if (offset == 0) return false; |
2156 | if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - op_base_) < offset || |
2157 | op + len > op_limit_)) { |
2158 | op_ptr_ = op; |
2159 | bool res = SlowAppendFromSelf(offset, len); |
2160 | *op_p = op_ptr_; |
2161 | return res; |
2162 | } |
2163 | *op_p = IncrementalCopy(op - offset, op, op + len, op_limit_); |
2164 | return true; |
2165 | } |
2166 | // Fast path |
2167 | char* const op_end = op + len; |
2168 | std::memmove(op, op - offset, kSlopBytes); |
2169 | *op_p = op_end; |
2170 | return true; |
2171 | } |
2172 | |
// Called at the end of decompression. We ask the allocator to write all
// blocks to the sink.
2175 | inline void Flush() { allocator_.Flush(Produced()); } |
2176 | }; |
2177 | |
2178 | template <typename Allocator> |
2179 | bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) { |
2180 | size_t avail = op_limit_ - op_ptr_; |
2181 | while (len > avail) { |
2182 | // Completely fill this block |
2183 | std::memcpy(op_ptr_, ip, avail); |
2184 | op_ptr_ += avail; |
2185 | assert(op_limit_ - op_ptr_ == 0); |
2186 | full_size_ += (op_ptr_ - op_base_); |
2187 | len -= avail; |
2188 | ip += avail; |
2189 | |
2190 | // Bounds check |
2191 | if (full_size_ + len > expected_) return false; |
2192 | |
2193 | // Make new block |
2194 | size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_); |
2195 | op_base_ = allocator_.Allocate(bsize); |
2196 | op_ptr_ = op_base_; |
2197 | op_limit_ = op_base_ + bsize; |
2198 | op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, bsize); |
2199 | |
2200 | blocks_.push_back(op_base_); |
2201 | avail = bsize; |
2202 | } |
2203 | |
2204 | std::memcpy(op_ptr_, ip, len); |
2205 | op_ptr_ += len; |
2206 | return true; |
2207 | } |
2208 | |
2209 | template <typename Allocator> |
2210 | bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset, |
2211 | size_t len) { |
2212 | // Overflow check |
2213 | // See SnappyArrayWriter::AppendFromSelf for an explanation of |
2214 | // the "offset - 1u" trick. |
2215 | const size_t cur = Size(); |
2216 | if (offset - 1u >= cur) return false; |
2217 | if (expected_ - cur < len) return false; |
2218 | |
2219 | // Currently we shouldn't ever hit this path because Compress() chops the |
2220 | // input into blocks and does not create cross-block copies. However, it is |
2221 | // nice if we do not rely on that, since we can get better compression if we |
2222 | // allow cross-block copies and thus might want to change the compressor in |
2223 | // the future. |
// TODO: Replace this with a properly optimized path. This is not
// triggered right now, but it is so slow that it would regress
// performance unacceptably if it were.
2227 | size_t src = cur - offset; |
2228 | char* op = op_ptr_; |
2229 | while (len-- > 0) { |
2230 | char c = blocks_[src >> kBlockLog][src & (kBlockSize - 1)]; |
2231 | if (!Append(&c, 1, &op)) { |
2232 | op_ptr_ = op; |
2233 | return false; |
2234 | } |
2235 | src++; |
2236 | } |
2237 | op_ptr_ = op; |
2238 | return true; |
2239 | } |
2240 | |
2241 | class SnappySinkAllocator { |
2242 | public: |
2243 | explicit SnappySinkAllocator(Sink* dest) : dest_(dest) {} |
2244 | ~SnappySinkAllocator() {} |
2245 | |
2246 | char* Allocate(int size) { |
2247 | Datablock block(new char[size], size); |
2248 | blocks_.push_back(block); |
2249 | return block.data; |
2250 | } |
2251 | |
2252 | // We flush only at the end, because the writer wants |
2253 | // random access to the blocks and once we hand the |
2254 | // block over to the sink, we can't access it anymore. |
2255 | // Also we don't write more than has been actually written |
2256 | // to the blocks. |
2257 | void Flush(size_t size) { |
2258 | size_t size_written = 0; |
2259 | for (Datablock& block : blocks_) { |
2260 | size_t block_size = std::min<size_t>(block.size, size - size_written); |
2261 | dest_->AppendAndTakeOwnership(block.data, block_size, |
2262 | &SnappySinkAllocator::Deleter, NULL); |
2263 | size_written += block_size; |
2264 | } |
2265 | blocks_.clear(); |
2266 | } |
2267 | |
2268 | private: |
2269 | struct Datablock { |
2270 | char* data; |
2271 | size_t size; |
2272 | Datablock(char* p, size_t s) : data(p), size(s) {} |
2273 | }; |
2274 | |
2275 | static void Deleter(void* arg, const char* bytes, size_t size) { |
2276 | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
2277 | (void)arg; |
2278 | (void)size; |
2279 | |
2280 | delete[] bytes; |
2281 | } |
2282 | |
2283 | Sink* dest_; |
2284 | std::vector<Datablock> blocks_; |
2285 | |
2286 | // Note: copying this object is allowed |
2287 | }; |
2288 | |
2289 | size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) { |
2290 | SnappySinkAllocator allocator(uncompressed); |
2291 | SnappyScatteredWriter<SnappySinkAllocator> writer(allocator); |
2292 | InternalUncompress(compressed, &writer); |
2293 | return writer.Produced(); |
2294 | } |
2295 | |
2296 | bool Uncompress(Source* compressed, Sink* uncompressed) { |
2297 | // Read the uncompressed length from the front of the compressed input |
2298 | SnappyDecompressor decompressor(compressed); |
2299 | uint32_t uncompressed_len = 0; |
2300 | if (!decompressor.ReadUncompressedLength(&uncompressed_len)) { |
2301 | return false; |
2302 | } |
2303 | |
2304 | char c; |
2305 | size_t allocated_size; |
2306 | char* buf = uncompressed->GetAppendBufferVariable(1, uncompressed_len, &c, 1, |
2307 | &allocated_size); |
2308 | |
2309 | const size_t compressed_len = compressed->Available(); |
2310 | // If we can get a flat buffer, then use it, otherwise do block by block |
2311 | // uncompression |
2312 | if (allocated_size >= uncompressed_len) { |
2313 | SnappyArrayWriter writer(buf); |
2314 | bool result = InternalUncompressAllTags(&decompressor, &writer, |
2315 | compressed_len, uncompressed_len); |
2316 | uncompressed->Append(buf, writer.Produced()); |
2317 | return result; |
2318 | } else { |
2319 | SnappySinkAllocator allocator(uncompressed); |
2320 | SnappyScatteredWriter<SnappySinkAllocator> writer(allocator); |
2321 | return InternalUncompressAllTags(&decompressor, &writer, compressed_len, |
2322 | uncompressed_len); |
2323 | } |
2324 | } |
2325 | |
2326 | } // namespace snappy |
2327 | |