-rw-r--r--  CMakeLists.txt           |  13
-rw-r--r--  cmake/config.h.in        |   7
-rw-r--r--  snappy-internal.h        |  18
-rw-r--r--  snappy-stubs-internal.h  |  33
-rw-r--r--  snappy.cc                | 259
-rw-r--r--  snappy.h                 |  17
-rw-r--r--  snappy_benchmark.cc      |  52
-rw-r--r--  snappy_unittest.cc       |  80
8 files changed, 394 insertions(+), 85 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6eef485..2a0bc10 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -175,6 +175,19 @@ int main() {
 check_cxx_source_compiles("
 #include <immintrin.h>
 int main() {
+  return _mm_crc32_u32(0, 1);
+}" SNAPPY_HAVE_X86_CRC32)
+
+check_cxx_source_compiles("
+#include <arm_neon.h>
+#include <arm_acle.h>
+int main() {
+  return __crc32cw(0, 1);
+}" SNAPPY_HAVE_NEON_CRC32)
+
+check_cxx_source_compiles("
+#include <immintrin.h>
+int main() {
   return _bzhi_u32(0, 1);
 }" SNAPPY_HAVE_BMI2)
diff --git a/cmake/config.h.in b/cmake/config.h.in
index 5ea2b5a..d1de25c 100644
--- a/cmake/config.h.in
+++ b/cmake/config.h.in
@@ -46,12 +46,19 @@
 /* Define to 1 if you target processors with SSSE3+ and have <tmmintrin.h>. */
 #cmakedefine01 SNAPPY_HAVE_SSSE3
 
+/* Define to 1 if you target processors with SSE4.2 and have <crc32intrin.h>. */
+#cmakedefine01 SNAPPY_HAVE_X86_CRC32
+
 /* Define to 1 if you target processors with BMI2+ and have <bmi2intrin.h>. */
 #cmakedefine01 SNAPPY_HAVE_BMI2
 
 /* Define to 1 if you target processors with NEON and have <arm_neon.h>. */
 #cmakedefine01 SNAPPY_HAVE_NEON
 
+/* Define to 1 if you have <arm_neon.h> and <arm_acle.h> and want to optimize
+   compression speed by using __crc32cw from <arm_acle.h>. */
+#cmakedefine01 SNAPPY_HAVE_NEON_CRC32
+
 /* Define to 1 if your processor stores words with the most significant byte
    first (like Motorola and SPARC, unlike Intel and VAX). */
 #cmakedefine01 SNAPPY_IS_BIG_ENDIAN
diff --git a/snappy-internal.h b/snappy-internal.h
index ae7ab5a..0923f39 100644
--- a/snappy-internal.h
+++ b/snappy-internal.h
@@ -230,8 +230,9 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
       uint64_t xorval = a1 ^ a2;
       int shift = Bits::FindLSBSetNonZero64(xorval);
       size_t matched_bytes = shift >> 3;
+      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
 #ifndef __x86_64__
-      *data = UNALIGNED_LOAD64(s2 + matched_bytes);
+      a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
 #else
       // Ideally this would just be
       //
@@ -242,13 +243,13 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
       // use a conditional move (it's tuned to cut data dependencies). In this
       // case there is a longer parallel chain anyway AND this will be fairly
       // unpredictable.
-      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
       asm("testl %k2, %k2\n\t"
           "cmovzq %1, %0\n\t"
           : "+r"(a2)
-          : "r"(a3), "r"(xorval));
-      *data = a2 >> (shift & (3 * 8));
+          : "r"(a3), "r"(xorval)
+          : "cc");
 #endif
+      *data = a2 >> (shift & (3 * 8));
       return std::pair<size_t, bool>(matched_bytes, true);
     } else {
       matched = 8;
@@ -270,16 +271,17 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
       uint64_t xorval = a1 ^ a2;
       int shift = Bits::FindLSBSetNonZero64(xorval);
       size_t matched_bytes = shift >> 3;
+      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
 #ifndef __x86_64__
-      *data = UNALIGNED_LOAD64(s2 + matched_bytes);
+      a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
 #else
-      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
       asm("testl %k2, %k2\n\t"
           "cmovzq %1, %0\n\t"
           : "+r"(a2)
-          : "r"(a3), "r"(xorval))
-      *data = a2 >> (shift & (3 * 8));
+          : "r"(a3), "r"(xorval)
+          : "cc");
 #endif
+      *data = a2 >> (shift & (3 * 8));
       matched += matched_bytes;
       assert(matched >= 8);
       return std::pair<size_t, bool>(matched, false);
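The two hunks above replace a load whose address depends on matched_bytes with arithmetic on two loads that are already in flight. A minimal standalone sketch of the same computation (the helper names NextData and Load64 are hypothetical; snappy's UNALIGNED_LOAD64 plays the latter role):

#include <cstdint>
#include <cstring>

static inline uint64_t Load64(const void* p) {  // little-endian assumed
  uint64_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

// Given that the bytes at s1 and s2 first differ at bit `shift` of
// xorval = Load64(s1) ^ Load64(s2), return the bytes starting at
// s2 + (shift >> 3) in the low bits, using only the loads at s2 and s2 + 4.
static inline uint64_t NextData(const char* s2, uint64_t xorval, int shift) {
  uint64_t a2 = Load64(s2);
  uint64_t a3 = Load64(s2 + 4);
  // If the low 32 bits matched, the mismatch is in bytes 4..7: start at s2 + 4.
  a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
  // shift & (3 * 8) is (matched_bytes % 4) * 8.
  return a2 >> (shift & (3 * 8));
}

On x86-64 the patch keeps the inline asm because compilers tend to turn the ternary into a branch; the new "cc" clobber simply declares that testl overwrites the flags.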
diff --git a/snappy-stubs-internal.h b/snappy-stubs-internal.h
index 8e28b3d..1548ed7 100644
--- a/snappy-stubs-internal.h
+++ b/snappy-stubs-internal.h
@@ -177,9 +177,13 @@ class LittleEndian {
     return (static_cast<uint16_t>(buffer[0])) |
            (static_cast<uint16_t>(buffer[1]) << 8);
 #else
-    uint16_t x;
-    memcpy(&x, ptr, 2);
-    return x;
+    // memcpy() turns into a single instruction early in the optimization
+    // pipeline (relatively to a series of byte accesses). So, using memcpy
+    // instead of byte accesses may lead to better decisions in more stages of
+    // the optimization pipeline.
+    uint16_t value;
+    std::memcpy(&value, ptr, 2);
+    return value;
 #endif
   }
 
@@ -192,9 +196,10 @@ class LittleEndian {
            (static_cast<uint32_t>(buffer[2]) << 16) |
            (static_cast<uint32_t>(buffer[3]) << 24);
 #else
-    uint32_t x;
-    memcpy(&x, ptr, 4);
-    return x;
+    // See Load16() for the rationale of using memcpy().
+    uint32_t value;
+    std::memcpy(&value, ptr, 4);
+    return value;
 #endif
   }
 
@@ -211,9 +216,10 @@ class LittleEndian {
            (static_cast<uint64_t>(buffer[6]) << 48) |
            (static_cast<uint64_t>(buffer[7]) << 56);
 #else
-    uint64_t x;
-    memcpy(&x, ptr, 8);
-    return x;
+    // See Load16() for the rationale of using memcpy().
+    uint64_t value;
+    std::memcpy(&value, ptr, 8);
+    return value;
 #endif
   }
 
@@ -224,7 +230,8 @@ class LittleEndian {
     buffer[0] = static_cast<uint8_t>(value);
     buffer[1] = static_cast<uint8_t>(value >> 8);
 #else
-    memcpy(dst, &value, 2);
+    // See Load16() for the rationale of using memcpy().
+    std::memcpy(dst, &value, 2);
 #endif
   }
 
@@ -237,7 +244,8 @@ class LittleEndian {
     buffer[2] = static_cast<uint8_t>(value >> 16);
     buffer[3] = static_cast<uint8_t>(value >> 24);
 #else
-    memcpy(dst, &value, 4);
+    // See Load16() for the rationale of using memcpy().
+    std::memcpy(dst, &value, 4);
 #endif
   }
 
@@ -254,7 +262,8 @@ class LittleEndian {
     buffer[6] = static_cast<uint8_t>(value >> 48);
     buffer[7] = static_cast<uint8_t>(value >> 56);
 #else
-    memcpy(dst, &value, 8);
+    // See Load16() for the rationale of using memcpy().
+    std::memcpy(dst, &value, 8);
 #endif
   }
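All of these hunks apply the same idiom, which in isolation looks like this (a standalone sketch, not snappy code; the name LoadU32 is made up):

#include <cstdint>
#include <cstring>

// Unaligned little-endian load with no alignment or strict-aliasing UB.
// Compilers typically collapse the memcpy into a single load instruction
// early in optimization, which is the rationale given in Load16() above.
inline uint32_t LoadU32(const void* ptr) {
  uint32_t value;
  std::memcpy(&value, ptr, sizeof(value));
  return value;
}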
diff --git a/snappy.cc b/snappy.cc
--- a/snappy.cc
+++ b/snappy.cc
@@ -45,10 +45,34 @@
 #endif
 #endif  // !defined(SNAPPY_HAVE_BMI2)
 
-#if SNAPPY_HAVE_BMI2
+#if !defined(SNAPPY_HAVE_X86_CRC32)
+#if defined(__SSE4_2__)
+#define SNAPPY_HAVE_X86_CRC32 1
+#else
+#define SNAPPY_HAVE_X86_CRC32 0
+#endif
+#endif  // !defined(SNAPPY_HAVE_X86_CRC32)
+
+#if !defined(SNAPPY_HAVE_NEON_CRC32)
+#if SNAPPY_HAVE_NEON && defined(__ARM_FEATURE_CRC32)
+#define SNAPPY_HAVE_NEON_CRC32 1
+#else
+#define SNAPPY_HAVE_NEON_CRC32 0
+#endif
+#endif  // !defined(SNAPPY_HAVE_NEON_CRC32)
+
+#if SNAPPY_HAVE_BMI2 || SNAPPY_HAVE_X86_CRC32
 // Please do not replace with <x86intrin.h>. or with headers that assume more
 // advanced SSE versions without checking with all the OWNERS.
 #include <immintrin.h>
+#elif SNAPPY_HAVE_NEON_CRC32
+#include <arm_acle.h>
+#endif
+
+#if defined(__GNUC__)
+#define SNAPPY_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 3)
+#else
+#define SNAPPY_PREFETCH(ptr) (void)(ptr)
 #endif
 
 #include <algorithm>
@@ -127,14 +151,34 @@ constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) {
 alignas(64) const std::array<int16_t, 256> kLengthMinusOffset =
     MakeTable(make_index_sequence<256>{});
 
-// Any hash function will produce a valid compressed bitstream, but a good
-// hash function reduces the number of collisions and thus yields better
-// compression for compressible input, and more speed for incompressible
-// input. Of course, it doesn't hurt if the hash function is reasonably fast
-// either, as it gets called a lot.
-inline uint32_t HashBytes(uint32_t bytes, uint32_t mask) {
+// Given a table of uint16_t whose size is mask / 2 + 1, return a pointer to the
+// relevant entry, if any, for the given bytes. Any hash function will do,
+// but a good hash function reduces the number of collisions and thus yields
+// better compression for compressible input.
+//
+// REQUIRES: mask is 2 * (table_size - 1), and table_size is a power of two.
+inline uint16_t* TableEntry(uint16_t* table, uint32_t bytes, uint32_t mask) {
+  // Our choice is quicker-and-dirtier than the typical hash function;
+  // empirically, that seems beneficial. The upper bits of kMagic * bytes are a
+  // higher-quality hash than the lower bits, so when using kMagic * bytes we
+  // also shift right to get a higher-quality end result. There's no similar
+  // issue with a CRC because all of the output bits of a CRC are equally good
+  // "hashes." So, a CPU instruction for CRC, if available, tends to be a good
+  // choice.
+#if SNAPPY_HAVE_NEON_CRC32
+  // We use mask as the second arg to the CRC function, as it's about to
+  // be used anyway; it'd be equally correct to use 0 or some constant.
+  // Mathematically, _mm_crc32_u32 (or similar) is a function of the
+  // xor of its arguments.
+  const uint32_t hash = __crc32cw(bytes, mask);
+#elif SNAPPY_HAVE_X86_CRC32
+  const uint32_t hash = _mm_crc32_u32(bytes, mask);
+#else
   constexpr uint32_t kMagic = 0x1e35a7bd;
-  return ((kMagic * bytes) >> (32 - kMaxHashTableBits)) & mask;
+  const uint32_t hash = (kMagic * bytes) >> (31 - kMaxHashTableBits);
+#endif
+  return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
+                                     (hash & mask));
 }
 
 }  // namespace
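The REQUIRES clause is the subtle part: callers now pass mask = 2 * (table_size - 1), so hash & mask is an even byte offset rather than an element index, and the uintptr_t addition indexes the uint16_t table without an extra scale-by-2. A standalone sketch of the fallback (non-CRC) path, with a hypothetical table_bits parameter standing in for kMaxHashTableBits:

#include <cstdint>

inline uint16_t* EntryFor(uint16_t* table, uint32_t bytes, uint32_t mask,
                          int table_bits) {
  constexpr uint32_t kMagic = 0x1e35a7bd;
  // One more hash bit than the old (32 - table_bits) shift; the extra low
  // bit is discarded below because mask has its low bit clear.
  const uint32_t hash = (kMagic * bytes) >> (31 - table_bits);
  // mask = 2 * (table_size - 1) is a byte mask with bit 0 clear, so
  // hash & mask is an even offset within the table's 2 * table_size bytes.
  return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
                                     (hash & mask));
}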
@@ -340,7 +384,8 @@ static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
   if (SNAPPY_PREDICT_TRUE(offset < 16)) {
     if (SNAPPY_PREDICT_FALSE(offset == 0)) return false;
     // Extend the pattern to the first 16 bytes.
-    for (int i = 0; i < 16; i++) dst[i] = dst[i - offset];
+    // The simpler formulation of `dst[i - offset]` induces undefined behavior.
+    for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i];
     // Find a multiple of pattern >= 16.
     static std::array<uint8_t, 16> pattern_sizes = []() {
       std::array<uint8_t, 16> res;
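The undefined behavior being fixed above is worth spelling out. In dst[i - offset], i is int and offset is size_t, so the subtraction is performed in size_t; whenever i < offset it wraps to a huge unsigned value, and adding that to dst is out-of-bounds pointer arithmetic, which is UB even though the wraparound usually lands on the intended address. Computing the base pointer first keeps every intermediate pointer in bounds, since the pattern really does live at dst - offset. A sketch (the name ExtendPattern16 is hypothetical):

#include <cstddef>

// Assumes offset in [1, 15] and that dst - offset points at already-written
// output, as in the caller above.
void ExtendPattern16(char* dst, std::size_t offset) {
  const char* pattern = dst - offset;  // a valid pointer into the buffer
  for (int i = 0; i < 16; i++) dst[i] = pattern[i];
}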
@@ -591,7 +636,19 @@ static inline char* EmitLiteral(char* op, const char* literal, int len) {
     LittleEndian::Store32(op, n);
     op += count;
   }
-  std::memcpy(op, literal, len);
+  // When allow_fast_path is true, we can overwrite up to 16 bytes.
+  if (allow_fast_path) {
+    char* destination = op;
+    const char* source = literal;
+    const char* end = destination + len;
+    do {
+      std::memcpy(destination, source, 16);
+      destination += 16;
+      source += 16;
+    } while (destination < end);
+  } else {
+    std::memcpy(op, literal, len);
+  }
   return op + len;
 }
 
@@ -726,7 +783,7 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
   const char* ip = input;
   assert(input_size <= kBlockSize);
   assert((table_size & (table_size - 1)) == 0);  // table must be power of two
-  const uint32_t mask = table_size - 1;
+  const uint32_t mask = 2 * (table_size - 1);
   const char* ip_end = input + input_size;
   const char* base_ip = ip;
 
@@ -777,11 +834,11 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
         // loaded in preload.
         uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data);
         assert(dword == LittleEndian::Load32(ip + i));
-        uint32_t hash = HashBytes(dword, mask);
-        candidate = base_ip + table[hash];
+        uint16_t* table_entry = TableEntry(table, dword, mask);
+        candidate = base_ip + *table_entry;
         assert(candidate >= base_ip);
         assert(candidate < ip + i);
-        table[hash] = delta + i;
+        *table_entry = delta + i;
         if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) {
           *op = LITERAL | (i << 2);
           UnalignedCopy128(next_emit, op + 1);
@@ -798,7 +855,7 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
       }
       while (true) {
         assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
-        uint32_t hash = HashBytes(data, mask);
+        uint16_t* table_entry = TableEntry(table, data, mask);
         uint32_t bytes_between_hash_lookups = skip >> 5;
         skip += bytes_between_hash_lookups;
         const char* next_ip = ip + bytes_between_hash_lookups;
@@ -806,11 +863,11 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
           ip = next_emit;
           goto emit_remainder;
         }
-        candidate = base_ip + table[hash];
+        candidate = base_ip + *table_entry;
         assert(candidate >= base_ip);
         assert(candidate < ip);
 
-        table[hash] = ip - base_ip;
+        *table_entry = ip - base_ip;
         if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
                                  LittleEndian::Load32(candidate))) {
           break;
@@ -856,12 +913,13 @@ char* CompressFragment(const char* input, size_t input_size, char* op,
         assert((data & 0xFFFFFFFFFF) ==
                (LittleEndian::Load64(ip) & 0xFFFFFFFFFF));
         // We are now looking for a 4-byte match again. We read
-        // table[Hash(ip, shift)] for that. To improve compression,
+        // table[Hash(ip, mask)] for that. To improve compression,
         // we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)].
-        table[HashBytes(LittleEndian::Load32(ip - 1), mask)] = ip - base_ip - 1;
-        uint32_t hash = HashBytes(data, mask);
-        candidate = base_ip + table[hash];
-        table[hash] = ip - base_ip;
+        *TableEntry(table, LittleEndian::Load32(ip - 1), mask) =
+            ip - base_ip - 1;
+        uint16_t* table_entry = TableEntry(table, data, mask);
+        candidate = base_ip + *table_entry;
+        *table_entry = ip - base_ip;
         // Measurements on the benchmarks have shown the following probabilities
         // for the loop to exit (ie. avg. number of iterations is reciprocal).
         // BM_Flat/6 txt1 p = 0.3-0.4
@@ -983,22 +1041,44 @@ inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) {
   return offset != 0;
 }
 
-void MemCopy(char* dst, const uint8_t* src, size_t size) {
-  std::memcpy(dst, src, size);
-}
-
-void MemCopy(ptrdiff_t dst, const uint8_t* src, size_t size) {
-  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
-  (void)dst;
-  (void)src;
-  (void)size;
-}
-
-void MemMove(char* dst, const void* src, size_t size) {
-  std::memmove(dst, src, size);
+// Copies between size bytes and 64 bytes from src to dest. size cannot exceed
+// 64. More than size bytes, but never exceeding 64, might be copied if doing
+// so gives better performance. [src, src + size) must not overlap with
+// [dst, dst + size), but [src, src + 64) may overlap with [dst, dst + 64).
+void MemCopy64(char* dst, const void* src, size_t size) {
+  // Always copy this many bytes. If that's below size then copy the full 64.
+  constexpr int kShortMemCopy = 32;
+
+  assert(size <= 64);
+  assert(std::less_equal<const void*>()(static_cast<const char*>(src) + size,
+                                        dst) ||
+         std::less_equal<const void*>()(dst + size, src));
+
+  // We know that src and dst are at least size bytes apart. However, because we
+  // might copy more than size bytes the copy still might overlap past size.
+  // E.g. if src and dst appear consecutively in memory (src + size >= dst).
+  // TODO: Investigate wider copies on other platforms.
+#if defined(__x86_64__) && defined(__AVX__)
+  assert(kShortMemCopy <= 32);
+  __m256i data = _mm256_lddqu_si256(static_cast<const __m256i *>(src));
+  _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst), data);
+  // Profiling shows that nearly all copies are short.
+  if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
+    data = _mm256_lddqu_si256(static_cast<const __m256i *>(src) + 1);
+    _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst) + 1, data);
+  }
+#else
+  std::memmove(dst, src, kShortMemCopy);
+  // Profiling shows that nearly all copies are short.
+  if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
+    std::memmove(dst + kShortMemCopy,
+                 static_cast<const uint8_t*>(src) + kShortMemCopy,
+                 64 - kShortMemCopy);
+  }
+#endif
 }
 
-void MemMove(ptrdiff_t dst, const void* src, size_t size) {
+void MemCopy64(ptrdiff_t dst, const void* src, size_t size) {
   // TODO: Switch to [[maybe_unused]] when we can assume C++17.
   (void)dst;
   (void)src;
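Note the contract: MemCopy64 may write up to 64 bytes regardless of size, so callers need 64 writable bytes at the destination and 64 readable bytes at the source. A portable stand-in honoring the same contract (the name CopyAtMost64 is hypothetical; the AVX path above does the equivalent with two 32-byte vector copies):

#include <cassert>
#include <cstddef>
#include <cstring>

void CopyAtMost64(char* dst, const char* src, std::size_t size) {
  assert(size <= 64);
  // Nearly all copies are short, so copy 32 bytes unconditionally...
  std::memmove(dst, src, 32);
  // ...and touch the second half only when actually needed.
  if (size > 32) std::memmove(dst + 32, src + 32, 32);
}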
asm("and $3, %k[tag_type]\n\t" - : [tag_type] "+r"(tag_type), "=@ccz"(is_literal)); + : [tag_type] "+r"(tag_type), "=@ccz"(is_literal) + :: "cc"); #else tag_type &= 3; is_literal = (tag_type == 0); @@ -1129,6 +1210,8 @@ std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless( // The throughput is limited by instructions, unrolling the inner loop // twice reduces the amount of instructions checking limits and also // leads to reduced mov's. + + SNAPPY_PREFETCH(ip+128); for (int i = 0; i < 2; i++) { const uint8_t* old_ip = ip; assert(tag == ip[-1]); @@ -1170,7 +1253,7 @@ std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless( // Due to the spurious offset in literals have this will trigger // at the start of a block when op is still smaller than 256. if (tag_type != 0) goto break_loop; - MemCopy(op_base + op, old_ip, 64); + MemCopy64(op_base + op, old_ip, len); op += len; continue; } @@ -1179,7 +1262,7 @@ std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless( // we need to copy from ip instead of from the stream. const void* from = tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip; - MemMove(op_base + op, from, 64); + MemCopy64(op_base + op, from, len); op += len; } } while (ip < ip_limit_min_slop && op < op_limit_min_slop); @@ -1566,6 +1649,67 @@ size_t Compress(Source* reader, Sink* writer) { // IOVec interfaces // ----------------------------------------------------------------------- +// A `Source` implementation that yields the contents of an `iovec` array. Note +// that `total_size` is the total number of bytes to be read from the elements +// of `iov` (_not_ the total number of elements in `iov`). +class SnappyIOVecReader : public Source { + public: + SnappyIOVecReader(const struct iovec* iov, size_t total_size) + : curr_iov_(iov), + curr_pos_(total_size > 0 ? reinterpret_cast<const char*>(iov->iov_base) + : nullptr), + curr_size_remaining_(total_size > 0 ? iov->iov_len : 0), + total_size_remaining_(total_size) { + // Skip empty leading `iovec`s. + if (total_size > 0 && curr_size_remaining_ == 0) Advance(); + } + + ~SnappyIOVecReader() = default; + + size_t Available() const { return total_size_remaining_; } + + const char* Peek(size_t* len) { + *len = curr_size_remaining_; + return curr_pos_; + } + + void Skip(size_t n) { + while (n >= curr_size_remaining_ && n > 0) { + n -= curr_size_remaining_; + Advance(); + } + curr_size_remaining_ -= n; + total_size_remaining_ -= n; + curr_pos_ += n; + } + + private: + // Advances to the next nonempty `iovec` and updates related variables. + void Advance() { + do { + assert(total_size_remaining_ >= curr_size_remaining_); + total_size_remaining_ -= curr_size_remaining_; + if (total_size_remaining_ == 0) { + curr_pos_ = nullptr; + curr_size_remaining_ = 0; + return; + } + ++curr_iov_; + curr_pos_ = reinterpret_cast<const char*>(curr_iov_->iov_base); + curr_size_remaining_ = curr_iov_->iov_len; + } while (curr_size_remaining_ == 0); + } + + // The `iovec` currently being read. + const struct iovec* curr_iov_; + // The location in `curr_iov_` currently being read. + const char* curr_pos_; + // The amount of unread data in `curr_iov_`. + size_t curr_size_remaining_; + // The amount of unread data in the entire input array. + size_t total_size_remaining_; +}; + // A type that writes to an iovec. // Note that this is not a "ByteSink", but a type that matches the // Writer template argument to SnappyDecompressor::DecompressAllTags(). 
@@ -1940,6 +2084,16 @@ void RawCompress(const char* input, size_t input_length, char* compressed,
   *compressed_length = (writer.CurrentDestination() - compressed);
 }
 
+void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
+                          char* compressed, size_t* compressed_length) {
+  SnappyIOVecReader reader(iov, uncompressed_length);
+  UncheckedByteArraySink writer(compressed);
+  Compress(&reader, &writer);
+
+  // Compute how many bytes were added.
+  *compressed_length = writer.CurrentDestination() - compressed;
+}
+
 size_t Compress(const char* input, size_t input_length,
                 std::string* compressed) {
   // Pre-grow the buffer to the max length of the compressed output
@@ -1948,7 +2102,26 @@ size_t Compress(const char* input, size_t input_length,
   size_t compressed_length;
   RawCompress(input, input_length, string_as_array(compressed),
               &compressed_length);
-  compressed->resize(compressed_length);
+  compressed->erase(compressed_length);
+  return compressed_length;
+}
+
+size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
+                         std::string* compressed) {
+  // Compute the number of bytes to be compressed.
+  size_t uncompressed_length = 0;
+  for (size_t i = 0; i < iov_cnt; ++i) {
+    uncompressed_length += iov[i].iov_len;
+  }
+
+  // Pre-grow the buffer to the max length of the compressed output.
+  STLStringResizeUninitialized(compressed, MaxCompressedLength(
+      uncompressed_length));
+
+  size_t compressed_length;
+  RawCompressFromIOVec(iov, uncompressed_length, string_as_array(compressed),
+                       &compressed_length);
+  compressed->erase(compressed_length);
   return compressed_length;
 }
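A usage sketch of the two new entry points (the function and buffer names here are made up): CompressFromIOVec sums the iov_len fields itself, while RawCompressFromIOVec takes the precomputed byte count and an output buffer sized via MaxCompressedLength.

#include <cstddef>
#include <string>
#include <sys/uio.h>
#include "snappy.h"

std::string CompressTwoBuffers(const char* a, size_t a_len, const char* b,
                               size_t b_len) {
  struct iovec iov[2];
  iov[0].iov_base = const_cast<char*>(a);
  iov[0].iov_len = a_len;
  iov[1].iov_base = const_cast<char*>(b);
  iov[1].iov_len = b_len;
  std::string compressed;
  snappy::CompressFromIOVec(iov, 2, &compressed);  // 2 = element count
  return compressed;
}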
diff --git a/snappy.h b/snappy.h
--- a/snappy.h
+++ b/snappy.h
@@ -71,14 +71,21 @@ namespace snappy {
   // Higher-level string based routines (should be sufficient for most users)
   // ------------------------------------------------------------------------
 
-  // Sets "*compressed" to the compressed version of "input[0,input_length-1]".
+  // Sets "*compressed" to the compressed version of "input[0..input_length-1]".
   // Original contents of *compressed are lost.
   //
   // REQUIRES: "input[]" is not an alias of "*compressed".
   size_t Compress(const char* input, size_t input_length,
                   std::string* compressed);
 
-  // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
+  // Same as `Compress` above but taking an `iovec` array as input. Note that
+  // this function preprocesses the inputs to compute the sum of
+  // `iov[0..iov_cnt-1].iov_len` before reading. To avoid this, use
+  // `RawCompressFromIOVec` below.
+  size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
+                           std::string* compressed);
+
+  // Decompresses "compressed[0..compressed_length-1]" to "*uncompressed".
   // Original contents of "*uncompressed" are lost.
   //
   // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
@@ -124,6 +131,12 @@ namespace snappy {
                    char* compressed,
                    size_t* compressed_length);
 
+  // Same as `RawCompress` above but taking an `iovec` array as input. Note that
+  // `uncompressed_length` is the total number of bytes to be read from the
+  // elements of `iov` (_not_ the number of elements in `iov`).
+  void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
+                            char* compressed, size_t* compressed_length);
+
   // Given data in "compressed[0..compressed_length-1]" generated by
   // calling the Snappy::Compress routine, this routine
   // stores the uncompressed data to
diff --git a/snappy_benchmark.cc b/snappy_benchmark.cc
index 9a54f9c..28570dd 100644
--- a/snappy_benchmark.cc
+++ b/snappy_benchmark.cc
@@ -149,7 +149,55 @@ void BM_UValidateMedley(benchmark::State& state) {
 }
 BENCHMARK(BM_UValidateMedley);
 
-void BM_UIOVec(benchmark::State& state) {
+void BM_UIOVecSource(benchmark::State& state) {
+  // Pick file to process based on state.range(0).
+  int file_index = state.range(0);
+
+  CHECK_GE(file_index, 0);
+  CHECK_LT(file_index, ARRAYSIZE(kTestDataFiles));
+  std::string contents =
+      ReadTestDataFile(kTestDataFiles[file_index].filename,
+                       kTestDataFiles[file_index].size_limit);
+
+  // Create `iovec`s of the `contents`.
+  const int kNumEntries = 10;
+  struct iovec iov[kNumEntries];
+  size_t used_so_far = 0;
+  for (int i = 0; i < kNumEntries; ++i) {
+    iov[i].iov_base = const_cast<char*>(contents.data()) + used_so_far;
+    if (used_so_far == contents.size()) {
+      iov[i].iov_len = 0;
+      continue;
+    }
+    if (i == kNumEntries - 1) {
+      iov[i].iov_len = contents.size() - used_so_far;
+    } else {
+      iov[i].iov_len = contents.size() / kNumEntries;
+    }
+    used_so_far += iov[i].iov_len;
+  }
+
+  char* dst = new char[snappy::MaxCompressedLength(contents.size())];
+  size_t zsize = 0;
+  for (auto s : state) {
+    snappy::RawCompressFromIOVec(iov, contents.size(), dst, &zsize);
+    benchmark::DoNotOptimize(iov);
+  }
+  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
+                          static_cast<int64_t>(contents.size()));
+  const double compression_ratio =
+      static_cast<double>(zsize) / std::max<size_t>(1, contents.size());
+  state.SetLabel(StrFormat("%s (%.2f %%)", kTestDataFiles[file_index].label,
+                           100.0 * compression_ratio));
+  VLOG(0) << StrFormat("compression for %s: %d -> %d bytes",
+                       kTestDataFiles[file_index].label, contents.size(),
+                       zsize);
+
+  delete[] dst;
+}
+BENCHMARK(BM_UIOVecSource)->DenseRange(0, ARRAYSIZE(kTestDataFiles) - 1);
+
+void BM_UIOVecSink(benchmark::State& state) {
   // Pick file to process based on state.range(0).
   int file_index = state.range(0);
 
@@ -193,7 +241,7 @@ void BM_UIOVec(benchmark::State& state) {
 
   delete[] dst;
 }
-BENCHMARK(BM_UIOVec)->DenseRange(0, 4);
+BENCHMARK(BM_UIOVecSink)->DenseRange(0, 4);
 
 void BM_UFlatSink(benchmark::State& state) {
   // Pick file to process based on state.range(0).
diff --git a/snappy_unittest.cc b/snappy_unittest.cc
index 292004c..e57b13d 100644
--- a/snappy_unittest.cc
+++ b/snappy_unittest.cc
@@ -137,21 +137,10 @@ void VerifyStringSink(const std::string& input) {
   CHECK_EQ(uncompressed, input);
 }
 
-void VerifyIOVec(const std::string& input) {
-  std::string compressed;
-  DataEndingAtUnreadablePage i(input);
-  const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
-  CHECK_EQ(written, compressed.size());
-  CHECK_LE(compressed.size(),
-           snappy::MaxCompressedLength(input.size()));
-  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
-
-  // Try uncompressing into an iovec containing a random number of entries
-  // ranging from 1 to 10.
-  char* buf = new char[input.size()];
+struct iovec* GetIOVec(const std::string& input, char*& buf, size_t& num) {
   std::minstd_rand0 rng(input.size());
   std::uniform_int_distribution<size_t> uniform_1_to_10(1, 10);
-  size_t num = uniform_1_to_10(rng);
+  num = uniform_1_to_10(rng);
   if (input.size() < num) {
     num = input.size();
   }
@@ -175,8 +164,40 @@ void VerifyIOVec(const std::string& input) {
     }
     used_so_far += iov[i].iov_len;
   }
-  CHECK(snappy::RawUncompressToIOVec(
-      compressed.data(), compressed.size(), iov, num));
+  return iov;
+}
+
+int VerifyIOVecSource(const std::string& input) {
+  std::string compressed;
+  std::string copy = input;
+  char* buf = const_cast<char*>(copy.data());
+  size_t num = 0;
+  struct iovec* iov = GetIOVec(input, buf, num);
+  const size_t written = snappy::CompressFromIOVec(iov, num, &compressed);
+  CHECK_EQ(written, compressed.size());
+  CHECK_LE(compressed.size(), snappy::MaxCompressedLength(input.size()));
+  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+
+  std::string uncompressed;
+  DataEndingAtUnreadablePage c(compressed);
+  CHECK(snappy::Uncompress(c.data(), c.size(), &uncompressed));
+  CHECK_EQ(uncompressed, input);
+  delete[] iov;
+  return uncompressed.size();
+}
+
+void VerifyIOVecSink(const std::string& input) {
+  std::string compressed;
+  DataEndingAtUnreadablePage i(input);
+  const size_t written = snappy::Compress(i.data(), i.size(), &compressed);
+  CHECK_EQ(written, compressed.size());
+  CHECK_LE(compressed.size(), snappy::MaxCompressedLength(input.size()));
+  CHECK(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
+  char* buf = new char[input.size()];
+  size_t num = 0;
+  struct iovec* iov = GetIOVec(input, buf, num);
+  CHECK(snappy::RawUncompressToIOVec(compressed.data(), compressed.size(), iov,
+                                     num));
   CHECK(!memcmp(buf, input.data(), input.size()));
   delete[] iov;
   delete[] buf;
@@ -252,15 +273,18 @@ int Verify(const std::string& input) {
   // Compress using string based routines
   const int result = VerifyString(input);
 
+  // Compress using `iovec`-based routines.
+  CHECK_EQ(VerifyIOVecSource(input), result);
+
   // Verify using sink based routines
   VerifyStringSink(input);
 
   VerifyNonBlockedCompression(input);
-  VerifyIOVec(input);
+  VerifyIOVecSink(input);
  if (!input.empty()) {
     const std::string expanded = Expand(input);
     VerifyNonBlockedCompression(expanded);
-    VerifyIOVec(input);
+    VerifyIOVecSink(input);
   }
 
   return result;
@@ -540,7 +564,27 @@ TEST(Snappy, FourByteOffset) {
   CHECK_EQ(uncompressed, src);
 }
 
-TEST(Snappy, IOVecEdgeCases) {
+TEST(Snappy, IOVecSourceEdgeCases) {
+  // Validate that empty leading, trailing, and in-between iovecs are handled:
+  // [] [] ['a'] [] ['b'] [].
+  std::string data = "ab";
+  char* buf = const_cast<char*>(data.data());
+  size_t used_so_far = 0;
+  static const int kLengths[] = {0, 0, 1, 0, 1, 0};
+  struct iovec iov[ARRAYSIZE(kLengths)];
+  for (int i = 0; i < ARRAYSIZE(kLengths); ++i) {
+    iov[i].iov_base = buf + used_so_far;
+    iov[i].iov_len = kLengths[i];
+    used_so_far += kLengths[i];
+  }
+  std::string compressed;
+  snappy::CompressFromIOVec(iov, ARRAYSIZE(kLengths), &compressed);
+  std::string uncompressed;
+  snappy::Uncompress(compressed.data(), compressed.size(), &uncompressed);
+  CHECK_EQ(data, uncompressed);
+}
+
+TEST(Snappy, IOVecSinkEdgeCases) {
  // Test some tricky edge cases in the iovec output that are not necessarily
  // exercised by random tests.
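The new source-side test mirrors the existing sink-side one; pairing the two APIs gives a compact round trip. A sketch (the name RoundTripIOVec is made up; note it decompresses back over the original buffers, which is fine because the compressed copy is taken first and the total lengths match by construction):

#include <cstddef>
#include <string>
#include <sys/uio.h>
#include "snappy.h"

bool RoundTripIOVec(const struct iovec* iov, size_t iov_cnt) {
  std::string compressed;
  snappy::CompressFromIOVec(iov, iov_cnt, &compressed);
  // Decompress back into the same iovec layout.
  return snappy::RawUncompressToIOVec(compressed.data(), compressed.size(),
                                      iov, iov_cnt);
}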