author     Victor Costan <costan@google.com>  2020-04-12 20:03:50 +0000
committer  Victor Costan <costan@google.com>  2020-04-12 20:10:03 +0000
commit     231b8be07655f6d887dde850c85fa3c64eacc0ee (patch)
tree       7616431b11cb0ef7e25d83ee35df3a82e0ea5301
parent     14bef6629050a8ddf474c7ddeb4fabacc0157d8a (diff)
download   snappy-git-231b8be07655f6d887dde850c85fa3c64eacc0ee.tar.gz
Migrate to standard integral types.
The following changes were done via find/replace.
* int8 -> int8_t
* int16 -> int16_t
* int32 -> int32_t
* int64 -> int64_t
The aliases were removed from snappy-stubs-public.h.
PiperOrigin-RevId: 306141557
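
For context, here is a minimal sketch (illustrative, not part of the patch) of why a textual find/replace is behavior-preserving here: the aliases removed from snappy-stubs-public.h were defined as exactly the standard `<cstdint>` types, so the old and new spellings name the same types.

```cpp
#include <cstdint>
#include <type_traits>

// The aliases deleted by this commit were definitions of this form:
using uint32 = std::uint32_t;  // removed from snappy-stubs-public.h
using int64 = std::int64_t;    // removed from snappy-stubs-public.h

// Old and new spellings are the same type, so a mechanical rename
// cannot change semantics.
static_assert(std::is_same<uint32, std::uint32_t>::value, "same type");
static_assert(std::is_same<int64, std::int64_t>::value, "same type");

int main() { return 0; }
```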
-rw-r--r--  snappy-internal.h        |  47
-rw-r--r--  snappy-stubs-internal.cc |   2
-rw-r--r--  snappy-stubs-internal.h  | 150
-rw-r--r--  snappy-stubs-public.h.in |  10
-rw-r--r--  snappy-test.cc           |  18
-rw-r--r--  snappy-test.h            |   4
-rw-r--r--  snappy.cc                | 102
-rw-r--r--  snappy.h                 |   3
-rw-r--r--  snappy_unittest.cc       |  52

9 files changed, 191 insertions, 197 deletions
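
One hunk in snappy-stubs-internal.h is slightly more than a rename: kuint32max and kint64max are redefined via std::numeric_limits, which is what the new <limits> include is for. A quick compile-time check (a sketch, not from the patch) that the new definitions equal the old hand-written casts:

```cpp
#include <cstdint>
#include <limits>

// Old definitions used hand-written casts; the <limits> versions below
// produce the same constants, verified at compile time.
static_assert(std::numeric_limits<std::uint32_t>::max() ==
                  static_cast<std::uint32_t>(0xFFFFFFFF),
              "kuint32max unchanged");
static_assert(std::numeric_limits<std::int64_t>::max() ==
                  static_cast<std::int64_t>(0x7FFFFFFFFFFFFFFFLL),
              "kint64max unchanged");

int main() { return 0; }
```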
diff --git a/snappy-internal.h b/snappy-internal.h
index 0531644..f88577c 100644
--- a/snappy-internal.h
+++ b/snappy-internal.h
@@ -46,16 +46,16 @@ class WorkingMemory {
   // Allocates and clears a hash table using memory in "*this",
   // stores the number of buckets in "*table_size" and returns a pointer to
   // the base of the hash table.
-  uint16* GetHashTable(size_t fragment_size, int* table_size) const;
+  uint16_t* GetHashTable(size_t fragment_size, int* table_size) const;
   char* GetScratchInput() const { return input_; }
   char* GetScratchOutput() const { return output_; }
 
  private:
-  char* mem_;      // the allocated memory, never nullptr
-  size_t size_;    // the size of the allocated memory, never 0
-  uint16* table_;  // the pointer to the hashtable
-  char* input_;    // the pointer to the input scratch buffer
-  char* output_;   // the pointer to the output scratch buffer
+  char* mem_;        // the allocated memory, never nullptr
+  size_t size_;      // the size of the allocated memory, never 0
+  uint16_t* table_;  // the pointer to the hashtable
+  char* input_;      // the pointer to the input scratch buffer
+  char* output_;     // the pointer to the output scratch buffer
 
   // No copying
   WorkingMemory(const WorkingMemory&);
@@ -76,7 +76,7 @@ class WorkingMemory {
 char* CompressFragment(const char* input,
                        size_t input_length,
                        char* op,
-                       uint16* table,
+                       uint16_t* table,
                        const int table_size);
 
 // Find the largest n such that
@@ -100,7 +100,7 @@ char* CompressFragment(const char* input,
 static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
                                                       const char* s2,
                                                       const char* s2_limit,
-                                                      uint64* data) {
+                                                      uint64_t* data) {
   assert(s2_limit >= s2);
   size_t matched = 0;
 
@@ -110,8 +110,8 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
   // length is less than 8. In short, we are hoping to avoid a conditional
   // branch, and perhaps get better code layout from the C++ compiler.
   if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
-    uint64 a1 = UNALIGNED_LOAD64(s1);
-    uint64 a2 = UNALIGNED_LOAD64(s2);
+    uint64_t a1 = UNALIGNED_LOAD64(s1);
+    uint64_t a2 = UNALIGNED_LOAD64(s2);
     if (SNAPPY_PREDICT_TRUE(a1 != a2)) {
       // This code is critical for performance. The reason is that it determines
       // how much to advance `ip` (s2). This obviously depends on both the loads
@@ -147,10 +147,11 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
       //
       // Writen like above this is not a big win, the conditional move would be
       // a cmp followed by a cmov (2 cycles) followed by a shift (1 cycle).
-      // However matched_bytes < 4 is equal to static_cast<uint32>(xorval) != 0.
-      // Writen that way the conditional move (2 cycles) can execute parallel
-      // with FindLSBSetNonZero64 (tzcnt), which takes 3 cycles.
-      uint64 xorval = a1 ^ a2;
+      // However matched_bytes < 4 is equal to
+      // static_cast<uint32_t>(xorval) != 0. Writen that way, the conditional
+      // move (2 cycles) can execute in parallel with FindLSBSetNonZero64
+      // (tzcnt), which takes 3 cycles.
+      uint64_t xorval = a1 ^ a2;
       int shift = Bits::FindLSBSetNonZero64(xorval);
       size_t matched_bytes = shift >> 3;
 #ifndef __x86_64__
@@ -158,14 +159,14 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
 #else
       // Ideally this would just be
       //
-      // a2 = static_cast<uint32>(xorval) == 0 ? a3 : a2;
+      // a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
       //
       // However clang correctly infers that the above statement participates on
       // a critical data dependency chain and thus, unfortunately, refuses to
       // use a conditional move (it's tuned to cut data dependencies). In this
       // case there is a longer parallel chain anyway AND this will be fairly
       // unpredictable.
-      uint64 a3 = UNALIGNED_LOAD64(s2 + 4);
+      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
       asm("testl %k2, %k2\n\t"
           "cmovzq %1, %0\n\t"
           : "+r"(a2)
@@ -184,19 +185,19 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
   // the first non-matching bit and use that to calculate the total
   // length of the match.
   while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
-    uint64 a1 = UNALIGNED_LOAD64(s1 + matched);
-    uint64 a2 = UNALIGNED_LOAD64(s2);
+    uint64_t a1 = UNALIGNED_LOAD64(s1 + matched);
+    uint64_t a2 = UNALIGNED_LOAD64(s2);
     if (a1 == a2) {
       s2 += 8;
       matched += 8;
     } else {
-      uint64 xorval = a1 ^ a2;
+      uint64_t xorval = a1 ^ a2;
       int shift = Bits::FindLSBSetNonZero64(xorval);
       size_t matched_bytes = shift >> 3;
 #ifndef __x86_64__
       *data = UNALIGNED_LOAD64(s2 + matched_bytes);
 #else
-      uint64 a3 = UNALIGNED_LOAD64(s2 + 4);
+      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
       asm("testl %k2, %k2\n\t"
           "cmovzq %1, %0\n\t"
           : "+r"(a2)
@@ -225,7 +226,7 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
 static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
                                                       const char* s2,
                                                       const char* s2_limit,
-                                                      uint64* data) {
+                                                      uint64_t* data) {
   // Implementation based on the x86-64 version, above.
   assert(s2_limit >= s2);
   int matched = 0;
@@ -236,7 +237,7 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
     matched += 4;
   }
   if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
-    uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+    uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
     int matching_bits = Bits::FindLSBSetNonZero(x);
     matched += matching_bits >> 3;
     s2 += matching_bits >> 3;
@@ -273,7 +274,7 @@ static const int kMaximumTagLength = 5;  // COPY_4_BYTE_OFFSET plus the actual o
 // because of efficiency reasons:
 //      (1) Extracting a byte is faster than a bit-field
 //      (2) It properly aligns copy offset so we do not need a <<8
-static const uint16 char_table[256] = {
+static const uint16_t char_table[256] = {
   0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
   0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
   0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
diff --git a/snappy-stubs-internal.cc b/snappy-stubs-internal.cc
index 66ed2e9..0bc8c2d 100644
--- a/snappy-stubs-internal.cc
+++ b/snappy-stubs-internal.cc
@@ -33,7 +33,7 @@
 
 namespace snappy {
 
-void Varint::Append32(std::string* s, uint32 value) {
+void Varint::Append32(std::string* s, uint32_t value) {
   char buf[Varint::kMax32];
   const char* p = Varint::Encode32(buf, value);
   s->append(buf, p - buf);
diff --git a/snappy-stubs-internal.h b/snappy-stubs-internal.h
index 7e81c17..c9a795c 100644
--- a/snappy-stubs-internal.h
+++ b/snappy-stubs-internal.h
@@ -35,7 +35,9 @@
 #include "config.h"
 #endif
 
+#include <cstdint>
 #include <cstring>
+#include <limits>
 #include <string>
 
 #include <assert.h>
@@ -116,8 +118,8 @@
 
 namespace snappy {
 
-static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
-static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
+static const uint32_t kuint32max = std::numeric_limits<uint32_t>::max();
+static const int64_t kint64max = std::numeric_limits<int64_t>::max();
 
 // Potentially unaligned loads and stores.
 
@@ -126,13 +128,13 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
 #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
     defined(__aarch64__)
 
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
-#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16_t *>(_p))
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32_t *>(_p))
+#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64_t *>(_p))
 
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
-#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16_t *>(_p) = (_val))
+#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32_t *>(_p) = (_val))
+#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64_t *>(_p) = (_val))
 
 // ARMv7 and newer support native unaligned accesses, but only of 16-bit
 // and 32-bit values (not 64-bit); older versions either raise a fatal signal,
@@ -147,7 +149,7 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
 // allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
 // explicitly tell the compiler that these accesses can be unaligned, it can and
 // will combine accesses. On armcc, the way to signal this is done by accessing
-// through the type (uint32 __packed *), but GCC has no such attribute
+// through the type (uint32_t __packed *), but GCC has no such attribute
 // (it ignores __attribute__((packed)) on individual variables). However,
 // we can tell it that a _struct_ is unaligned, which has the same effect,
 // so we do that.
@@ -176,13 +178,13 @@ namespace base {
 namespace internal {
 
 struct Unaligned16Struct {
-  uint16 value;
-  uint8 dummy;  // To make the size non-power-of-two.
+  uint16_t value;
+  uint8_t dummy;  // To make the size non-power-of-two.
 } ATTRIBUTE_PACKED;
 
 struct Unaligned32Struct {
-  uint32 value;
-  uint8 dummy;  // To make the size non-power-of-two.
+  uint32_t value;
+  uint8_t dummy;  // To make the size non-power-of-two.
 } ATTRIBUTE_PACKED;
 
 }  // namespace internal
@@ -204,13 +206,13 @@ struct Unaligned32Struct {
 // See if that would be more efficient on platforms supporting it,
 // at least for copies.
 
-inline uint64 UNALIGNED_LOAD64(const void *p) {
-  uint64 t;
+inline uint64_t UNALIGNED_LOAD64(const void *p) {
+  uint64_t t;
   std::memcpy(&t, p, sizeof t);
   return t;
 }
 
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
+inline void UNALIGNED_STORE64(void *p, uint64_t v) {
   std::memcpy(p, &v, sizeof v);
 }
 
@@ -219,33 +221,33 @@ inline void UNALIGNED_STORE64(void *p, uint64 v) {
 // These functions are provided for architectures that don't support
 // unaligned loads and stores.
-inline uint16 UNALIGNED_LOAD16(const void *p) {
-  uint16 t;
+inline uint16_t UNALIGNED_LOAD16(const void *p) {
+  uint16_t t;
   std::memcpy(&t, p, sizeof t);
   return t;
 }
 
-inline uint32 UNALIGNED_LOAD32(const void *p) {
-  uint32 t;
+inline uint32_t UNALIGNED_LOAD32(const void *p) {
+  uint32_t t;
   std::memcpy(&t, p, sizeof t);
   return t;
 }
 
-inline uint64 UNALIGNED_LOAD64(const void *p) {
-  uint64 t;
+inline uint64_t UNALIGNED_LOAD64(const void *p) {
+  uint64_t t;
   std::memcpy(&t, p, sizeof t);
   return t;
 }
 
-inline void UNALIGNED_STORE16(void *p, uint16 v) {
+inline void UNALIGNED_STORE16(void *p, uint16_t v) {
   std::memcpy(p, &v, sizeof v);
 }
 
-inline void UNALIGNED_STORE32(void *p, uint32 v) {
+inline void UNALIGNED_STORE32(void *p, uint32_t v) {
   std::memcpy(p, &v, sizeof v);
 }
 
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
+inline void UNALIGNED_STORE64(void *p, uint64_t v) {
   std::memcpy(p, &v, sizeof v);
 }
 
@@ -292,16 +294,16 @@ inline void UNALIGNED_STORE64(void *p, uint64 v) {
 
 #else
 
-inline uint16 bswap_16(uint16 x) {
+inline uint16_t bswap_16(uint16_t x) {
   return (x << 8) | (x >> 8);
 }
 
-inline uint32 bswap_32(uint32 x) {
+inline uint32_t bswap_32(uint32_t x) {
   x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
   return (x >> 16) | (x << 16);
 }
 
-inline uint64 bswap_64(uint64 x) {
+inline uint64_t bswap_64(uint64_t x) {
   x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
   x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
   return (x >> 32) | (x << 32);
@@ -325,54 +327,54 @@ class LittleEndian {
   // Conversion functions.
 #if defined(SNAPPY_IS_BIG_ENDIAN)
 
-  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
-  static uint16 ToHost16(uint16 x) { return bswap_16(x); }
+  static uint16_t FromHost16(uint16_t x) { return bswap_16(x); }
+  static uint16_t ToHost16(uint16_t x) { return bswap_16(x); }
 
-  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
-  static uint32 ToHost32(uint32 x) { return bswap_32(x); }
+  static uint32_t FromHost32(uint32_t x) { return bswap_32(x); }
+  static uint32_t ToHost32(uint32_t x) { return bswap_32(x); }
 
-  static uint32 FromHost64(uint64 x) { return bswap_64(x); }
-  static uint32 ToHost64(uint64 x) { return bswap_64(x); }
+  static uint32_t FromHost64(uint64_t x) { return bswap_64(x); }
+  static uint32_t ToHost64(uint64_t x) { return bswap_64(x); }
 
   static bool IsLittleEndian() { return false; }
 
 #else  // !defined(SNAPPY_IS_BIG_ENDIAN)
 
-  static uint16 FromHost16(uint16 x) { return x; }
-  static uint16 ToHost16(uint16 x) { return x; }
+  static uint16_t FromHost16(uint16_t x) { return x; }
+  static uint16_t ToHost16(uint16_t x) { return x; }
 
-  static uint32 FromHost32(uint32 x) { return x; }
-  static uint32 ToHost32(uint32 x) { return x; }
+  static uint32_t FromHost32(uint32_t x) { return x; }
+  static uint32_t ToHost32(uint32_t x) { return x; }
 
-  static uint32 FromHost64(uint64 x) { return x; }
-  static uint32 ToHost64(uint64 x) { return x; }
+  static uint32_t FromHost64(uint64_t x) { return x; }
+  static uint32_t ToHost64(uint64_t x) { return x; }
 
   static bool IsLittleEndian() { return true; }
 
 #endif  // !defined(SNAPPY_IS_BIG_ENDIAN)
 
   // Functions to do unaligned loads and stores in little-endian order.
-  static uint16 Load16(const void *p) {
+  static uint16_t Load16(const void *p) {
     return ToHost16(UNALIGNED_LOAD16(p));
   }
 
-  static void Store16(void *p, uint16 v) {
+  static void Store16(void *p, uint16_t v) {
     UNALIGNED_STORE16(p, FromHost16(v));
   }
 
-  static uint32 Load32(const void *p) {
+  static uint32_t Load32(const void *p) {
     return ToHost32(UNALIGNED_LOAD32(p));
   }
 
-  static void Store32(void *p, uint32 v) {
+  static void Store32(void *p, uint32_t v) {
     UNALIGNED_STORE32(p, FromHost32(v));
   }
 
-  static uint64 Load64(const void *p) {
+  static uint64_t Load64(const void *p) {
     return ToHost64(UNALIGNED_LOAD64(p));
   }
 
-  static void Store64(void *p, uint64 v) {
+  static void Store64(void *p, uint64_t v) {
     UNALIGNED_STORE64(p, FromHost64(v));
   }
 };
 
@@ -381,18 +383,18 @@ class LittleEndian {
 class Bits {
  public:
   // Return floor(log2(n)) for positive integer n.
-  static int Log2FloorNonZero(uint32 n);
+  static int Log2FloorNonZero(uint32_t n);
 
   // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
-  static int Log2Floor(uint32 n);
+  static int Log2Floor(uint32_t n);
 
   // Return the first set least / most significant bit, 0-indexed.  Returns an
   // undefined value if n == 0.  FindLSBSetNonZero() is similar to ffs() except
   // that it's 0-indexed.
-  static int FindLSBSetNonZero(uint32 n);
+  static int FindLSBSetNonZero(uint32_t n);
 
 #if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-  static int FindLSBSetNonZero64(uint64 n);
+  static int FindLSBSetNonZero64(uint64_t n);
 #endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 
  private:
@@ -403,7 +405,7 @@ class Bits {
 
 #ifdef HAVE_BUILTIN_CTZ
 
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
   assert(n != 0);
   // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
   // represents subtraction in base 2 and observes that there's no carry.
@@ -414,17 +416,17 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
   return 31 ^ __builtin_clz(n);
 }
 
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
   return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
 }
 
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
   assert(n != 0);
   return __builtin_ctz(n);
 }
 
 #if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
   assert(n != 0);
   return __builtin_ctzll(n);
 }
@@ -432,21 +434,21 @@ inline int Bits::FindLSBSetNonZero64(uint64 n) {
 
 #elif defined(_MSC_VER)
 
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
   assert(n != 0);
   unsigned long where;
   _BitScanReverse(&where, n);
   return static_cast<int>(where);
 }
 
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
   unsigned long where;
   if (_BitScanReverse(&where, n))
     return static_cast<int>(where);
   return -1;
 }
 
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
   assert(n != 0);
   unsigned long where;
   if (_BitScanForward(&where, n))
@@ -455,7 +457,7 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
 }
 
 #if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
   assert(n != 0);
   unsigned long where;
   if (_BitScanForward64(&where, n))
@@ -466,14 +468,14 @@ inline int Bits::FindLSBSetNonZero64(uint64 n) {
 
 #else  // Portable versions.
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
   assert(n != 0);
 
   int log = 0;
-  uint32 value = n;
+  uint32_t value = n;
   for (int i = 4; i >= 0; --i) {
     int shift = (1 << i);
-    uint32 x = value >> shift;
+    uint32_t x = value >> shift;
     if (x != 0) {
       value = x;
       log += shift;
@@ -483,16 +485,16 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
   return log;
 }
 
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
   return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
 }
 
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
   assert(n != 0);
 
   int rc = 31;
   for (int i = 4, shift = 1 << 4; i >= 0; --i) {
-    const uint32 x = n << shift;
+    const uint32_t x = n << shift;
     if (x != 0) {
       n = x;
       rc -= shift;
@@ -504,13 +506,13 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
 
 #if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
   assert(n != 0);
 
-  const uint32 bottombits = static_cast<uint32>(n);
+  const uint32_t bottombits = static_cast<uint32_t>(n);
   if (bottombits == 0) {
     // Bottom bits are zero, so scan in top bits
-    return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+    return 32 + FindLSBSetNonZero(static_cast<uint32_t>(n >> 32));
   } else {
     return FindLSBSetNonZero(bottombits);
   }
@@ -522,7 +524,7 @@ inline int Bits::FindLSBSetNonZero64(uint64 n) {
 // Variable-length integer encoding.
 class Varint {
  public:
-  // Maximum lengths of varint encoding of uint32.
+  // Maximum lengths of varint encoding of uint32_t.
   static const int kMax32 = 5;
 
   // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
@@ -531,23 +533,23 @@ class Varint {
   // past the last byte of the varint32. Else returns NULL.  On success,
   // "result <= limit".
   static const char* Parse32WithLimit(const char* ptr, const char* limit,
-                                      uint32* OUTPUT);
+                                      uint32_t* OUTPUT);
 
   // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
   // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
   //            byte just past the last encoded byte.
-  static char* Encode32(char* ptr, uint32 v);
+  static char* Encode32(char* ptr, uint32_t v);
 
   // EFFECTS    Appends the varint representation of "value" to "*s".
-  static void Append32(std::string* s, uint32 value);
+  static void Append32(std::string* s, uint32_t value);
 };
 
 inline const char* Varint::Parse32WithLimit(const char* p,
                                             const char* l,
-                                            uint32* OUTPUT) {
+                                            uint32_t* OUTPUT) {
   const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
   const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
-  uint32 b, result;
+  uint32_t b, result;
   if (ptr >= limit) return NULL;
   b = *(ptr++); result = b & 127;          if (b < 128) goto done;
   if (ptr >= limit) return NULL;
@@ -564,7 +566,7 @@ inline const char* Varint::Parse32WithLimit(const char* p,
   return reinterpret_cast<const char*>(ptr);
 }
 
-inline char* Varint::Encode32(char* sptr, uint32 v) {
+inline char* Varint::Encode32(char* sptr, uint32_t v) {
   // Operate on characters as unsigneds
   unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
   static const int B = 128;
diff --git a/snappy-stubs-public.h.in b/snappy-stubs-public.h.in
index 416ab99..82618df 100644
--- a/snappy-stubs-public.h.in
+++ b/snappy-stubs-public.h.in
@@ -36,7 +36,6 @@
 #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
 
 #include <cstddef>
-#include <cstdint>
 #include <string>
 
 #if ${HAVE_SYS_UIO_H_01}  // HAVE_SYS_UIO_H
@@ -51,15 +50,6 @@
 
 namespace snappy {
 
-using int8 = std::int8_t;
-using uint8 = std::uint8_t;
-using int16 = std::int16_t;
-using uint16 = std::uint16_t;
-using int32 = std::int32_t;
-using uint32 = std::uint32_t;
-using int64 = std::int64_t;
-using uint64 = std::uint64_t;
-
 #if !${HAVE_SYS_UIO_H_01}  // !HAVE_SYS_UIO_H
 // Windows does not have an iovec type, yet the concept is universally useful.
 // It is simple to define it ourselves, so we put it inside our own namespace.
diff --git a/snappy-test.cc b/snappy-test.cc
index 83be2d3..7149f2e 100644
--- a/snappy-test.cc
+++ b/snappy-test.cc
@@ -77,10 +77,10 @@ std::string StrFormat(const char* format, ...) {
 }
 
 bool benchmark_running = false;
-int64 benchmark_real_time_us = 0;
-int64 benchmark_cpu_time_us = 0;
+int64_t benchmark_real_time_us = 0;
+int64_t benchmark_cpu_time_us = 0;
 std::string* benchmark_label = nullptr;
-int64 benchmark_bytes_processed = 0;
+int64_t benchmark_bytes_processed = 0;
 
 void ResetBenchmarkTiming() {
   benchmark_real_time_us = 0;
@@ -170,13 +170,13 @@ void SetBenchmarkLabel(const std::string& str) {
   benchmark_label = new std::string(str);
 }
 
-void SetBenchmarkBytesProcessed(int64 bytes) {
+void SetBenchmarkBytesProcessed(int64_t bytes) {
   benchmark_bytes_processed = bytes;
 }
 
 struct BenchmarkRun {
-  int64 real_time_us;
-  int64 cpu_time_us;
+  int64_t real_time_us;
+  int64_t cpu_time_us;
 };
 
 struct BenchmarkCompareCPUTime {
@@ -224,12 +224,12 @@ void Benchmark::Run() {
                      benchmark_runs + kMedianPos,
                      benchmark_runs + kNumRuns,
                      BenchmarkCompareCPUTime());
-    int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
-    int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
+    int64_t real_time_us = benchmark_runs[kMedianPos].real_time_us;
+    int64_t cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
     if (cpu_time_us <= 0) {
       human_readable_speed = "?";
     } else {
-      int64 bytes_per_second =
+      int64_t bytes_per_second =
           benchmark_bytes_processed * 1000000 / cpu_time_us;
       if (bytes_per_second < 1024) {
         human_readable_speed =
diff --git a/snappy-test.h b/snappy-test.h
index c8b7d38..603f486 100644
--- a/snappy-test.h
+++ b/snappy-test.h
@@ -227,7 +227,7 @@ class CycleTimer {
   }
 
  private:
-  int64 real_time_us_;
+  int64_t real_time_us_;
 #ifdef WIN32
   LARGE_INTEGER start_;
 #else
@@ -272,7 +272,7 @@ void ResetBenchmarkTiming();
 void StartBenchmarkTiming();
 void StopBenchmarkTiming();
 void SetBenchmarkLabel(const std::string& str);
-void SetBenchmarkBytesProcessed(int64 bytes);
+void SetBenchmarkBytesProcessed(int64_t bytes);
 
 #ifdef HAVE_LIBZ
 
diff --git a/snappy.cc b/snappy.cc
--- a/snappy.cc
+++ b/snappy.cc
@@ -92,8 +92,8 @@ using internal::LITERAL;
 // compression for compressible input, and more speed for incompressible
 // input. Of course, it doesn't hurt if the hash function is reasonably fast
 // either, as it gets called a lot.
-static inline uint32 HashBytes(uint32 bytes, int shift) {
-  uint32 kMul = 0x1e35a7bd;
+static inline uint32_t HashBytes(uint32_t bytes, int shift) {
+  uint32_t kMul = 0x1e35a7bd;
   return (bytes * kMul) >> shift;
 }
 
@@ -388,9 +388,9 @@ static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
   assert(len_less_than_12 == (len < 12));
 
   if (len_less_than_12) {
-    uint32 u = (len << 2) + (offset << 8);
-    uint32 copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0);
-    uint32 copy2 = COPY_2_BYTE_OFFSET - (1 << 2);
+    uint32_t u = (len << 2) + (offset << 8);
+    uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0);
+    uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2);
     // It turns out that offset < 2048 is a difficult to predict branch.
     // `perf record` shows this is the highest percentage of branch misses in
     // benchmarks. This code produces branch free code, the data dependency
@@ -402,7 +402,7 @@ static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
   } else {
     // Write 4 bytes, though we only care about 3 of them.  The output buffer
     // is required to have some slack, so the extra byte won't overrun it.
-    uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
+    uint32_t u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
     LittleEndian::Store32(op, u);
     op += 3;
   }
@@ -441,7 +441,7 @@ static inline char* EmitCopy(char* op, size_t offset, size_t len) {
 }
 
 bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
-  uint32 v = 0;
+  uint32_t v = 0;
   const char* limit = start + n;
   if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
     *result = v;
@@ -452,7 +452,7 @@ bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
 }
 
 namespace {
-uint32 CalculateTableSize(uint32 input_size) {
+uint32_t CalculateTableSize(uint32_t input_size) {
   static_assert(
       kMaxHashTableSize >= kMinHashTableSize,
       "kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
@@ -475,7 +475,7 @@ WorkingMemory::WorkingMemory(size_t input_size) {
   size_ = table_size * sizeof(*table_) + max_fragment_size +
           MaxCompressedLength(max_fragment_size);
   mem_ = std::allocator<char>().allocate(size_);
-  table_ = reinterpret_cast<uint16*>(mem_);
+  table_ = reinterpret_cast<uint16_t*>(mem_);
   input_ = mem_ + table_size * sizeof(*table_);
   output_ = input_ + max_fragment_size;
 }
@@ -484,7 +484,7 @@ WorkingMemory::~WorkingMemory() {
   std::allocator<char>().deallocate(mem_, size_);
 }
 
-uint16* WorkingMemory::GetHashTable(size_t fragment_size,
+uint16_t* WorkingMemory::GetHashTable(size_t fragment_size,
                                     int* table_size) const {
   const size_t htsize = CalculateTableSize(fragment_size);
   memset(table_, 0, htsize * sizeof(*table_));
@@ -508,7 +508,7 @@ namespace internal {
 char* CompressFragment(const char* input,
                        size_t input_size,
                        char* op,
-                       uint16* table,
+                       uint16_t* table,
                        const int table_size) {
   // "ip" is the input pointer, and "op" is the output pointer.
   const char* ip = input;
@@ -523,11 +523,11 @@ char* CompressFragment(const char* input,
   if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
     const char* ip_limit = input + input_size - kInputMarginBytes;
 
-    for (uint32 preload = LittleEndian::Load32(ip + 1);;) {
+    for (uint32_t preload = LittleEndian::Load32(ip + 1);;) {
       // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
       // [next_emit, ip_end) after the main loop.
       const char* next_emit = ip++;
-      uint64 data = LittleEndian::Load64(ip);
+      uint64_t data = LittleEndian::Load64(ip);
       // The body of this loop calls EmitLiteral once and then EmitCopy one or
       // more times.  (The exception is that when we're close to exhausting
       // the input we goto emit_remainder.)
@@ -553,7 +553,7 @@ char* CompressFragment(const char* input,
       // The "skip" variable keeps track of how many bytes there are since the
      // last match; dividing it by 32 (ie. right-shifting by five) gives the
      // number of bytes to move ahead for each iteration.
-      uint32 skip = 32;
+      uint32_t skip = 32;
 
       const char* candidate;
       if (ip_limit - ip >= 16) {
@@ -564,9 +564,9 @@ char* CompressFragment(const char* input,
           // These for-loops are meant to be unrolled. So we can freely
           // special case the first iteration to use the value already
           // loaded in preload.
-          uint32 dword = i == 0 ? preload : data;
+          uint32_t dword = i == 0 ? preload : data;
           assert(dword == LittleEndian::Load32(ip + i));
-          uint32 hash = HashBytes(dword, shift);
+          uint32_t hash = HashBytes(dword, shift);
           candidate = base_ip + table[hash];
           assert(candidate >= base_ip);
           assert(candidate < ip + i);
@@ -586,9 +586,9 @@ char* CompressFragment(const char* input,
           skip += 16;
         }
         while (true) {
-          assert(static_cast<uint32>(data) == LittleEndian::Load32(ip));
-          uint32 hash = HashBytes(data, shift);
-          uint32 bytes_between_hash_lookups = skip >> 5;
+          assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
+          uint32_t hash = HashBytes(data, shift);
+          uint32_t bytes_between_hash_lookups = skip >> 5;
           skip += bytes_between_hash_lookups;
           const char* next_ip = ip + bytes_between_hash_lookups;
           if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
@@ -600,7 +600,7 @@ char* CompressFragment(const char* input,
           assert(candidate < ip);
 
           table[hash] = ip - base_ip;
-          if (SNAPPY_PREDICT_FALSE(static_cast<uint32>(data) ==
+          if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
                                    LittleEndian::Load32(candidate))) {
             break;
           }
@@ -649,7 +649,7 @@ char* CompressFragment(const char* input,
         // we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)].
         table[HashBytes(LittleEndian::Load32(ip - 1), shift)] =
             ip - base_ip - 1;
-        uint32 hash = HashBytes(data, shift);
+        uint32_t hash = HashBytes(data, shift);
         candidate = base_ip + table[hash];
         table[hash] = ip - base_ip;
         // Measurements on the benchmarks have shown the following probabilities
@@ -662,7 +662,7 @@ char* CompressFragment(const char* input,
         // BM_Flat/11  gaviota  p = 0.1
         // BM_Flat/12  cp       p = 0.5
         // BM_Flat/13  c        p = 0.3
-      } while (static_cast<uint32>(data) == LittleEndian::Load32(candidate));
+      } while (static_cast<uint32_t>(data) == LittleEndian::Load32(candidate));
       // Because the least significant 5 bytes matched, we can utilize data
       // for the next iteration.
       preload = data >> 8;
@@ -714,7 +714,7 @@ static inline void Report(const char *algorithm, size_t compressed_size,
 //  //    inlined so that no actual address of the local variable needs to be
 //  //    taken.
 //  bool Append(const char* ip, size_t length, T* op);
-//  bool AppendFromSelf(uint32 offset, size_t length, T* op);
+//  bool AppendFromSelf(uint32_t offset, size_t length, T* op);
 //
 //  // The rules for how TryFastAppend differs from Append are somewhat
 //  // convoluted:
@@ -739,22 +739,22 @@ static inline void Report(const char *algorithm, size_t compressed_size,
 //  bool TryFastAppend(const char* ip, size_t available, size_t length, T* op);
 // };
 
-static inline uint32 ExtractLowBytes(uint32 v, int n) {
+static inline uint32_t ExtractLowBytes(uint32_t v, int n) {
   assert(n >= 0);
   assert(n <= 4);
 #if SNAPPY_HAVE_BMI2
   return _bzhi_u32(v, 8 * n);
 #else
-  // This needs to be wider than uint32 otherwise `mask << 32` will be
+  // This needs to be wider than uint32_t otherwise `mask << 32` will be
   // undefined.
-  uint64 mask = 0xffffffff;
+  uint64_t mask = 0xffffffff;
   return v & ~(mask << (8 * n));
 #endif
 }
 
-static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
+static inline bool LeftShiftOverflows(uint8_t value, uint32_t shift) {
   assert(shift < 32);
-  static const uint8 masks[] = {
+  static const uint8_t masks[] = {
       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
@@ -771,7 +771,7 @@ class SnappyDecompressor {
   // If ip < ip_limit_min_maxtaglen_ it's safe to read kMaxTagLength from
   // buffer.
   const char* ip_limit_min_maxtaglen_;
-  uint32 peeked_;                    // Bytes peeked from reader (need to skip)
+  uint32_t peeked_;                  // Bytes peeked from reader (need to skip)
   bool eof_;                         // Hit end of input without an error?
   char scratch_[kMaximumTagLength];  // See RefillTag().
@@ -809,11 +809,11 @@ class SnappyDecompressor {
   // Read the uncompressed length stored at the start of the compressed data.
   // On success, stores the length in *result and returns true.
   // On failure, returns false.
-  bool ReadUncompressedLength(uint32* result) {
+  bool ReadUncompressedLength(uint32_t* result) {
     assert(ip_ == NULL);  // Must not have read anything yet
     // Length is encoded in 1..5 bytes
     *result = 0;
-    uint32 shift = 0;
+    uint32_t shift = 0;
     while (true) {
       if (shift >= 32) return false;
       size_t n;
@@ -821,8 +821,8 @@ class SnappyDecompressor {
       if (n == 0) return false;
       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
       reader_->Skip(1);
-      uint32 val = c & 0x7f;
-      if (LeftShiftOverflows(static_cast<uint8>(val), shift)) return false;
+      uint32_t val = c & 0x7f;
+      if (LeftShiftOverflows(static_cast<uint8_t>(val), shift)) return false;
      *result |= val << shift;
       if (c < 128) {
         break;
@@ -853,14 +853,14 @@ class SnappyDecompressor {
       ip = ip_; \
       ResetLimit(ip); \
     } \
-    preload = static_cast<uint8>(*ip)
+    preload = static_cast<uint8_t>(*ip)
 
     // At the start of the for loop below the least significant byte of preload
     // contains the tag.
-    uint32 preload;
+    uint32_t preload;
     MAYBE_REFILL();
     for ( ;; ) {
-      const uint8 c = static_cast<uint8>(preload);
+      const uint8_t c = static_cast<uint8_t>(preload);
       ip++;
 
       // Ratio of iterations that have LITERAL vs non-LITERAL for different
@@ -883,7 +883,7 @@ class SnappyDecompressor {
           // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
           // will not return true unless there's already at least five spare
           // bytes in addition to the literal.
-          preload = static_cast<uint8>(*ip);
+          preload = static_cast<uint8_t>(*ip);
           continue;
         }
         if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
@@ -919,15 +919,15 @@ class SnappyDecompressor {
         if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
       } else {
-        const uint32 entry = char_table[c];
+        const uint32_t entry = char_table[c];
         preload = LittleEndian::Load32(ip);
-        const uint32 trailer = ExtractLowBytes(preload, c & 3);
-        const uint32 length = entry & 0xff;
+        const uint32_t trailer = ExtractLowBytes(preload, c & 3);
+        const uint32_t length = entry & 0xff;
 
         // copy_offset/256 is encoded in bits 8..10.  By just fetching
         // those bits, we get copy_offset (since the bit-field starts at
         // bit 8).
-        const uint32 copy_offset = (entry & 0x700) + trailer;
+        const uint32_t copy_offset = (entry & 0x700) + trailer;
         if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
 
         ip += (c & 3);
@@ -961,12 +961,12 @@ bool SnappyDecompressor::RefillTag() {
   // Read the tag character
   assert(ip < ip_limit_);
   const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
-  const uint32 entry = char_table[c];
-  const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
+  const uint32_t entry = char_table[c];
+  const uint32_t needed = (entry >> 11) + 1;  // +1 byte for 'c'
   assert(needed <= sizeof(scratch_));
 
   // Read more bytes from reader if needed
-  uint32 nbuf = ip_limit_ - ip;
+  uint32_t nbuf = ip_limit_ - ip;
   if (nbuf < needed) {
     // Stitch together bytes from ip and reader to form the word
     // contents.  We store the needed bytes in "scratch_".  They
@@ -979,7 +979,7 @@ bool SnappyDecompressor::RefillTag() {
       size_t length;
       const char* src = reader_->Peek(&length);
       if (length == 0) return false;
-      uint32 to_add = std::min<uint32>(needed - nbuf, length);
+      uint32_t to_add = std::min<uint32_t>(needed - nbuf, length);
       std::memcpy(scratch_ + nbuf, src, to_add);
       nbuf += to_add;
       reader_->Skip(to_add);
@@ -1006,7 +1006,7 @@ template <typename Writer>
 static bool InternalUncompress(Source* r, Writer* writer) {
   // Read the uncompressed length from the front of the compressed input
   SnappyDecompressor decompressor(r);
-  uint32 uncompressed_len = 0;
+  uint32_t uncompressed_len = 0;
   if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
 
   return InternalUncompressAllTags(&decompressor, writer, r->Available(),
@@ -1016,8 +1016,8 @@ static bool InternalUncompress(Source* r, Writer* writer) {
 template <typename Writer>
 static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
                                       Writer* writer,
-                                      uint32 compressed_len,
-                                      uint32 uncompressed_len) {
+                                      uint32_t compressed_len,
+                                      uint32_t uncompressed_len) {
   Report("snappy_uncompress", compressed_len, uncompressed_len);
 
   writer->SetExpectedLength(uncompressed_len);
@@ -1028,7 +1028,7 @@ static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
   return (decompressor->eof() && writer->CheckLength());
 }
 
-bool GetUncompressedLength(Source* source, uint32* result) {
+bool GetUncompressedLength(Source* source, uint32_t* result) {
   SnappyDecompressor decompressor(source);
   return decompressor.ReadUncompressedLength(result);
 }
@@ -1077,7 +1077,7 @@ size_t Compress(Source* reader, Sink* writer) {
 
     // Get encoding table for compression
     int table_size;
-    uint16* table = wmem.GetHashTable(num_to_read, &table_size);
+    uint16_t* table = wmem.GetHashTable(num_to_read, &table_size);
 
     // Compress input_fragment and append to dest
     const int max_output = MaxCompressedLength(num_to_read);
@@ -1713,7 +1713,7 @@ size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
 bool Uncompress(Source* compressed, Sink* uncompressed) {
   // Read the uncompressed length from the front of the compressed input
   SnappyDecompressor decompressor(compressed);
-  uint32 uncompressed_len = 0;
+  uint32_t uncompressed_len = 0;
   if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
     return false;
   }
diff --git a/snappy.h b/snappy.h
--- a/snappy.h
+++ b/snappy.h
@@ -40,6 +40,7 @@
 #define THIRD_PARTY_SNAPPY_SNAPPY_H__
 
 #include <cstddef>
+#include <cstdint>
 #include <string>
 
 #include "snappy-stubs-public.h"
@@ -63,7 +64,7 @@ namespace snappy {
   // Also note that this leaves "*source" in a state that is unsuitable for
   // further operations, such as RawUncompress(). You will need to rewind
   // or recreate the source yourself before attempting any further calls.
-  bool GetUncompressedLength(Source* source, uint32* result);
+  bool GetUncompressedLength(Source* source, uint32_t* result);
 
   // ------------------------------------------------------------------------
   // Higher-level string based routines (should be sufficient for most users)
diff --git a/snappy_unittest.cc b/snappy_unittest.cc
index dd6b665..db44c90 100644
--- a/snappy_unittest.cc
+++ b/snappy_unittest.cc
@@ -182,9 +182,9 @@ static bool Compress(const char* input, size_t input_size, CompressorType comp,
       unsigned char* mem = new unsigned char[LZO1X_1_15_MEM_COMPRESS];
       lzo_uint destlen;
       int ret = lzo1x_1_15_compress(
-          reinterpret_cast<const uint8*>(input),
+          reinterpret_cast<const uint8_t*>(input),
           input_size,
-          reinterpret_cast<uint8*>(string_as_array(compressed)),
+          reinterpret_cast<uint8_t*>(string_as_array(compressed)),
           &destlen,
           mem);
       CHECK_EQ(LZO_E_OK, ret);
@@ -239,9 +239,9 @@ static bool Uncompress(const std::string& compressed, CompressorType comp,
       output->resize(size);
       lzo_uint destlen;
       int ret = lzo1x_decompress(
-          reinterpret_cast<const uint8*>(compressed.data()),
+          reinterpret_cast<const uint8_t*>(compressed.data()),
           compressed.size(),
-          reinterpret_cast<uint8*>(string_as_array(output)),
+          reinterpret_cast<uint8_t*>(string_as_array(output)),
           &destlen,
           NULL);
       CHECK_EQ(LZO_E_OK, ret);
@@ -352,7 +352,7 @@ static void Measure(const char* data,
          "comp %5.1f MB/s  uncomp %5s MB/s\n",
          x.c_str(),
          block_size/(1<<20),
-         static_cast<int>(length), static_cast<uint32>(compressed_size),
+         static_cast<int>(length), static_cast<uint32_t>(compressed_size),
          (compressed_size * 100.0) / std::max<int>(1, length),
          comp_rate,
          urate.c_str());
@@ -451,7 +451,7 @@ static void VerifyNonBlockedCompression(const std::string& input) {
   // Setup compression table
   snappy::internal::WorkingMemory wmem(input.size());
   int table_size;
-  uint16* table = wmem.GetHashTable(input.size(), &table_size);
+  uint16_t* table = wmem.GetHashTable(input.size(), &table_size);
 
   // Compress entire input in one shot
   std::string compressed;
@@ -588,7 +588,7 @@ TEST(CorruptedTest, VerifyCorrupted) {
     size_t ulen;
     CHECK(!snappy::GetUncompressedLength(data.data(), data.size(), &ulen)
           || (ulen < (1<<20)));
-    uint32 ulen2;
+    uint32_t ulen2;
     snappy::ByteArraySource source(data.data(), data.size());
     CHECK(!snappy::GetUncompressedLength(&source, &ulen2) ||
           (ulen2 < (1<<20)));
@@ -879,7 +879,7 @@ static bool CheckUncompressedLength(const std::string& compressed,
                                               ulength);
 
   snappy::ByteArraySource source(compressed.data(), compressed.size());
-  uint32 length;
+  uint32_t length;
   const bool result2 = snappy::GetUncompressedLength(&source, &length);
   CHECK_EQ(result1, result2);
   return result1;
@@ -957,7 +957,7 @@ TEST(Snappy, ZeroOffsetCopyValidation) {
 namespace {
 
 int TestFindMatchLength(const char* s1, const char *s2, unsigned length) {
-  uint64 data;
+  uint64_t data;
   std::pair<size_t, bool> p =
       snappy::internal::FindMatchLength(s1, s2, s2 + length, &data);
   CHECK_EQ(p.first < 8, p.second);
@@ -1086,7 +1086,7 @@ TEST(Snappy, FindMatchLengthRandom) {
   }
 }
 
-static uint16 MakeEntry(unsigned int extra,
+static uint16_t MakeEntry(unsigned int extra,
                         unsigned int len,
                         unsigned int copy_offset) {
   // Check that all of the fields fit within the allocated space
@@ -1105,7 +1105,7 @@ TEST(Snappy, VerifyCharTable) {
   using snappy::internal::COPY_4_BYTE_OFFSET;
   using snappy::internal::char_table;
 
-  uint16 dst[256];
+  uint16_t dst[256];
 
   // Place invalid entries in all places to detect missing initialization
   int assigned = 0;
@@ -1164,7 +1164,7 @@ TEST(Snappy, VerifyCharTable) {
   }
 
   if (FLAGS_snappy_dump_decompression_table) {
-    printf("static const uint16 char_table[256] = {\n ");
+    printf("static const uint16_t char_table[256] = {\n ");
     for (int i = 0; i < 256; i++) {
       printf("0x%04x%s",
              dst[i],
@@ -1266,8 +1266,8 @@ static void BM_UFlat(int iters, int arg) {
   snappy::Compress(contents.data(), contents.size(), &zcontents);
   char* dst = new char[contents.size()];
 
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
-                             static_cast<int64>(contents.size()));
+  SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
+                             static_cast<int64_t>(contents.size()));
   SetBenchmarkLabel(files[arg].label);
   StartBenchmarkTiming();
   while (iters-- > 0) {
@@ -1291,8 +1291,8 @@ static void BM_UValidate(int iters, int arg) {
   std::string zcontents;
   snappy::Compress(contents.data(), contents.size(), &zcontents);
 
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
-                             static_cast<int64>(contents.size()));
+  SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
+                             static_cast<int64_t>(contents.size()));
   SetBenchmarkLabel(files[arg].label);
   StartBenchmarkTiming();
   while (iters-- > 0) {
@@ -1334,8 +1334,8 @@ static void BM_UIOVec(int iters, int arg) {
     used_so_far += iov[i].iov_len;
   }
 
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
-                             static_cast<int64>(contents.size()));
+  SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
+                             static_cast<int64_t>(contents.size()));
   SetBenchmarkLabel(files[arg].label);
   StartBenchmarkTiming();
   while (iters-- > 0) {
@@ -1361,8 +1361,8 @@ static void BM_UFlatSink(int iters, int arg) {
   snappy::Compress(contents.data(), contents.size(), &zcontents);
   char* dst = new char[contents.size()];
 
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
-                             static_cast<int64>(contents.size()));
+  SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
+                             static_cast<int64_t>(contents.size()));
   SetBenchmarkLabel(files[arg].label);
   StartBenchmarkTiming();
   while (iters-- > 0) {
@@ -1391,8 +1391,8 @@ static void BM_ZFlat(int iters, int arg) {
 
   char* dst = new char[snappy::MaxCompressedLength(contents.size())];
 
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) *
-                             static_cast<int64>(contents.size()));
+  SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
+                             static_cast<int64_t>(contents.size()));
   StartBenchmarkTiming();
 
   size_t zsize = 0;
@@ -1420,14 +1420,14 @@ static void BM_ZFlatAll(int iters, int arg) {
   std::vector<std::string> contents(num_files);
   std::vector<char*> dst(num_files);
 
-  int64 total_contents_size = 0;
+  int64_t total_contents_size = 0;
   for (int i = 0; i < num_files; ++i) {
     contents[i] = ReadTestDataFile(files[i].filename, files[i].size_limit);
     dst[i] = new char[snappy::MaxCompressedLength(contents[i].size())];
     total_contents_size += contents[i].size();
   }
 
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) * total_contents_size);
+  SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) * total_contents_size);
   StartBenchmarkTiming();
 
   size_t zsize = 0;
@@ -1456,7 +1456,7 @@ static void BM_ZFlatIncreasingTableSize(int iters, int arg) {
 
   std::vector<std::string> contents;
   std::vector<char*> dst;
-  int64 total_contents_size = 0;
+  int64_t total_contents_size = 0;
   for (int table_bits = kMinHashTableBits; table_bits <= kMaxHashTableBits;
        ++table_bits) {
     std::string content = base_content;
@@ -1467,7 +1467,7 @@
   }
 
   size_t zsize = 0;
-  SetBenchmarkBytesProcessed(static_cast<int64>(iters) * total_contents_size);
+  SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) * total_contents_size);
   StartBenchmarkTiming();
   while (iters-- > 0) {
     for (int i = 0; i < contents.size(); ++i) {
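
A closing note on the pattern that dominates the stubs above: unaligned loads and stores of the fixed-width types are done either through reinterpret_cast dereferences (on architectures known to permit them) or through std::memcpy into a local, which modern compilers fold into a single load instruction. A minimal standalone sketch of the portable variant (the function name here is illustrative, not from the patch):

```cpp
#include <cstdint>
#include <cstring>

// Portable unaligned 64-bit load, mirroring the UNALIGNED_LOAD64
// fallback in snappy-stubs-internal.h: memcpy avoids undefined behavior
// on architectures that trap on misaligned pointer dereferences.
inline std::uint64_t LoadUnaligned64(const void* p) {
  std::uint64_t t;
  std::memcpy(&t, p, sizeof t);  // typically lowered to one mov on x86-64
  return t;
}

int main() {
  unsigned char buf[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  // buf + 1 is misaligned for uint64_t; the memcpy form is still valid.
  return LoadUnaligned64(buf + 1) == 0 ? 1 : 0;
}
```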