summaryrefslogtreecommitdiff
path: root/snappy.cc
diff options
context:
space:
mode:
authorsnappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>2012-01-04 13:10:46 +0000
committersnappy.mirrorbot@gmail.com <snappy.mirrorbot@gmail.com@03e5f5b5-db94-4691-08a0-1a8bf15f6143>2012-01-04 13:10:46 +0000
commit9dfee3cc187181ae12b37f965d139b666b3a2948 (patch)
tree28e3bcc6c490aa9ebb9ca7f8ad2d585072fc150b /snappy.cc
parent03a8e62372a7a311372308eb104dada144720dce (diff)
downloadsnappy-9dfee3cc187181ae12b37f965d139b666b3a2948.tar.gz
Fix public issue r57: Fix most warnings with -Wall, mostly signed/unsigned
warnings. There are still some in the unit test, but the main .cc file should be clean. We haven't enabled -Wall for the default build, since the unit test is still not clean. This also fixes a real bug in the open-source implementation of ReadFileToStringOrDie(); it would not detect errors correctly. I had to go through some pains to avoid performance loss as the types were changed; I think there might still be some with 32-bit if and only if LFS is enabled (i.e., size_t is 64-bit), but for regular 32-bit and 64-bit I can't see any losses, and I've diffed the generated GCC assembler between the old and new code without seeing any significant changes. If anything, it's ever so slightly faster. This may or may not enable compression of very large blocks (>2^32 bytes) when size_t is 64-bit, but I haven't checked, and it is still not a supported case. git-svn-id: http://snappy.googlecode.com/svn/trunk@56 03e5f5b5-db94-4691-08a0-1a8bf15f6143
Diffstat (limited to 'snappy.cc')
-rw-r--r--snappy.cc51
1 file changed, 25 insertions, 26 deletions
diff --git a/snappy.cc b/snappy.cc
index b8b41cb..5dce19a 100644
--- a/snappy.cc
+++ b/snappy.cc
@@ -194,13 +194,13 @@ static inline char* EmitLiteral(char* op,
return op + len;
}
-static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
+static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
DCHECK_LE(len, 64);
DCHECK_GE(len, 4);
DCHECK_LT(offset, 65536);
if ((len < 12) && (offset < 2048)) {
- int len_minus_4 = len - 4;
+ size_t len_minus_4 = len - 4;
assert(len_minus_4 < 8); // Must fit in 3 bits
*op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
*op++ = offset & 0xff;
@@ -212,7 +212,7 @@ static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
return op;
}
-static inline char* EmitCopy(char* op, int offset, int len) {
+static inline char* EmitCopy(char* op, size_t offset, int len) {
// Emit 64 byte copies but make sure to keep at least four bytes reserved
while (len >= 68) {
op = EmitCopyLessThan64(op, offset, 64);
@@ -249,7 +249,7 @@ uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
// compression, and if the input is short, we won't need that
// many hash table entries anyway.
assert(kMaxHashTableSize >= 256);
- int htsize = 256;
+ size_t htsize = 256;
while (htsize < kMaxHashTableSize && htsize < input_size) {
htsize <<= 1;
}
@@ -304,14 +304,14 @@ char* CompressFragment(const char* input,
CHECK_LE(input_size, kBlockSize);
CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
const int shift = 32 - Bits::Log2Floor(table_size);
- DCHECK_EQ(kuint32max >> shift, table_size - 1);
+ DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
const char* ip_end = input + input_size;
const char* base_ip = ip;
// Bytes in [next_emit, ip) will be emitted as literal bytes. Or
// [next_emit, ip_end) after the main loop.
const char* next_emit = ip;
- const int kInputMarginBytes = 15;
+ const size_t kInputMarginBytes = 15;
if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
const char* ip_limit = input + input_size - kInputMarginBytes;
@@ -387,7 +387,7 @@ char* CompressFragment(const char* input,
const char* base = ip;
int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
ip += matched;
- int offset = base - candidate;
+ size_t offset = base - candidate;
DCHECK_EQ(0, memcmp(base, candidate, matched));
op = EmitCopy(op, offset, matched);
// We could immediately start working at ip now, but to improve
@@ -435,8 +435,8 @@ char* CompressFragment(const char* input,
// bool CheckLength() const;
//
// // Called repeatedly during decompression
-// bool Append(const char* ip, uint32 length);
-// bool AppendFromSelf(uint32 offset, uint32 length);
+// bool Append(const char* ip, size_t length);
+// bool AppendFromSelf(uint32 offset, size_t length);
//
// // The difference between TryFastAppend and Append is that TryFastAppend
// // is allowed to read up to <available> bytes from the input buffer,
@@ -453,7 +453,7 @@ char* CompressFragment(const char* input,
// // decoded fully. In practice, this should not be a big problem,
// // as it is unlikely that one would implement a fast path accepting
// // this much data.
-// bool TryFastAppend(const char* ip, uint32 available, uint32 length);
+// bool TryFastAppend(const char* ip, size_t available, size_t length);
// };
// -----------------------------------------------------------------------
@@ -601,7 +601,6 @@ static void ComputeTable() {
CHECK_EQ(dst[i], char_table[i]);
}
}
-REGISTER_MODULE_INITIALIZER(snappy, ComputeTable());
#endif /* !NDEBUG */
// Helper class for decompression
@@ -686,7 +685,7 @@ class SnappyDecompressor {
const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
if ((c & 0x3) == LITERAL) {
- uint32 literal_length = (c >> 2) + 1;
+ size_t literal_length = (c >> 2) + 1u;
if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
DCHECK_LT(literal_length, 61);
ip += literal_length;
@@ -695,13 +694,13 @@ class SnappyDecompressor {
}
if (PREDICT_FALSE(literal_length >= 61)) {
// Long literal.
- const uint32 literal_length_length = literal_length - 60;
+ const size_t literal_length_length = literal_length - 60;
literal_length =
(LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
ip += literal_length_length;
}
- uint32 avail = ip_limit_ - ip;
+ size_t avail = ip_limit_ - ip;
while (avail < literal_length) {
if (!writer->Append(ip, avail)) return;
literal_length -= avail;
@@ -825,7 +824,7 @@ bool GetUncompressedLength(Source* source, uint32* result) {
size_t Compress(Source* reader, Sink* writer) {
size_t written = 0;
- int N = reader->Available();
+ size_t N = reader->Available();
char ulength[Varint::kMax32];
char* p = Varint::Encode32(ulength, N);
writer->Append(ulength, p-ulength);
@@ -840,10 +839,10 @@ size_t Compress(Source* reader, Sink* writer) {
size_t fragment_size;
const char* fragment = reader->Peek(&fragment_size);
DCHECK_NE(fragment_size, 0) << ": premature end of input";
- const int num_to_read = min(N, kBlockSize);
+ const size_t num_to_read = min(N, kBlockSize);
size_t bytes_read = fragment_size;
- int pending_advance = 0;
+ size_t pending_advance = 0;
if (bytes_read >= num_to_read) {
// Buffer returned by reader is large enough
pending_advance = num_to_read;
@@ -931,9 +930,9 @@ class SnappyArrayWriter {
return op_ == op_limit_;
}
- inline bool Append(const char* ip, uint32 len) {
+ inline bool Append(const char* ip, size_t len) {
char* op = op_;
- const int space_left = op_limit_ - op;
+ const size_t space_left = op_limit_ - op;
if (space_left < len) {
return false;
}
@@ -942,9 +941,9 @@ class SnappyArrayWriter {
return true;
}
- inline bool TryFastAppend(const char* ip, uint32 available, uint32 len) {
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
char* op = op_;
- const int space_left = op_limit_ - op;
+ const size_t space_left = op_limit_ - op;
if (len <= 16 && available >= 16 && space_left >= 16) {
// Fast path, used for the majority (about 95%) of invocations.
UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
@@ -956,9 +955,9 @@ class SnappyArrayWriter {
}
}
- inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ inline bool AppendFromSelf(size_t offset, size_t len) {
char* op = op_;
- const int space_left = op_limit_ - op;
+ const size_t space_left = op_limit_ - op;
if (op - base_ <= offset - 1u) { // -1u catches offset==0
return false;
@@ -1022,14 +1021,14 @@ class SnappyDecompressionValidator {
inline bool CheckLength() const {
return expected_ == produced_;
}
- inline bool Append(const char* ip, uint32 len) {
+ inline bool Append(const char* ip, size_t len) {
produced_ += len;
return produced_ <= expected_;
}
- inline bool TryFastAppend(const char* ip, uint32 available, uint32 length) {
+ inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
return false;
}
- inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ inline bool AppendFromSelf(size_t offset, size_t len) {
if (produced_ <= offset - 1u) return false; // -1u catches offset==0
produced_ += len;
return produced_ <= expected_;