 snappy-stubs-internal.h |  5
 snappy-test.cc          |  2
 snappy-test.h           |  2
 snappy.cc               | 51
 snappy.h                |  4
 snappy_unittest.cc      |  4
 6 files changed, 32 insertions(+), 36 deletions(-)
diff --git a/snappy-stubs-internal.h b/snappy-stubs-internal.h
index 0215288..12ba1ab 100644
--- a/snappy-stubs-internal.h
+++ b/snappy-stubs-internal.h
@@ -86,10 +86,9 @@ using namespace std;
// version (anyone who wants to regenerate it can just do the call
// themselves within main()).
#define DEFINE_bool(flag_name, default_value, description) \
- bool FLAGS_ ## flag_name = default_value;
+ bool FLAGS_ ## flag_name = default_value
#define DECLARE_bool(flag_name) \
- extern bool FLAGS_ ## flag_name;
-#define REGISTER_MODULE_INITIALIZER(name, code)
+ extern bool FLAGS_ ## flag_name
namespace snappy {
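With the trailing semicolons moved out of the expansions, each use site now supplies its own, matching how the real gflags macros are written and avoiding a stray empty statement at namespace scope. A minimal sketch of the contract (the flag name here is hypothetical):

    // In one .cc file: defines the flag's storage (the description is
    // ignored by this stub implementation).
    DEFINE_bool(verbose_logging, false, "Enable chatty output");

    // In any other file that wants to read it:
    DECLARE_bool(verbose_logging);

    void MaybeLog() {
      if (FLAGS_verbose_logging) {
        // ...
      }
    }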
diff --git a/snappy-test.cc b/snappy-test.cc
index 2c50388..223cd92 100644
--- a/snappy-test.cc
+++ b/snappy-test.cc
@@ -353,7 +353,6 @@ int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
// compression.
err = deflate(&comp_stream_, flush_mode);
- const uLong source_bytes_consumed = *sourceLen - comp_stream_.avail_in;
*sourceLen = comp_stream_.avail_in;
if ((err == Z_STREAM_END || err == Z_OK)
@@ -397,7 +396,6 @@ int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
int ZLib::Compress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen) {
int err;
- const uLongf orig_destLen = *destLen;
if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
Z_FINISH)) != Z_OK )
return err;
diff --git a/snappy-test.h b/snappy-test.h
index 649f26e..ef6a955 100644
--- a/snappy-test.h
+++ b/snappy-test.h
@@ -135,7 +135,7 @@ namespace File {
while (!feof(fp)) {
char buf[4096];
size_t ret = fread(buf, 1, 4096, fp);
- if (ret == -1) {
+ if (ret == 0 && ferror(fp)) {
perror("fread");
exit(1);
}
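The old test could never fire: fread() returns a size_t count of items read, so `ret == -1` compares against SIZE_MAX, a value fread never produces. A short count means EOF or error, and only ferror() tells the two apart. A self-contained sketch of the corrected pattern, independent of this codebase:

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    std::string ReadAll(FILE* fp) {
      std::string data;
      char buf[4096];
      while (!feof(fp)) {
        size_t ret = fread(buf, 1, sizeof(buf), fp);
        if (ret == 0 && ferror(fp)) {  // a bare 0 may simply mean EOF
          perror("fread");
          exit(1);
        }
        data.append(buf, ret);
      }
      return data;
    }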
diff --git a/snappy.cc b/snappy.cc
index b8b41cb..5dce19a 100644
--- a/snappy.cc
+++ b/snappy.cc
@@ -194,13 +194,13 @@ static inline char* EmitLiteral(char* op,
return op + len;
}
-static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
+static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
DCHECK_LE(len, 64);
DCHECK_GE(len, 4);
DCHECK_LT(offset, 65536);
if ((len < 12) && (offset < 2048)) {
- int len_minus_4 = len - 4;
+ size_t len_minus_4 = len - 4;
assert(len_minus_4 < 8); // Must fit in 3 bits
*op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
*op++ = offset & 0xff;
@@ -212,7 +212,7 @@ static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
return op;
}
-static inline char* EmitCopy(char* op, int offset, int len) {
+static inline char* EmitCopy(char* op, size_t offset, int len) {
// Emit 64 byte copies but make sure to keep at least four bytes reserved
while (len >= 68) {
op = EmitCopyLessThan64(op, offset, 64);
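For reference, the bit layout these emitters produce (as described by the snappy format itself; not new behavior in this change):

    // COPY_1_BYTE_OFFSET, two bytes total; requires len in [4,11] and
    // offset < 2048:
    //   byte 0:   [offset >> 8 : 3 bits][len - 4 : 3 bits][tag : 2 bits]
    //   byte 1:   offset & 0xff
    // COPY_2_BYTE_OFFSET, three bytes total; len in [1,64], offset < 65536:
    //   byte 0:   [len - 1 : 6 bits][tag : 2 bits]
    //   bytes 1-2: offset, 16-bit little-endian
    // EmitCopy() therefore chops long matches into 64-byte copies while
    // keeping >= 4 bytes in reserve, so the final piece still satisfies
    // EmitCopyLessThan64's len >= 4 precondition.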
@@ -249,7 +249,7 @@ uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
// compression, and if the input is short, we won't need that
// many hash table entries anyway.
assert(kMaxHashTableSize >= 256);
- int htsize = 256;
+ size_t htsize = 256;
while (htsize < kMaxHashTableSize && htsize < input_size) {
htsize <<= 1;
}
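The loop above simply rounds up to the smallest power of two covering the input, clamped to [256, kMaxHashTableSize]. The same computation in isolation (function name hypothetical):

    #include <cstddef>

    // Smallest power of two >= input_size, clamped to [256, max_size].
    // max_size must itself be a power of two, as kMaxHashTableSize is.
    size_t SuggestHashTableSize(size_t input_size, size_t max_size) {
      size_t htsize = 256;
      while (htsize < max_size && htsize < input_size) {
        htsize <<= 1;
      }
      return htsize;
    }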
@@ -304,14 +304,14 @@ char* CompressFragment(const char* input,
CHECK_LE(input_size, kBlockSize);
CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
const int shift = 32 - Bits::Log2Floor(table_size);
- DCHECK_EQ(kuint32max >> shift, table_size - 1);
+ DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
const char* ip_end = input + input_size;
const char* base_ip = ip;
// Bytes in [next_emit, ip) will be emitted as literal bytes. Or
// [next_emit, ip_end) after the main loop.
const char* next_emit = ip;
- const int kInputMarginBytes = 15;
+ const size_t kInputMarginBytes = 15;
if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
const char* ip_limit = input + input_size - kInputMarginBytes;
@@ -387,7 +387,7 @@ char* CompressFragment(const char* input,
const char* base = ip;
int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
ip += matched;
- int offset = base - candidate;
+ size_t offset = base - candidate;
DCHECK_EQ(0, memcmp(base, candidate, matched));
op = EmitCopy(op, offset, matched);
// We could immediately start working at ip now, but to improve
@@ -435,8 +435,8 @@ char* CompressFragment(const char* input,
// bool CheckLength() const;
//
// // Called repeatedly during decompression
-// bool Append(const char* ip, uint32 length);
-// bool AppendFromSelf(uint32 offset, uint32 length);
+// bool Append(const char* ip, size_t length);
+// bool AppendFromSelf(uint32 offset, size_t length);
//
// // The difference between TryFastAppend and Append is that TryFastAppend
// // is allowed to read up to <available> bytes from the input buffer,
@@ -453,7 +453,7 @@ char* CompressFragment(const char* input,
// // decoded fully. In practice, this should not be a big problem,
// // as it is unlikely that one would implement a fast path accepting
// // this much data.
-// bool TryFastAppend(const char* ip, uint32 available, uint32 length);
+// bool TryFastAppend(const char* ip, size_t available, size_t length);
// };
// -----------------------------------------------------------------------
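To make the size_t-ified contract concrete: a minimal Writer that only counts bytes, essentially what SnappyDecompressionValidator further down does. This is an illustrative sketch, not a class in the library:

    #include <cstddef>

    class CountingWriter {
      size_t expected_, produced_;
     public:
      CountingWriter() : expected_(0), produced_(0) {}
      void SetExpectedLength(size_t len) { expected_ = len; }
      bool CheckLength() const { return produced_ == expected_; }
      bool Append(const char* /*ip*/, size_t len) {
        produced_ += len;
        return produced_ <= expected_;
      }
      // Never takes the fast path; Append() will be called instead.
      bool TryFastAppend(const char*, size_t, size_t) { return false; }
      bool AppendFromSelf(size_t offset, size_t len) {
        if (produced_ <= offset - 1u) return false;  // also rejects offset == 0
        produced_ += len;
        return produced_ <= expected_;
      }
    };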
@@ -601,7 +601,6 @@ static void ComputeTable() {
CHECK_EQ(dst[i], char_table[i]);
}
}
-REGISTER_MODULE_INITIALIZER(snappy, ComputeTable());
#endif /* !NDEBUG */
// Helper class for decompression
@@ -686,7 +685,7 @@ class SnappyDecompressor {
const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
if ((c & 0x3) == LITERAL) {
- uint32 literal_length = (c >> 2) + 1;
+ size_t literal_length = (c >> 2) + 1u;
if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
DCHECK_LT(literal_length, 61);
ip += literal_length;
@@ -695,13 +694,13 @@ class SnappyDecompressor {
}
if (PREDICT_FALSE(literal_length >= 61)) {
// Long literal.
- const uint32 literal_length_length = literal_length - 60;
+ const size_t literal_length_length = literal_length - 60;
literal_length =
(LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
ip += literal_length_length;
}
- uint32 avail = ip_limit_ - ip;
+ size_t avail = ip_limit_ - ip;
while (avail < literal_length) {
if (!writer->Append(ip, avail)) return;
literal_length -= avail;
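The long-literal branch stores length - 1 in the 1-4 bytes after the tag, and wordmask[] keeps just those bytes from a single 32-bit load. The same step in isolation (assumes a little-endian host and, like the real code, that at least 4 readable bytes follow ip):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Masks keeping the low 0..4 bytes of a 32-bit load, as wordmask[] does.
    static const uint32_t kWordMask[] = {0, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu};

    // tag_len is the length field from the tag byte, in [61, 64].
    size_t DecodeLongLiteralLength(const char** ip, uint32_t tag_len) {
      size_t n = tag_len - 60;            // number of extra length bytes
      uint32_t word;
      memcpy(&word, *ip, sizeof(word));   // unaligned load; may over-read
      *ip += n;
      return (word & kWordMask[n]) + 1;   // stored value is length - 1
    }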
@@ -825,7 +824,7 @@ bool GetUncompressedLength(Source* source, uint32* result) {
size_t Compress(Source* reader, Sink* writer) {
size_t written = 0;
- int N = reader->Available();
+ size_t N = reader->Available();
char ulength[Varint::kMax32];
char* p = Varint::Encode32(ulength, N);
writer->Append(ulength, p-ulength);
@@ -840,10 +839,10 @@ size_t Compress(Source* reader, Sink* writer) {
size_t fragment_size;
const char* fragment = reader->Peek(&fragment_size);
DCHECK_NE(fragment_size, 0) << ": premature end of input";
- const int num_to_read = min(N, kBlockSize);
+ const size_t num_to_read = min(N, kBlockSize);
size_t bytes_read = fragment_size;
- int pending_advance = 0;
+ size_t pending_advance = 0;
if (bytes_read >= num_to_read) {
// Buffer returned by reader is large enough
pending_advance = num_to_read;
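Peek() exposes the Source's current fragment without consuming it; the code above defers the matching Skip() via pending_advance so the peeked bytes stay valid while they are being compressed. A hedged sketch of that consume-after-use pattern, assuming the Source interface from snappy-sinksource.h:

    #include <algorithm>
    #include <cstring>
    #include "snappy-sinksource.h"

    // Copies up to block_size bytes out of a Source, one fragment at a time.
    size_t ReadBlock(snappy::Source* reader, char* dst, size_t block_size) {
      size_t copied = 0;
      while (copied < block_size && reader->Available() > 0) {
        size_t fragment_size;
        const char* fragment = reader->Peek(&fragment_size);
        size_t n = std::min(fragment_size, block_size - copied);
        memcpy(dst + copied, fragment, n);
        copied += n;
        reader->Skip(n);  // consume only after the bytes have been used
      }
      return copied;
    }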
@@ -931,9 +930,9 @@ class SnappyArrayWriter {
return op_ == op_limit_;
}
- inline bool Append(const char* ip, uint32 len) {
+ inline bool Append(const char* ip, size_t len) {
char* op = op_;
- const int space_left = op_limit_ - op;
+ const size_t space_left = op_limit_ - op;
if (space_left < len) {
return false;
}
@@ -942,9 +941,9 @@ class SnappyArrayWriter {
return true;
}
- inline bool TryFastAppend(const char* ip, uint32 available, uint32 len) {
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
char* op = op_;
- const int space_left = op_limit_ - op;
+ const size_t space_left = op_limit_ - op;
if (len <= 16 && available >= 16 && space_left >= 16) {
// Fast path, used for the majority (about 95%) of invocations.
UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
@@ -956,9 +955,9 @@ class SnappyArrayWriter {
}
}
- inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ inline bool AppendFromSelf(size_t offset, size_t len) {
char* op = op_;
- const int space_left = op_limit_ - op;
+ const size_t space_left = op_limit_ - op;
if (op - base_ <= offset - 1u) { // -1u catches offset==0
return false;
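The `offset - 1u` comparison exploits unsigned wraparound: when offset is 0 it becomes the largest size_t, so a single compare rejects both a zero offset and a copy reaching before the start of the output. The equivalent spelled-out form:

    #include <cstddef>

    // True iff a back-reference is usable: offset != 0 and it does not
    // reach before the first byte produced so far.
    bool ValidBackref(size_t produced, size_t offset) {
      // Same as: offset != 0 && offset <= produced.
      return produced > offset - 1u;  // offset == 0 wraps to SIZE_MAX
    }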
@@ -1022,14 +1021,14 @@ class SnappyDecompressionValidator {
inline bool CheckLength() const {
return expected_ == produced_;
}
- inline bool Append(const char* ip, uint32 len) {
+ inline bool Append(const char* ip, size_t len) {
produced_ += len;
return produced_ <= expected_;
}
- inline bool TryFastAppend(const char* ip, uint32 available, uint32 length) {
+ inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
return false;
}
- inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ inline bool AppendFromSelf(size_t offset, size_t len) {
if (produced_ <= offset - 1u) return false; // -1u catches offset==0
produced_ += len;
return produced_ <= expected_;
diff --git a/snappy.h b/snappy.h
index 8d6ef22..8c2075f 100644
--- a/snappy.h
+++ b/snappy.h
@@ -144,10 +144,10 @@ namespace snappy {
// decompression code should not rely on this guarantee since older
// compression code may not obey it.
static const int kBlockLog = 15;
- static const int kBlockSize = 1 << kBlockLog;
+ static const size_t kBlockSize = 1 << kBlockLog;
static const int kMaxHashTableBits = 14;
- static const int kMaxHashTableSize = 1 << kMaxHashTableBits;
+ static const size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
} // end namespace snappy
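Typing these constants as size_t is what lets call sites like `min(N, kBlockSize)` above compile once N is a size_t: std::min deduces a single type for both arguments and refuses mixed ones. A tiny illustration:

    #include <algorithm>
    #include <cstddef>

    static const int    kBlockAsInt = 1 << 15;
    static const size_t kBlockSize  = 1 << 15;

    size_t NumToRead(size_t n) {
      // return std::min(n, kBlockAsInt);  // error: min(size_t, int) won't deduce
      return std::min(n, kBlockSize);      // OK: both arguments are size_t
    }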
diff --git a/snappy_unittest.cc b/snappy_unittest.cc
index 6fff333..0984e3e 100644
--- a/snappy_unittest.cc
+++ b/snappy_unittest.cc
@@ -300,7 +300,7 @@ static bool Uncompress(const string& compressed, CompressorType comp,
reinterpret_cast<const Bytef*>(compressed.data()),
compressed.size());
CHECK_EQ(Z_OK, ret);
- CHECK_EQ(destlen, size);
+ CHECK_EQ(static_cast<uLongf>(size), destlen);
break;
}
#endif // ZLIB_VERSION
@@ -316,7 +316,7 @@ static bool Uncompress(const string& compressed, CompressorType comp,
&destlen,
NULL);
CHECK_EQ(LZO_E_OK, ret);
- CHECK_EQ(destlen, size);
+ CHECK_EQ(static_cast<lzo_uint>(size), destlen);
break;
}
#endif // LZO_VERSION
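Both casts exist to compare like with like: `size` is signed in these tests (as the casts suggest) while destlen carries the library's unsigned type (uLongf for zlib, lzo_uint for LZO), and CHECK_EQ on mixed signedness draws -Wsign-compare. The shape of the fix, reduced to a sketch:

    #include <cassert>

    void CheckRoundTrip(int size, unsigned long destlen) {
      // assert(size == destlen);                           // -Wsign-compare
      assert(static_cast<unsigned long>(size) == destlen);  // clean comparison
    }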