summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author:    pkasting <pkasting@google.com>  2015-08-19 16:40:51 -0700
committer: Chris Mumford <cmumford@chromium.org>  2015-12-09 10:34:58 -0800
commit:    50e77a8263b19033b99ba49cdc45995595f1acf9 (patch)
tree:      b2c1120ce4c1fc3f9ecc2f3733aaa6f2a4ec8159
parent:    5208e7952d691e256a4ffe29888154068e8745b8 (diff)
download:  leveldb-50e77a8263b19033b99ba49cdc45995595f1acf9.tar.gz
Fix size_t/int comparison/conversion issues in leveldb.
The create function took |num_keys| as an int, but callers and implementers wanted it to function as a size_t (e.g. passing std::vector::size() in, passing it to vector constructors as a size arg, indexing containers by it, etc.). This resulted in implicit conversions between the two types as well as warnings (found with Chromium's external copy of these sources, built with MSVC) about signed vs. unsigned comparisons. The leveldb sources were already widely using size_t elsewhere, e.g. for key and filter lengths, so using size_t here is not inconsistent with the existing code. However, it does change the public C API. ------------- Created by MOE: https://github.com/google/moe MOE_MIGRATED_REVID=101074871
-rw-r--r--  table/filter_block.cc  | 4
-rw-r--r--  util/bloom.cc          | 2
-rw-r--r--  util/bloom_test.cc     | 3
3 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/table/filter_block.cc b/table/filter_block.cc
index 203e15c..4e78b95 100644
--- a/table/filter_block.cc
+++ b/table/filter_block.cc
@@ -68,7 +68,7 @@ void FilterBlockBuilder::GenerateFilter() {
// Generate filter for current set of keys and append to result_.
filter_offsets_.push_back(result_.size());
- policy_->CreateFilter(&tmp_keys_[0], num_keys, &result_);
+ policy_->CreateFilter(&tmp_keys_[0], static_cast<int>(num_keys), &result_);
tmp_keys_.clear();
keys_.clear();
@@ -97,7 +97,7 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
if (index < num_) {
uint32_t start = DecodeFixed32(offset_ + index*4);
uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
- if (start <= limit && limit <= (offset_ - data_)) {
+ if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
Slice filter = Slice(data_ + start, limit - start);
return policy_->KeyMayMatch(key, filter);
} else if (start == limit) {
diff --git a/util/bloom.cc b/util/bloom.cc
index a27a2ac..bf3e4ca 100644
--- a/util/bloom.cc
+++ b/util/bloom.cc
@@ -47,7 +47,7 @@ class BloomFilterPolicy : public FilterPolicy {
dst->resize(init_size + bytes, 0);
dst->push_back(static_cast<char>(k_)); // Remember # of probes in filter
char* array = &(*dst)[init_size];
- for (size_t i = 0; i < n; i++) {
+ for (int i = 0; i < n; i++) {
// Use double-hashing to generate a sequence of hash values.
// See analysis in [Kirsch,Mitzenmacher 2006].
uint32_t h = BloomHash(keys[i]);
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index 77fb1b3..1b87a2b 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -46,7 +46,8 @@ class BloomTest {
key_slices.push_back(Slice(keys_[i]));
}
filter_.clear();
- policy_->CreateFilter(&key_slices[0], key_slices.size(), &filter_);
+ policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()),
+ &filter_);
keys_.clear();
if (kVerbose >= 2) DumpFilter();
}