author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2018-01-04 18:03:26 -0500
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2018-01-13 09:01:24 -0500
commit     f164097cb678f763289ee870e854f89749dbbba8 (patch)
tree       62a03c13f2f76548c8738e057a2aaae4abd5c1be
parent     4e4261cd0efc5fc9e0b2b9f9b787e7b6bc12295f (diff)
SERVER-32526 Increase the ChunkManager unit-test coverage
Also tightens up some of the checks in the JS tests and cleans up the ChunkManager/CollectionMetadata code in preparation for making it use KeyString.
-rw-r--r--  jstests/sharding/count1.js                      22
-rw-r--r--  jstests/sharding/covered_shard_key_indexes.js  309
-rw-r--r--  src/mongo/db/s/collection_metadata.h             5
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp    132
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp          4
-rw-r--r--  src/mongo/db/s/split_chunk.cpp                   2
-rw-r--r--  src/mongo/s/chunk_manager.cpp                   18
-rw-r--r--  src/mongo/s/chunk_manager_query_test.cpp        53
8 files changed, 300 insertions(+), 245 deletions(-)
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index 3aaa1b25162..e8783dac6a1 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -1,6 +1,7 @@
(function() {
+ 'use strict';
- var s = new ShardingTest({name: "count1", shards: 2});
+ var s = new ShardingTest({shards: 2});
var db = s.getDB("test");
// ************** Test Set #1 *************
@@ -32,8 +33,8 @@
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
- primary = s.getPrimaryShard("test").getDB("test");
- secondary = s.getOther(primary).getDB("test");
+ var primary = s.getPrimaryShard("test").getDB("test");
+ var secondary = s.getOther(primary).getDB("test");
assert.eq(1, s.config.chunks.count({"ns": "test.foo"}), "sanity check A");
@@ -47,9 +48,9 @@
assert.eq(6, db.foo.find().count(), "basic count");
// part 2
- s.adminCommand({split: "test.foo", middle: {name: "allan"}});
- s.adminCommand({split: "test.foo", middle: {name: "sara"}});
- s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "allan"}}));
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "sara"}}));
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {name: "eliot"}}));
// MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
@@ -60,12 +61,12 @@
assert.eq(6, db.foo.find().sort({name: 1}).count(), "basic count after split sorted ");
// part 4
- s.adminCommand({
- movechunk: "test.foo",
+ assert.commandWorked(s.s0.adminCommand({
+ moveChunk: "test.foo",
find: {name: "eliot"},
to: secondary.getMongo().name,
_waitForDelete: true
- });
+ }));
assert.eq(3, primary.foo.find().toArray().length, "primary count");
assert.eq(3, secondary.foo.find().toArray().length, "secondary count");
@@ -151,7 +152,7 @@
assert.eq("joe,mark", nameString(db.foo.find().sort({_id: 1}).skip(3).limit(2)), "LSE3");
// part 6
- for (i = 0; i < 10; i++) {
+ for (var i = 0; i < 10; i++) {
db.foo.save({_id: 7 + i, name: "zzz" + i});
}
@@ -177,5 +178,4 @@
assert(negSkipLimitResult.errmsg.length > 0, "no error msg for negative skip");
s.stop();
-
})();
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index b00daa01c38..ce6851cafe5 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -3,168 +3,151 @@
// particular queries
//
-// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-var st = new ShardingTest({shards: 1});
-
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var coll = mongos.getCollection("foo.bar");
-
-//
-//
-// Tests with _id : 1 shard key
-
-assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
-printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: st.shard0.shardName}));
-assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
-st.printShardingStatus();
-
-// Insert some data
-assert.writeOK(coll.insert({_id: true, a: true, b: true}));
-
-assert.commandWorked(
- st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
-
-//
-// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({a: 1}));
-assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
-
-//
-// Index with shard key query - covered when projecting
-assert.commandWorked(coll.dropIndexes());
-assert.commandWorked(coll.ensureIndex({a: 1, _id: 1}));
-assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(0, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
-
-//
-// Compound index with shard key query - covered when projecting
-assert.commandWorked(coll.dropIndexes());
-assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
-assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(
- 0,
- coll.find({a: true, b: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
-
-//
-//
-// Tests with _id : hashed shard key
-coll.drop();
-assert(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}).ok);
-st.printShardingStatus();
-
-// Insert some data
-assert.writeOK(coll.insert({_id: true, a: true, b: true}));
-
-//
-// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({a: 1}));
-assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({a: true}, {_id: 0, a: 1}).explain(true).executionStats.totalDocsExamined);
-
-//
-// Index with shard key query - can't be covered since hashed index
-assert.commandWorked(coll.dropIndex({a: 1}));
-assert.eq(1, coll.find({_id: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({_id: true}, {_id: 0}).explain(true).executionStats.totalDocsExamined);
-
-//
-//
-// Tests with compound shard key
-coll.drop();
-assert(admin.runCommand({shardCollection: coll + "", key: {a: 1, b: 1}}).ok);
-st.printShardingStatus();
-
-// Insert some data
-assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
-
-//
-// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({c: 1}));
-assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(1,
- coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
-//
-// Index with shard key query - covered when projecting
-assert.commandWorked(coll.dropIndex({c: 1}));
-assert.commandWorked(coll.ensureIndex({c: 1, b: 1, a: 1}));
-assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(0,
- coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
-//
-// Compound index with shard key query - covered when projecting
-assert.commandWorked(coll.dropIndex({c: 1, b: 1, a: 1}));
-assert.commandWorked(coll.ensureIndex({c: 1, d: 1, a: 1, b: 1, _id: 1}));
-assert.eq(1, coll.find({c: true, d: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(0,
- coll.find({c: true, d: true}, {a: 1, b: 1, c: 1, d: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
-
-//
-//
-// Tests with nested shard key
-coll.drop();
-assert(admin.runCommand({shardCollection: coll + "", key: {'a.b': 1}}).ok);
-st.printShardingStatus();
-
-// Insert some data
-assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
-
-//
-// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({c: 1}));
-assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(
- 1,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
-
-//
-// Index with shard key query - can be covered given the appropriate projection.
-assert.commandWorked(coll.dropIndex({c: 1}));
-assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
-assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(
- 0,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
-
-//
-//
-// Tests with bad data with no shard key
-coll.drop();
-assert(admin.runCommand({shardCollection: coll + "", key: {a: 1}}).ok);
-st.printShardingStatus();
-
-// Insert some bad data manually
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
-
-//
-// Index without shard key query - not covered but succeeds
-assert.commandWorked(coll.ensureIndex({c: 1}));
-var explain = coll.find({c: true}).explain(true).executionStats;
-assert.eq(0, explain.nReturned);
-assert.eq(1, explain.totalDocsExamined);
-assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
-
-//
-// Index with shard key query - covered and succeeds and returns result
-// NOTE: This is weird and only a result of the fact that we don't have a dedicated "does not exist"
-// value for indexes
-assert.commandWorked(coll.ensureIndex({c: 1, a: 1}));
-jsTest.log(tojson(coll.find({c: true}, {_id: 0, a: 1, c: 1}).toArray()));
-var explain = coll.find({c: true}, {_id: 0, a: 1, c: 1}).explain(true).executionStats;
-assert.eq(1, explain.nReturned);
-assert.eq(0, explain.totalDocsExamined);
-assert.eq(0, getChunkSkips(explain.executionStages.shards[0].executionStages));
-
-jsTest.log("DONE!");
-st.stop();
+(function() {
+ 'use strict';
+
+ var st = new ShardingTest({shards: 1});
+ var coll = st.s0.getCollection("foo.bar");
+
+ assert.commandWorked(st.s0.adminCommand({enableSharding: coll.getDB() + ""}));
+
+ jsTest.log('Tests with _id : 1 shard key');
+ coll.drop();
+ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: 1}}));
+ st.printShardingStatus();
+
+ assert.commandWorked(
+ st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
+
+ // Insert some data
+ assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+
+ // Index without shard key query - not covered
+ assert.commandWorked(coll.ensureIndex({a: 1}));
+ assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(1,
+ coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+ // Index with shard key query - covered when projecting
+ assert.commandWorked(coll.dropIndexes());
+ assert.commandWorked(coll.ensureIndex({a: 1, _id: 1}));
+ assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(0,
+ coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+ // Compound index with shard key query - covered when projecting
+ assert.commandWorked(coll.dropIndexes());
+ assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
+ assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(0,
+ coll.find({a: true, b: true}, {_id: 1, a: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+ jsTest.log('Tests with _id : hashed shard key');
+ coll.drop();
+ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
+ st.printShardingStatus();
+
+ // Insert some data
+ assert.writeOK(coll.insert({_id: true, a: true, b: true}));
+
+ // Index without shard key query - not covered
+ assert.commandWorked(coll.ensureIndex({a: 1}));
+ assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(1,
+ coll.find({a: true}, {_id: 0, a: 1}).explain(true).executionStats.totalDocsExamined);
+
+ // Index with shard key query - can't be covered since hashed index
+ assert.commandWorked(coll.dropIndex({a: 1}));
+ assert.eq(1, coll.find({_id: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(1, coll.find({_id: true}, {_id: 0}).explain(true).executionStats.totalDocsExamined);
+
+ jsTest.log('Tests with compound shard key');
+ coll.drop();
+ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1, b: 1}}));
+ st.printShardingStatus();
+
+ // Insert some data
+ assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
+
+ // Index without shard key query - not covered
+ assert.commandWorked(coll.ensureIndex({c: 1}));
+ assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(1,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+ // Index with shard key query - covered when projecting
+ assert.commandWorked(coll.dropIndex({c: 1}));
+ assert.commandWorked(coll.ensureIndex({c: 1, b: 1, a: 1}));
+ assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(0,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+ // Compound index with shard key query - covered when projecting
+ assert.commandWorked(coll.dropIndex({c: 1, b: 1, a: 1}));
+ assert.commandWorked(coll.ensureIndex({c: 1, d: 1, a: 1, b: 1, _id: 1}));
+ assert.eq(1, coll.find({c: true, d: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(0,
+ coll.find({c: true, d: true}, {a: 1, b: 1, c: 1, d: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+ jsTest.log('Tests with nested shard key');
+ coll.drop();
+ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {'a.b': 1}}));
+ st.printShardingStatus();
+
+ // Insert some data
+ assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
+
+ // Index without shard key query - not covered
+ assert.commandWorked(coll.ensureIndex({c: 1}));
+ assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+ // Index with shard key query - can be covered given the appropriate projection.
+ assert.commandWorked(coll.dropIndex({c: 1}));
+ assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
+ assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+ assert.eq(0,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
+
+ jsTest.log('Tests with bad data with no shard key');
+ coll.drop();
+ assert.commandWorked(st.s0.adminCommand({shardCollection: coll + "", key: {a: 1}}));
+ st.printShardingStatus();
+
+ // Insert some bad data manually on the shard
+ assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
+
+ // Index without shard key query - not covered but succeeds
+ assert.commandWorked(coll.ensureIndex({c: 1}));
+ var explain = coll.find({c: true}).explain(true).executionStats;
+ assert.eq(0, explain.nReturned);
+ assert.eq(1, explain.totalDocsExamined);
+ assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
+
+ // Index with shard key query - covered and succeeds and returns result
+ //
+ // NOTE: This is weird and only a result of the fact that we don't have a dedicated "does not
+ // exist" value for indexes
+ assert.commandWorked(coll.ensureIndex({c: 1, a: 1}));
+ var explain = coll.find({c: true}, {_id: 0, a: 1, c: 1}).explain(true).executionStats;
+ assert.eq(1, explain.nReturned);
+ assert.eq(0, explain.totalDocsExamined);
+ assert.eq(0, getChunkSkips(explain.executionStages.shards[0].executionStages));
+
+ st.stop();
+})();
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index fad4cfeae3d..e606058eae5 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -64,9 +64,8 @@ public:
}
/**
- * Returns true if the document key 'key' belongs to this chunkset. Recall that documents of
- * an in-flight chunk migration may be present and should not be considered part of the
- * collection / chunkset yet. Key must be the full shard key.
+ * Returns true if the document with the given key belongs to this chunkset. If the key is
+ * empty, returns false. If the key is not a valid shard key, the behaviour is undefined.
*/
bool keyBelongsToMe(const BSONObj& key) const;
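
For illustration, the contract documented above can be modelled with a short standalone sketch (plain C++17, not the real CollectionMetadata; int keys and std::optional stand in for BSON shard keys and the empty BSONObj). The shard owns the half-open chunk range [10, 20), matching the single-chunk fixture in the tests below, and an empty key is never owned:

// Hypothetical sketch of the keyBelongsToMe contract; not MongoDB code.
#include <cassert>
#include <map>
#include <optional>

int main() {
    // Owned chunks keyed by their inclusive min; the mapped value is the exclusive max.
    const std::map<int, int> ownedChunks{{10, 20}};

    auto keyBelongsToMe = [&](std::optional<int> key) {
        if (!key)
            return false;  // an "empty" key is never owned
        auto it = ownedChunks.upper_bound(*key);
        if (it == ownedChunks.begin())
            return false;  // key sorts before every owned chunk
        --it;                      // candidate chunk whose min <= key
        return *key < it->second;  // key must also be below the chunk's exclusive max
    };

    assert(keyBelongsToMe(10) && keyBelongsToMe(15) && keyBelongsToMe(19));
    assert(!keyBelongsToMe(9) && !keyBelongsToMe(20) && !keyBelongsToMe(1234));
    assert(!keyBelongsToMe(std::nullopt));
    return 0;
}
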
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 9f619b1ca12..120c0b3ee58 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -75,6 +75,11 @@ std::unique_ptr<CollectionMetadata> makeCollectionMetadataImpl(
return stdx::make_unique<CollectionMetadata>(cm, kThisShard);
}
+struct ConstructedRangeMap : public RangeMap {
+ ConstructedRangeMap()
+ : RangeMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()) {}
+};
+
class NoChunkFixture : public unittest::Test {
protected:
std::unique_ptr<CollectionMetadata> makeCollectionMetadata() const {
@@ -82,47 +87,47 @@ protected:
}
};
-struct stRangeMap : public RangeMap {
- stRangeMap() : RangeMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()) {}
-};
-
-TEST_F(NoChunkFixture, BasicBelongsToMe) {
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY)));
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 10)));
-}
+TEST_F(NoChunkFixture, KeyBelongsToMe) {
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 10)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY)));
-TEST_F(NoChunkFixture, CompoundKeyBelongsToMe) {
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 1 << "b" << 2)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSONObj()));
}
-TEST_F(NoChunkFixture, IsKeyValid) {
- ASSERT_TRUE(makeCollectionMetadata()->isValidKey(BSON("a"
- << "abcde")));
- ASSERT_TRUE(makeCollectionMetadata()->isValidKey(BSON("a" << 3)));
- ASSERT_FALSE(makeCollectionMetadata()->isValidKey(BSON("a"
- << "abcde"
- << "b"
- << 1)));
- ASSERT_FALSE(makeCollectionMetadata()->isValidKey(BSON("c"
- << "abcde")));
+TEST_F(NoChunkFixture, IsValidKey) {
+ ASSERT(makeCollectionMetadata()->isValidKey(BSON("a"
+ << "abcde")));
+ ASSERT(makeCollectionMetadata()->isValidKey(BSON("a" << 3)));
+ ASSERT(!makeCollectionMetadata()->isValidKey(BSON("a"
+ << "abcde"
+ << "b"
+ << 1)));
+ ASSERT(!makeCollectionMetadata()->isValidKey(BSON("c"
+ << "abcde")));
}
-TEST_F(NoChunkFixture, getNextFromEmpty) {
+TEST_F(NoChunkFixture, GetNextChunk) {
ChunkType nextChunk;
ASSERT(
!makeCollectionMetadata()->getNextChunk(makeCollectionMetadata()->getMinKey(), &nextChunk));
}
-TEST_F(NoChunkFixture, getDifferentFromEmpty) {
+TEST_F(NoChunkFixture, GetDifferentChunk) {
ChunkType differentChunk;
ASSERT(!makeCollectionMetadata()->getDifferentChunk(makeCollectionMetadata()->getMinKey(),
&differentChunk));
}
+TEST_F(NoChunkFixture, RangeOverlapsChunk) {
+ ASSERT(!makeCollectionMetadata()->rangeOverlapsChunk(
+ ChunkRange{BSON("a" << 100), BSON("a" << 200)}));
+}
+
TEST_F(NoChunkFixture, OrphanedDataRangeBegin) {
auto metadata(makeCollectionMetadata());
- stRangeMap pending;
+ ConstructedRangeMap pending;
BSONObj lookupKey = metadata->getMinKey();
auto keyRange = metadata->getNextOrphanRange(pending, lookupKey);
ASSERT(keyRange);
@@ -137,7 +142,7 @@ TEST_F(NoChunkFixture, OrphanedDataRangeBegin) {
TEST_F(NoChunkFixture, OrphanedDataRangeMiddle) {
auto metadata(makeCollectionMetadata());
- stRangeMap pending;
+ ConstructedRangeMap pending;
BSONObj lookupKey = BSON("a" << 20);
auto keyRange = metadata->getNextOrphanRange(pending, lookupKey);
ASSERT(keyRange);
@@ -152,7 +157,7 @@ TEST_F(NoChunkFixture, OrphanedDataRangeMiddle) {
TEST_F(NoChunkFixture, OrphanedDataRangeEnd) {
auto metadata(makeCollectionMetadata());
- stRangeMap pending;
+ ConstructedRangeMap pending;
ASSERT(!metadata->getNextOrphanRange(pending, metadata->getMaxKey()));
}
@@ -168,26 +173,22 @@ protected:
}
};
-TEST_F(SingleChunkFixture, BasicBelongsToMe) {
+TEST_F(SingleChunkFixture, KeyBelongsToMe) {
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 10)));
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 15)));
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 19)));
-}
-TEST_F(SingleChunkFixture, DoesntBelongsToMe) {
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 0)));
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 9)));
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 20)));
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 1234)));
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY)));
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MAXKEY)));
-}
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 0)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 9)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 20)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 1234)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MAXKEY)));
-TEST_F(SingleChunkFixture, CompoundKeyBelongsToMe) {
- ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 15 << "a" << 14)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSONObj()));
}
-TEST_F(SingleChunkFixture, getNextFromEmpty) {
+TEST_F(SingleChunkFixture, GetNextChunk) {
ChunkType nextChunk;
ASSERT(
makeCollectionMetadata()->getNextChunk(makeCollectionMetadata()->getMinKey(), &nextChunk));
@@ -195,19 +196,32 @@ TEST_F(SingleChunkFixture, getNextFromEmpty) {
ASSERT_EQUALS(0, nextChunk.getMax().woCompare(BSON("a" << 20)));
}
-TEST_F(SingleChunkFixture, GetLastChunkIsFalse) {
+TEST_F(SingleChunkFixture, GetNextChunkShouldFindNothing) {
ChunkType nextChunk;
ASSERT(
!makeCollectionMetadata()->getNextChunk(makeCollectionMetadata()->getMaxKey(), &nextChunk));
}
-TEST_F(SingleChunkFixture, getDifferentFromOneIsFalse) {
+TEST_F(SingleChunkFixture, GetDifferentChunkShouldFindNothing) {
ChunkType differentChunk;
ASSERT(!makeCollectionMetadata()->getDifferentChunk(BSON("a" << 10), &differentChunk));
}
+TEST_F(SingleChunkFixture, RangeOverlapsChunk) {
+ ASSERT(!makeCollectionMetadata()->rangeOverlapsChunk(
+ ChunkRange{BSON("a" << 20), BSON("a" << 30)}));
+ ASSERT(!makeCollectionMetadata()->rangeOverlapsChunk(
+ ChunkRange{BSON("a" << 100), BSON("a" << 200)}));
+ ASSERT(
+ !makeCollectionMetadata()->rangeOverlapsChunk(ChunkRange{BSON("a" << 0), BSON("a" << 10)}));
+ ASSERT(
+ makeCollectionMetadata()->rangeOverlapsChunk(ChunkRange{BSON("a" << 11), BSON("a" << 19)}));
+ ASSERT(
+ makeCollectionMetadata()->rangeOverlapsChunk(ChunkRange{BSON("a" << 19), BSON("a" << 20)}));
+}
+
TEST_F(SingleChunkFixture, ChunkOrphanedDataRanges) {
- stRangeMap pending;
+ ConstructedRangeMap pending;
auto keyRange = makeCollectionMetadata()->getNextOrphanRange(
pending, makeCollectionMetadata()->getMinKey());
ASSERT(keyRange);
@@ -237,14 +251,14 @@ protected:
}
};
-// Note: no tests for single key belongsToMe because they are not allowed
-// if shard key is compound.
-
-TEST_F(SingleChunkMinMaxCompoundKeyFixture, CompoundKeyBelongsToMe) {
+TEST_F(SingleChunkMinMaxCompoundKeyFixture, KeyBelongsToMe) {
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY << "b" << MINKEY)));
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MAXKEY << "b" << MAXKEY)));
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY << "b" << 10)));
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 10 << "b" << 20)));
+
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MAXKEY << "b" << MAXKEY)));
+
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSONObj()));
}
/**
@@ -262,7 +276,7 @@ protected:
};
TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapOrphanedDataRanges) {
- stRangeMap pending;
+ ConstructedRangeMap pending;
auto keyRange = makeCollectionMetadata()->getNextOrphanRange(
pending, makeCollectionMetadata()->getMinKey());
@@ -318,20 +332,20 @@ TEST_F(ThreeChunkWithRangeGapFixture, GetNextChunkMatch) {
ASSERT_BSONOBJ_EQ(BSON("a" << MAXKEY), chunk.getMax());
}
-TEST_F(ThreeChunkWithRangeGapFixture, ShardOwnsDoc) {
+TEST_F(ThreeChunkWithRangeGapFixture, KeyBelongsToMe) {
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 5)));
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 10)));
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 30)));
ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 40)));
-}
-TEST_F(ThreeChunkWithRangeGapFixture, ShardDoesntOwnDoc) {
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 20)));
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 25)));
- ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MAXKEY)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 20)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 25)));
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MAXKEY)));
+
+ ASSERT(!makeCollectionMetadata()->keyBelongsToMe(BSONObj()));
}
-TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromEmpty) {
+TEST_F(ThreeChunkWithRangeGapFixture, GetNextChunkFromBeginning) {
ChunkType nextChunk;
ASSERT(
makeCollectionMetadata()->getNextChunk(makeCollectionMetadata()->getMinKey(), &nextChunk));
@@ -339,21 +353,21 @@ TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromEmpty) {
ASSERT_EQUALS(0, nextChunk.getMax().woCompare(BSON("a" << 10)));
}
-TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromMiddle) {
+TEST_F(ThreeChunkWithRangeGapFixture, GetNextChunkFromMiddle) {
ChunkType nextChunk;
ASSERT(makeCollectionMetadata()->getNextChunk(BSON("a" << 20), &nextChunk));
ASSERT_EQUALS(0, nextChunk.getMin().woCompare(BSON("a" << 30)));
ASSERT_EQUALS(0, nextChunk.getMax().woCompare(BSON("a" << MAXKEY)));
}
-TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromLast) {
+TEST_F(ThreeChunkWithRangeGapFixture, GetNextChunkFromLast) {
ChunkType nextChunk;
ASSERT(makeCollectionMetadata()->getNextChunk(BSON("a" << 30), &nextChunk));
ASSERT_EQUALS(0, nextChunk.getMin().woCompare(BSON("a" << 30)));
ASSERT_EQUALS(0, nextChunk.getMax().woCompare(BSON("a" << MAXKEY)));
}
-TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentFromBeginning) {
+TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentChunkFromBeginning) {
auto metadata(makeCollectionMetadata());
ChunkType differentChunk;
@@ -362,14 +376,14 @@ TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentFromBeginning) {
ASSERT_BSONOBJ_EQ(BSON("a" << 20), differentChunk.getMax());
}
-TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentFromMiddle) {
+TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentChunkFromMiddle) {
ChunkType differentChunk;
ASSERT(makeCollectionMetadata()->getDifferentChunk(BSON("a" << 10), &differentChunk));
ASSERT_EQUALS(0, differentChunk.getMin().woCompare(BSON("a" << MINKEY)));
ASSERT_EQUALS(0, differentChunk.getMax().woCompare(BSON("a" << 10)));
}
-TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentFromLast) {
+TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentChunkFromLast) {
ChunkType differentChunk;
ASSERT(makeCollectionMetadata()->getDifferentChunk(BSON("a" << 30), &differentChunk));
ASSERT_EQUALS(0, differentChunk.getMin().woCompare(BSON("a" << MINKEY)));
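
The RangeOverlapsChunk cases above pin down the half-open semantics of chunk ranges: a range that starts exactly at a chunk's max, or ends exactly at its min, does not overlap it. A minimal standalone sketch of that overlap rule (plain C++; int keys stand in for BSON shard keys, and the names are hypothetical) reproduces the single-chunk fixture's expectations for the chunk [10, 20):

// Hypothetical sketch of half-open range overlap; not MongoDB code.
#include <cassert>

struct Range {
    int min;  // inclusive
    int max;  // exclusive
};

// Two half-open ranges overlap when each one starts before the other ends.
bool rangeOverlaps(const Range& a, const Range& b) {
    return a.min < b.max && b.min < a.max;
}

int main() {
    const Range chunk{10, 20};                  // mirrors the single-chunk fixture
    assert(!rangeOverlaps(chunk, {20, 30}));    // starts at the chunk's max
    assert(!rangeOverlaps(chunk, {100, 200}));  // entirely after the chunk
    assert(!rangeOverlaps(chunk, {0, 10}));     // ends at the chunk's min
    assert(rangeOverlaps(chunk, {11, 19}));     // strictly inside
    assert(rangeOverlaps(chunk, {19, 20}));     // covers the chunk's last key
    return 0;
}
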
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 2b3818d5d07..aba860514cb 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -161,8 +161,6 @@ Status mergeChunks(OperationContext* opCtx,
ChunkType itChunk;
itChunk.setMin(minKey);
itChunk.setMax(minKey);
- itChunk.setNS(nss.ns());
- itChunk.setShard(shardingState->getShardName());
while (itChunk.getMax().woCompare(maxKey) < 0 &&
metadata->getNextChunk(itChunk.getMax(), &itChunk)) {
@@ -170,7 +168,6 @@ Status mergeChunks(OperationContext* opCtx,
chunksToMerge.push_back(itChunk);
}
-
if (chunksToMerge.empty()) {
std::string errmsg = stream()
<< "could not merge chunks, collection " << nss.ns() << " range starting at "
@@ -409,6 +406,7 @@ public:
auto mergeStatus = mergeChunks(opCtx, NamespaceString(ns), minKey, maxKey, epoch);
return appendCommandStatus(result, mergeStatus);
}
+
} mergeChunksCmd;
BSONField<string> MergeChunksCommand::nsField("mergeChunks");
diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp
index f37c825dbfd..7a34132f7ad 100644
--- a/src/mongo/db/s/split_chunk.cpp
+++ b/src/mongo/db/s/split_chunk.cpp
@@ -122,7 +122,7 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx,
return true;
}
-} // anonymous namespace
+} // namespace
StatusWith<boost::optional<ChunkRange>> splitChunk(OperationContext* opCtx,
const NamespaceString& nss,
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 1644a79782c..964be3ed1b9 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -177,7 +177,7 @@ void ChunkManager::getShardIdsForRange(const BSONObj& min,
invariant(it != _chunkMapViews.chunkRangeMap.end());
// We need to include the last chunk
- if (end != _chunkMapViews.chunkRangeMap.cend()) {
+ if (end != _chunkMapViews.chunkRangeMap.end()) {
++end;
}
@@ -335,10 +335,22 @@ ChunkVersion ChunkManager::getVersion(const ShardId& shardName) const {
std::string ChunkManager::toString() const {
StringBuilder sb;
- sb << "ChunkManager: " << _nss.ns() << " key:" << _shardKeyPattern.toString() << '\n';
+ sb << "ChunkManager: " << _nss.ns() << " key: " << _shardKeyPattern.toString() << '\n';
+ sb << "Chunks:\n";
for (const auto& entry : _chunkMap) {
- sb << "\t" << entry.second->toString() << '\n';
+ sb << "\t" << entry.first << ": " << entry.second->toString() << '\n';
+ }
+
+ sb << "Ranges:\n";
+ for (const auto& entry : _chunkMapViews.chunkRangeMap) {
+ sb << "\t" << entry.first << ": " << entry.second.range.toString() << " @ "
+ << entry.second.shardId << '\n';
+ }
+
+ sb << "Shard versions:\n";
+ for (const auto& entry : _chunkMapViews.shardVersions) {
+ sb << "\t" << entry.first << ": " << entry.second.toString() << '\n';
}
return sb.str();
diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp
index d9ddb4ec980..be057095300 100644
--- a/src/mongo/s/chunk_manager_query_test.cpp
+++ b/src/mongo/s/chunk_manager_query_test.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/s/catalog_cache_test_fixture.h"
#include "mongo/s/chunk_manager.h"
+#include "mongo/util/log.h"
namespace mongo {
namespace {
@@ -43,13 +44,28 @@ const NamespaceString kNss("TestDB", "TestColl");
class ChunkManagerQueryTest : public CatalogCacheTestFixture {
protected:
+ void runGetShardIdsForRangeTest(const BSONObj& shardKey,
+ bool unique,
+ const std::vector<BSONObj>& splitPoints,
+ const BSONObj& min,
+ const BSONObj& max,
+ const std::set<ShardId>& expectedShardIds) {
+ const ShardKeyPattern shardKeyPattern(shardKey);
+ auto chunkManager = makeChunkManager(kNss, shardKeyPattern, nullptr, false, splitPoints);
+
+ std::set<ShardId> shardIds;
+ chunkManager->getShardIdsForRange(min, max, &shardIds);
+
+ _assertShardIdsMatch(expectedShardIds, shardIds);
+ }
+
void runQueryTest(const BSONObj& shardKey,
std::unique_ptr<CollatorInterface> defaultCollator,
bool unique,
const std::vector<BSONObj>& splitPoints,
const BSONObj& query,
const BSONObj& queryCollation,
- const std::set<ShardId> expectedShardIds) {
+ const std::set<ShardId>& expectedShardIds) {
const ShardKeyPattern shardKeyPattern(shardKey);
auto chunkManager =
makeChunkManager(kNss, shardKeyPattern, std::move(defaultCollator), false, splitPoints);
@@ -57,13 +73,19 @@ protected:
std::set<ShardId> shardIds;
chunkManager->getShardIdsForQuery(operationContext(), query, queryCollation, &shardIds);
+ _assertShardIdsMatch(expectedShardIds, shardIds);
+ }
+
+private:
+ static void _assertShardIdsMatch(const std::set<ShardId>& expectedShardIds,
+ const std::set<ShardId>& actualShardIds) {
BSONArrayBuilder expectedBuilder;
for (const auto& shardId : expectedShardIds) {
expectedBuilder << shardId;
}
BSONArrayBuilder actualBuilder;
- for (const auto& shardId : shardIds) {
+ for (const auto& shardId : actualShardIds) {
actualBuilder << shardId;
}
@@ -71,6 +93,33 @@ protected:
}
};
+TEST_F(ChunkManagerQueryTest, GetShardIdsForRangeMinAndMaxAreInclusive) {
+ runGetShardIdsForRangeTest(BSON("a" << 1),
+ false,
+ {BSON("a" << -100), BSON("a" << 0), BSON("a" << 100)},
+ BSON("a" << -100),
+ BSON("a" << 0),
+ {ShardId("1"), ShardId("2")});
+}
+
+TEST_F(ChunkManagerQueryTest, GetShardIdsForRangeMinAndMaxAreTheSameAtFirstChunkMaxBoundary) {
+ runGetShardIdsForRangeTest(BSON("a" << 1),
+ false,
+ {BSON("a" << -100), BSON("a" << 0), BSON("a" << 100)},
+ BSON("a" << -100),
+ BSON("a" << -100),
+ {ShardId("1")});
+}
+
+TEST_F(ChunkManagerQueryTest, GetShardIdsForRangeMinAndMaxAreTheSameAtLastChunkMinBoundary) {
+ runGetShardIdsForRangeTest(BSON("a" << 1),
+ false,
+ {BSON("a" << -100), BSON("a" << 0), BSON("a" << 100)},
+ BSON("a" << 100),
+ BSON("a" << 100),
+ {ShardId("3")});
+}
+
TEST_F(ChunkManagerQueryTest, EmptyQuerySingleShard) {
runQueryTest(BSON("a" << 1), nullptr, false, {}, BSONObj(), BSONObj(), {ShardId("0")});
}
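
The three GetShardIdsForRange tests added in this file assert that both the min and the max of the queried range are treated as inclusive. A short standalone sketch of that walk (plain C++; the int-keyed map of chunk mins, the shard names, and the lambda are stand-ins, not the real ChunkManager internals):

// Hypothetical sketch of inclusive-bounds shard targeting; not MongoDB code.
#include <cassert>
#include <map>
#include <set>
#include <string>

int main() {
    // Split points -100, 0, 100 produce four chunks, mirroring the test fixture:
    // [MinKey, -100) -> "0", [-100, 0) -> "1", [0, 100) -> "2", [100, MaxKey) -> "3".
    const int kMinKey = -1000;  // stand-in for MinKey
    const std::map<int, std::string> chunksByMin{
        {kMinKey, "0"}, {-100, "1"}, {0, "2"}, {100, "3"}};

    // Walk from the chunk containing `min` up to and including the chunk
    // containing `max` (min is assumed to be >= the first chunk's min).
    auto getShardIdsForRange = [&](int min, int max) {
        std::set<std::string> shardIds;
        auto it = chunksByMin.upper_bound(min);   // first chunk whose min is > `min`
        --it;                                     // the chunk containing `min`
        auto end = chunksByMin.upper_bound(max);  // one past the chunk containing `max`
        for (; it != end; ++it) {
            shardIds.insert(it->second);
        }
        return shardIds;
    };

    assert((getShardIdsForRange(-100, 0) == std::set<std::string>{"1", "2"}));
    assert((getShardIdsForRange(-100, -100) == std::set<std::string>{"1"}));
    assert((getShardIdsForRange(100, 100) == std::set<std::string>{"3"}));
    return 0;
}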