summary | refs | log | tree | commit | diff
path: root/jstests/sharding
diff options
context:
space:
mode:
author		Kaloian Manassiev <kaloian.manassiev@mongodb.com>	2018-07-17 16:49:36 -0400
committer	Kaloian Manassiev <kaloian.manassiev@mongodb.com>	2018-07-23 10:42:44 -0400
commit		fc372cdf9a070eecaf600b75649fd2690c3d927d (patch)
tree		b27b7f33716c614af57e491645b10edc535a83b9 /jstests/sharding
parent		ba25922e6b2bffa60a8a4f3db8adca612da55e95 (diff)
download	mongo-fc372cdf9a070eecaf600b75649fd2690c3d927d.tar.gz
SERVER-14394 Add UUID checking to the hashed sharding with initial split test
Diffstat (limited to 'jstests/sharding')
-rw-r--r--	jstests/sharding/hash_skey_split.js				| 26
-rw-r--r--	jstests/sharding/initial_split_validate_shard_collections.js	| 74
2 files changed, 74 insertions, 26 deletions
diff --git a/jstests/sharding/hash_skey_split.js b/jstests/sharding/hash_skey_split.js
deleted file mode 100644
index d6d1fcf5c84..00000000000
--- a/jstests/sharding/hash_skey_split.js
+++ /dev/null
@@ -1,26 +0,0 @@
-(function() {
-
- var st = new ShardingTest({shards: 2});
-
- var configDB = st.s.getDB('config');
- assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-
- st.ensurePrimaryShard('test', st.shard1.shardName);
- assert.commandWorked(configDB.adminCommand(
- {shardCollection: 'test.user', key: {x: 'hashed'}, numInitialChunks: 2}));
-
- var metadata = st.rs0.getPrimary().getDB('admin').runCommand(
- {getShardVersion: 'test.user', fullMetadata: true});
- var chunks =
- metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending;
- assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
-
- metadata = st.rs1.getPrimary().getDB('admin').runCommand(
- {getShardVersion: 'test.user', fullMetadata: true});
- chunks =
- metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending;
- assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
-
- st.stop();
-
-})();
diff --git a/jstests/sharding/initial_split_validate_shard_collections.js b/jstests/sharding/initial_split_validate_shard_collections.js
new file mode 100644
index 00000000000..a2bc2070622
--- /dev/null
+++ b/jstests/sharding/initial_split_validate_shard_collections.js
@@ -0,0 +1,74 @@
+/**
+ * Explicitly validates that all shards' collections have the correct UUIDs after an initial split
+ * which spreads the collection across all available shards.
+ */
+
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/uuid_util.js");
+
+(function() {
+ 'use strict';
+
+ let st = new ShardingTest({shards: 2});
+ let mongos = st.s0;
+
+ assert.commandWorked(mongos.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', st.shard1.shardName);
+
+ assert.commandWorked(mongos.adminCommand(
+ {shardCollection: 'test.user', key: {x: 'hashed'}, numInitialChunks: 2}));
+
+ // Ensure that all the pending (received chunks) have been incorporated in the shard's filtering
+ // metadata so they will show up in the getShardVersion command
+ assert.eq(0, mongos.getDB('test').user.find({}).itcount());
+
+ st.printShardingStatus();
+
+ function checkMetadata(metadata) {
+ jsTestLog(tojson(metadata));
+
+ assert.eq(1, metadata.chunks.length);
+ assert.eq(0, metadata.pending.length);
+
+ // Check that the single chunk on the shard's metadata is a valid chunk (getShardVersion
+ // represents chunks as an array of [min, max])
+ let chunks = metadata.chunks;
+ assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0);
+ }
+
+ // Check that the shards' in-memory catalog caches were refreshed
+ checkMetadata(assert
+ .commandWorked(st.rs0.getPrimary().adminCommand(
+ {getShardVersion: 'test.user', fullMetadata: true}))
+ .metadata);
+ checkMetadata(assert
+ .commandWorked(st.rs1.getPrimary().adminCommand(
+ {getShardVersion: 'test.user', fullMetadata: true}))
+ .metadata);
+
+ // Check that the shards' catalogs have the correct UUIDs
+ const configUUID = getUUIDFromConfigCollections(mongos, 'test.user');
+ const shard0UUID = getUUIDFromListCollections(st.shard0.getDB('test'), 'user');
+ const shard1UUID = getUUIDFromListCollections(st.shard1.getDB('test'), 'user');
+ assert.eq(configUUID, shard0UUID);
+ assert.eq(configUUID, shard1UUID);
+
+ // Check that the shards' on-disk caches have the correct number of chunks
+ assert.commandWorked(st.shard0.adminCommand(
+ {_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
+ assert.commandWorked(st.shard1.adminCommand(
+ {_flushRoutingTableCacheUpdates: 'test.user', syncFromConfig: false}));
+
+ const chunksOnConfigCount = st.config.chunks.count({ns: 'test.user'});
+ assert.eq(2, chunksOnConfigCount);
+
+ const cacheChunksOnShard0 =
+ st.shard0.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
+ const cacheChunksOnShard1 =
+ st.shard1.getDB("config").getCollection("cache.chunks.test.user").find().toArray();
+ assert.eq(chunksOnConfigCount, cacheChunksOnShard0.length);
+ assert.eq(chunksOnConfigCount, cacheChunksOnShard1.length);
+ assert.eq(cacheChunksOnShard0, cacheChunksOnShard1);
+
+ st.stop();
+})();