summaryrefslogtreecommitdiff
path: root/jstests
diff options
context:
space:
mode:
authorJack Mulrow <jack.mulrow@mongodb.com>2020-03-05 12:12:39 -0500
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-03-13 00:05:44 +0000
commit8781a75d17393b9f2c747116ed17a91b734bb2db (patch)
treecc252478ca53f479ee916482fd0e65b6218bbdc7 /jstests
parentd09c84a0856060c38e58d971599966af8719a454 (diff)
downloadmongo-8781a75d17393b9f2c747116ed17a91b734bb2db.tar.gz
SERVER-46004 refineCollectionShardKey should verify indexes on a shard with a chunk
Diffstat (limited to 'jstests')
-rw-r--r--jstests/sharding/check_sharding_index_versioned.js31
-rw-r--r--jstests/sharding/refine_collection_shard_key_basic.js96
-rw-r--r--jstests/sharding/refine_collection_shard_key_primary_without_chunks.js31
3 files changed, 158 insertions, 0 deletions
diff --git a/jstests/sharding/check_sharding_index_versioned.js b/jstests/sharding/check_sharding_index_versioned.js
new file mode 100644
index 00000000000..53af7ab5148
--- /dev/null
+++ b/jstests/sharding/check_sharding_index_versioned.js
@@ -0,0 +1,31 @@
+/*
+ * Tests that the checkShardingIndex command checks shard version when run on a sharded collection.
+ * @tags: [requires_fcv_44]
+ */
+(function() {
+"use strict";
+
+const st = new ShardingTest({shards: 1});
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+// checkShardingIndex only exists on a mongod, so run the command directly against a shard with
+// a dummy shard version that should fail with StaleConfig.
+//
+// Note the shell connects to shards with a DBClient, which throws StaleConfig errors as JS
+// exceptions when the error does not come from a mongos.
+const error = assert.throws(() => {
+ st.rs0.getPrimary().getDB(dbName).runCommand({
+ checkShardingIndex: ns,
+ keyPattern: {x: 1},
+ shardVersion: [Timestamp(99, 10101), ObjectId()],
+ });
+});
+assert.eq(error.code, ErrorCodes.StaleConfig);
+
+st.stop();
+})();
diff --git a/jstests/sharding/refine_collection_shard_key_basic.js b/jstests/sharding/refine_collection_shard_key_basic.js
index 4c03a6f53f8..7d8a9bcccb5 100644
--- a/jstests/sharding/refine_collection_shard_key_basic.js
+++ b/jstests/sharding/refine_collection_shard_key_basic.js
@@ -9,6 +9,8 @@
(function() {
'use strict';
load('jstests/libs/fail_point_util.js');
+load('jstests/libs/profiler.js');
+load('jstests/sharding/libs/shard_versioning_util.js');
load('jstests/sharding/libs/sharded_transactions_helpers.js');
const st = new ShardingTest({mongos: 2, shards: 2, rs: {nodes: 3}});
@@ -630,5 +632,99 @@ assert.soon(() => oldSecondaryEpoch ===
st.shard1.adminCommand({getShardVersion: kNsName, fullMetadata: true})
.metadata.shardVersionEpoch.toString());
+(() => {
+ //
+ // Verify listIndexes and checkShardingIndex are retried on shard version errors and are sent
+ // with shard versions.
+ //
+
+ // Create a sharded collection with one chunk on shard0.
+ const dbName = "testShardVersions";
+ const collName = "fooShardVersions";
+ const ns = dbName + "." + collName;
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, st.shard0.shardName);
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
+
+ const minKeyShardDB = st.rs0.getPrimary().getDB(dbName);
+ assert.commandWorked(minKeyShardDB.setProfilingLevel(2));
+
+ // Refining the shard key should internally retry on a stale epoch error for listIndexes and
+ // succeed.
+ assert.commandWorked(minKeyShardDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 5},
+ data: {
+ errorCode: ErrorCodes.StaleEpoch,
+ failCommands: ["listIndexes"],
+ failInternalCommands: true
+ }
+ }));
+ assert.commandWorked(st.s.getCollection(ns).createIndex({x: 1, y: 1}));
+ assert.commandWorked(st.s.adminCommand({refineCollectionShardKey: ns, key: {x: 1, y: 1}}));
+
+ // Refining the shard key should internally retry on a stale epoch error for checkShardingIndex
+ // and succeed.
+ assert.commandWorked(minKeyShardDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 5},
+ data: {
+ errorCode: ErrorCodes.StaleEpoch,
+ failCommands: ["checkShardingIndex"],
+ failInternalCommands: true
+ }
+ }));
+ assert.commandWorked(st.s.getCollection(ns).createIndex({x: 1, y: 1, z: 1}));
+ assert.commandWorked(
+ st.s.adminCommand({refineCollectionShardKey: ns, key: {x: 1, y: 1, z: 1}}));
+
+ // Verify both commands were sent with shard versions through the profiler.
+ profilerHasAtLeastOneMatchingEntryOrThrow({
+ profileDB: minKeyShardDB,
+ filter: {"command.listIndexes": collName, "command.shardVersion": {"$exists": true}}
+ });
+
+ profilerHasAtLeastOneMatchingEntryOrThrow({
+ profileDB: minKeyShardDB,
+ filter: {"command.checkShardingIndex": ns, "command.shardVersion": {"$exists": true}}
+ });
+
+ // Clean up.
+ assert.commandWorked(minKeyShardDB.setProfilingLevel(0));
+ assert(minKeyShardDB.system.profile.drop());
+})();
+
+(() => {
+ //
+ // Verify refineCollectionShardKey can return a StaleConfig error without crashing the config
+ // server.
+ //
+
+ // Create a sharded collection with one chunk on shard0.
+ const dbName = "testReturnStaleConfig";
+ const ns = dbName + ".fooReturnStaleConfig";
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, st.shard0.shardName);
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
+
+ // Move the last chunk away from shard0 without refreshing shard1 so it will be stale when its
+ // indexes are read during the next refine. Disable refreshes on shard1 so it will repeatedly
+ // return StaleConfig until refineCollectionShardKey runs out of retries.
+ ShardVersioningUtil.moveChunkNotRefreshRecipient(st.s, ns, st.shard0, st.shard1, {x: 1});
+
+ let disableRefreshesFailPoint =
+ configureFailPoint(st.rs1.getPrimary(), "skipShardFilteringMetadataRefresh");
+
+ assert.commandWorked(st.rs1.getPrimary().getCollection(ns).createIndex({x: 1, y: 1}));
+ assert.commandFailedWithCode(
+ st.s.adminCommand({refineCollectionShardKey: ns, key: {x: 1, y: 1}}),
+ ErrorCodes.StaleConfig);
+
+ disableRefreshesFailPoint.off();
+
+ // Now that refreshes are re-enabled, the refine should succeed.
+ assert.commandWorked(st.s.adminCommand({refineCollectionShardKey: ns, key: {x: 1, y: 1}}));
+})();
+
st.stop();
})();
diff --git a/jstests/sharding/refine_collection_shard_key_primary_without_chunks.js b/jstests/sharding/refine_collection_shard_key_primary_without_chunks.js
new file mode 100644
index 00000000000..1d1ebfa5d57
--- /dev/null
+++ b/jstests/sharding/refine_collection_shard_key_primary_without_chunks.js
@@ -0,0 +1,31 @@
+// Verifies refining a shard key checks for the presence of a compatible shard key index on a shard
+// with chunks, not the primary shard.
+// @tags: [requires_fcv_44]
+(function() {
+"use strict";
+
+const st = new ShardingTest({shards: 2});
+
+// The orphan hook assumes every shard has the shard key index, which is not true for test_primary
+// after the refine.
+TestData.skipCheckOrphans = true;
+
+const dbName = "test_primary";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+// Create a sharded collection with all chunks on the non-primary shard, shard1.
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
+
+// Move the last chunk away from the primary shard and create an index compatible with the refined
+// key only on the non-primary shard.
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 1}, to: st.shard1.shardName}));
+assert.commandWorked(st.rs1.getPrimary().getCollection(ns).createIndex({x: 1, y: 1}));
+
+// Refining the shard key should succeed even though the primary does not have a compatible index.
+assert.commandWorked(st.s.adminCommand({refineCollectionShardKey: ns, key: {x: 1, y: 1}}));
+
+st.stop();
+})();