author    Max Hirschhorn <max.hirschhorn@mongodb.com>    2017-01-20 16:29:12 -0500
committer Max Hirschhorn <max.hirschhorn@mongodb.com>    2017-01-20 16:29:12 -0500
commit    bcca51caf0054c7def89d7588a1c99c6b2513a79 (patch)
tree      69e2db6b14478f756adb2023fa791798048e9d59
parent    d4e459b768349936ce72bb53ec056bc5918119f1 (diff)
download  mongo-bcca51caf0054c7def89d7588a1c99c6b2513a79.tar.gz

SERVER-24994 Check in Helpers::removeRange() if shard key index dropped.

(cherry picked from commit 57ed82f0692bfb4e7a045a0108d029e53b21e3f8)
-rw-r--r--  jstests/concurrency/fsm_all.js                                                            7
-rw-r--r--  jstests/concurrency/fsm_all_composed.js                                                   3
-rw-r--r--  jstests/concurrency/fsm_all_replication.js                                                3
-rw-r--r--  jstests/concurrency/fsm_all_sharded_replication_legacy_config_servers_with_balancer.js    5
-rw-r--r--  jstests/concurrency/fsm_all_sharded_replication_with_balancer.js                          5
-rw-r--r--  jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js              84
-rw-r--r--  src/mongo/db/dbhelpers.cpp                                                                6

7 files changed, 112 insertions(+), 1 deletion(-)
diff --git a/jstests/concurrency/fsm_all.js b/jstests/concurrency/fsm_all.js
index 35031becb89..4c45ac7b99d 100644
--- a/jstests/concurrency/fsm_all.js
+++ b/jstests/concurrency/fsm_all.js
@@ -4,7 +4,12 @@ load('jstests/concurrency/fsm_libs/runner.js');
var dir = 'jstests/concurrency/fsm_workloads';
-var blacklist = [].map(function(file) {
+var blacklist = [
+ // Disabled due to MongoDB restrictions and/or workload restrictions
+
+ // This workload assumes it is running against a sharded cluster.
+ 'sharded_moveChunk_drop_shard_key_index.js',
+].map(function(file) {
return dir + '/' + file;
});
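
The blacklist pattern shown above is shared by all five runner files touched in this
patch: bare workload filenames are mapped onto full paths, which the runner then
filters out of its directory listing before executing what remains. A minimal sketch
of that flow, assuming the mongo shell helpers ls() and Array.contains() that these
suites rely on (the `workloads` variable is illustrative):

    var dir = 'jstests/concurrency/fsm_workloads';

    var blacklist = [
        // This workload assumes it is running against a sharded cluster.
        'sharded_moveChunk_drop_shard_key_index.js',
    ].map(function(file) {
        // Expand each bare filename into a full path under the workload directory.
        return dir + '/' + file;
    });

    // Run everything in the workload directory except the blacklisted files.
    var workloads = ls(dir).filter(function(file) {
        return !Array.contains(blacklist, file);
    });
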
diff --git a/jstests/concurrency/fsm_all_composed.js b/jstests/concurrency/fsm_all_composed.js
index 159ff0919c9..c32b039dd32 100644
--- a/jstests/concurrency/fsm_all_composed.js
+++ b/jstests/concurrency/fsm_all_composed.js
@@ -16,6 +16,9 @@ var blacklist = [
// is slow and the composer doesn't honor iteration counts:
'remove_single_document_eval_nolock.js',
'update_simple_eval_nolock.js',
+
+ // This workload assumes it is running against a sharded cluster.
+ 'sharded_moveChunk_drop_shard_key_index.js',
].map(function(file) {
return dir + '/' + file;
});
diff --git a/jstests/concurrency/fsm_all_replication.js b/jstests/concurrency/fsm_all_replication.js
index 31770af9f80..93be7b7dfe2 100644
--- a/jstests/concurrency/fsm_all_replication.js
+++ b/jstests/concurrency/fsm_all_replication.js
@@ -9,6 +9,9 @@ var blacklist = [
'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
'findAndModify_update_grow.js', // can cause OOM kills on test hosts
+
+ // This workload assumes it is running against a sharded cluster.
+ 'sharded_moveChunk_drop_shard_key_index.js',
].map(function(file) {
return dir + '/' + file;
});
diff --git a/jstests/concurrency/fsm_all_sharded_replication_legacy_config_servers_with_balancer.js b/jstests/concurrency/fsm_all_sharded_replication_legacy_config_servers_with_balancer.js
index 8fa15374dce..167efc452ed 100644
--- a/jstests/concurrency/fsm_all_sharded_replication_legacy_config_servers_with_balancer.js
+++ b/jstests/concurrency/fsm_all_sharded_replication_legacy_config_servers_with_balancer.js
@@ -74,6 +74,11 @@ var blacklist = [
'rename_collection_dbname_droptarget.js',
'rename_collection_droptarget.js',
+ // This workload assumes that the distributed lock can always be acquired when running the split
+ // command in its setup() function; however, a LockBusy error may be returned if the balancer is
+ // running.
+ 'sharded_moveChunk_drop_shard_key_index.js',
+
'update_simple_eval.js', // eval doesn't work with sharded collections
'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
'update_upsert_multi.js', // our update queries lack shard keys
diff --git a/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js b/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js
index 59d6e0f4a61..9e63ec701ee 100644
--- a/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js
+++ b/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js
@@ -75,6 +75,11 @@ var blacklist = [
'rename_collection_dbname_droptarget.js',
'rename_collection_droptarget.js',
+ // This workload assumes that the distributed lock can always be acquired when running the split
+ // command in its setup() function; however, a LockBusy error may be returned if the balancer is
+ // running.
+ 'sharded_moveChunk_drop_shard_key_index.js',
+
'update_simple_eval.js', // eval doesn't work with sharded collections
'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
'update_upsert_multi.js', // our update queries lack shard keys
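
The LockBusy caveat in the two comments above is why this workload is blacklisted
only in the balancer suites: its setup() issues one split command per split point,
and each split must take the collection's distributed lock, which the balancer may
already hold. A hedged sketch of the retry loop a workload would need in order to
tolerate that race (splitWithRetry and its retry bound are illustrative, not part
of this patch):

    function splitWithRetry(db, ns, middle) {
        var kMaxAttempts = 10;  // hypothetical bound, not from this patch
        for (var attempt = 0; attempt < kMaxAttempts; ++attempt) {
            var res = db.adminCommand({split: ns, middle: middle});
            if (res.ok) {
                return res;
            }
            // Only the balancer-induced failure is worth retrying; anything
            // else should fail the workload loudly.
            if (res.code !== ErrorCodes.LockBusy) {
                assertAlways.commandWorked(res);
            }
            sleep(100);  // back off briefly before the next attempt
        }
        throw new Error('split at ' + tojson(middle) + ' still LockBusy after ' +
                        kMaxAttempts + ' attempts');
    }
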
diff --git a/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js b/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js
new file mode 100644
index 00000000000..c476b7a0dab
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/sharded_moveChunk_drop_shard_key_index.js
@@ -0,0 +1,84 @@
+'use strict';
+
+/**
+ * sharded_moveChunk_drop_shard_key_index.js
+ *
+ * Tests that dropping the shard key index while migrating a chunk doesn't cause the shard to abort.
+ *
+ * This workload was designed to reproduce SERVER-24994.
+ */
+
+var $config = (function() {
+
+ var data = {
+ numSplitPoints: 100,
+ shardKey: {key: 1}
+ };
+
+ var states = {
+
+ init: function init(db, collName) {
+ // No-op
+ },
+
+ moveChunk: function moveChunk(db, collName) {
+ var configDB = db.getSiblingDB('config');
+ var shards = configDB.shards.aggregate([{$sample: {size: 1}}]).toArray();
+ assertAlways.eq(1, shards.length, tojson(shards));
+
+ var shardName = shards[0]._id;
+ var chunkBoundary = Random.randInt(this.numSplitPoints);
+
+ // We don't assert that the command succeeded when migrating a chunk because it's
+ // possible another thread has already started migrating a chunk.
+ db.adminCommand({
+ moveChunk: db[collName].getFullName(),
+ find: {key: chunkBoundary},
+ to: shardName,
+ _waitForDelete: true,
+ });
+ },
+
+ dropIndex: function dropIndex(db, collName) {
+ // We don't assert that the command succeeded when dropping an index because it's
+ // possible another thread has already dropped this index.
+ db[collName].dropIndex(this.shardKey);
+
+ // Re-create the index that was dropped.
+ assertAlways.commandWorked(db[collName].createIndex(this.shardKey));
+ }
+
+ };
+
+ var transitions = {
+ init: {moveChunk: 0.5, dropIndex: 0.5},
+ moveChunk: {moveChunk: 0.5, dropIndex: 0.5},
+ dropIndex: {moveChunk: 0.5, dropIndex: 0.5}
+ };
+
+ function setup(db, collName, cluster) {
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.numSplitPoints; ++i) {
+ bulk.insert({key: i});
+ }
+
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.numSplitPoints, res.nInserted, tojson(res));
+
+ for (i = 0; i < this.numSplitPoints; ++i) {
+ assertWhenOwnColl.commandWorked(
+ db.adminCommand({split: db[collName].getFullName(), middle: {key: i}}));
+ }
+ }
+
+ return {
+ threadCount: 10,
+ iterations: 100,
+ data: data,
+ states: states,
+ transitions: transitions,
+ setup: setup
+ };
+
+})();
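
For readers new to the concurrency suite: each workload exports a $config object, and
the FSM runner spawns threadCount threads that all begin in the init state and then
repeatedly pick their next state by sampling the current row of the transitions table,
invoking each state function with `this` bound to the shared data object. A simplified
sketch of that driving loop, not the actual fsm_libs implementation:

    // Assumes Random.setRandomSeed() has already been called by the harness.
    function runThread(config, db, collName) {
        var current = 'init';
        for (var i = 0; i < config.iterations; ++i) {
            // State functions see the workload's data object as `this`.
            config.states[current].call(config.data, db, collName);

            // Sample the next state from the weighted row for the current
            // state, e.g. {moveChunk: 0.5, dropIndex: 0.5}.
            var r = Random.rand();  // uniform draw in [0, 1)
            var cumulative = 0;
            for (var next in config.transitions[current]) {
                cumulative += config.transitions[current][next];
                if (r < cumulative) {
                    current = next;
                    break;
                }
            }
        }
    }
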
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index e0d6da23040..d3c62ea0ef8 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -351,6 +351,12 @@ long long Helpers::removeRange(OperationContext* txn,
IndexDescriptor* desc =
collection->getIndexCatalog()->findIndexByKeyPattern(txn, indexKeyPattern.toBSON());
+ if (!desc) {
+ warning(LogComponent::kSharding) << "shard key index " << indexKeyPattern.toBSON()
+ << " on '" << ns << "' was dropped";
+ return -1;
+ }
+
unique_ptr<PlanExecutor> exec(
InternalPlanner::indexScan(txn,
collection,
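
The dbhelpers.cpp hunk above is the substance of SERVER-24994: Helpers::removeRange(),
which deletes an orphaned range after a chunk migration, previously assumed the shard
key index still existed, and handing the null descriptor to the index scan could make
the donor shard abort. With the guard, a dropped index now logs a sharding warning and
returns -1 so the caller bails out instead. A hedged shell sketch of the race the new
workload provokes (the shard name 'shard0001' and namespace 'test.coll' are
placeholders for a real sharded cluster):

    var ns = 'test.coll';

    // Thread A: migrate a chunk and wait for the donor's range deleter, which
    // is what ends up inside Helpers::removeRange().
    db.adminCommand({
        moveChunk: ns,
        find: {key: 42},
        to: 'shard0001',
        _waitForDelete: true,
    });

    // Thread B, concurrently: drop the {key: 1} shard key index out from under
    // the range deleter, then restore it, as the dropIndex state above does.
    db.getSiblingDB('test').coll.dropIndex({key: 1});
    db.getSiblingDB('test').coll.createIndex({key: 1});

    // Before this patch the donor could hit the missing index and abort; after
    // it, the shard logs the "shard key index ... was dropped" warning and the
    // range cleanup returns -1.
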