author    Misha Tyulenev <misha.tyulenev@mongodb.com>    2020-07-14 20:31:51 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2020-07-15 17:15:53 +0000
commit    f5396269916adaf6b3539fb804ee20ae54950df7 (patch)
tree      eaca834db65a450224cbece1dc55365ac8f58dc9
parent    5034da92513be28db2a597bed80a6c548ecfa1e3 (diff)
download  mongo-f5396269916adaf6b3539fb804ee20ae54950df7.tar.gz
SERVER-49092 fail with MovePrimaryInProgress in createIndexes command
-rw-r--r--    jstests/sharding/move_primary_with_writes.js    33
-rw-r--r--    src/mongo/db/commands/create_indexes.cpp    30
2 files changed, 50 insertions(+), 13 deletions(-)
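With this change, a createIndexes on an unsharded collection fails fast with MovePrimaryInProgress while a movePrimary for its database is underway, instead of racing the clone. A minimal shell sketch of the expected behavior (not part of this commit; the database, collection, and index names are illustrative, and the movePrimary is assumed to be held mid-clone by a failpoint, as in the test changes below):

// Hypothetical shell sketch: while a movePrimary for "testdb" is hanging in its
// clone stage, an index build on an unsharded collection in that database is
// expected to return MovePrimaryInProgress rather than race the primary move.
const testDB = db.getSiblingDB("testdb");
const res = testDB.runCommand(
    {createIndexes: "unshardedFoo", indexes: [{key: {b: 1}, name: "unshardedFooIdx_b"}]});
assert.commandFailedWithCode(res, ErrorCodes.MovePrimaryInProgress);

Once the move completes, retrying the same command is expected to succeed, which is what the alwaysFail: false flag on the new createIndexes case in the test encodes.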
diff --git a/jstests/sharding/move_primary_with_writes.js b/jstests/sharding/move_primary_with_writes.js
index 46ec381de29..42fb2763ea6 100644
--- a/jstests/sharding/move_primary_with_writes.js
+++ b/jstests/sharding/move_primary_with_writes.js
@@ -22,19 +22,18 @@ function createCollections() {
assert.commandWorked(st.getDB(dbName).runCommand({dropDatabase: 1}));
let db = st.getDB(dbName);
- const unshardedFooIndexes = [{key: {a: 1}, name: 'fooIndex'}];
- const shardedBarIndexes = [{key: {a: 1}, name: 'barIndex'}];
+ const unshardedFooIndexes = [{key: {a: 1}, name: 'fooIndex_a'}];
+ const shardedBarIndexes = [{key: {a: 1}, name: 'barIndex_a'}];
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
assert.commandWorked(db.createCollection('unshardedFoo'));
- assert.commandWorked(db.createView('viewOnFoo', 'unshardedFoo', [{$match: {}}]));
assert.commandWorked(db.createCollection('shardedBar'));
for (let i = 0; i < 3; i++) {
- assert.commandWorked(db.unshardedFoo.insert({_id: i, a: i}));
- assert.commandWorked(db.shardedBar.insert({_id: i, a: i}));
+ assert.commandWorked(db.unshardedFoo.insert({_id: i, a: i, b: i}));
+ assert.commandWorked(db.shardedBar.insert({_id: i, a: i, b: i}));
}
assert.commandWorked(
@@ -43,6 +42,9 @@ function createCollections() {
assert.commandWorked(db.adminCommand({enableSharding: dbName}));
assert.commandWorked(db.adminCommand({shardCollection: dbName + '.shardedBar', key: {_id: 1}}));
+
+ assert.commandWorked(db.createView('unshardedFooView', 'unshardedFoo', [{$match: {}}]));
+ assert.commandWorked(db.createView('shardedBarView', 'shardedBar', [{$match: {}}]));
}
function mapFunc() {
@@ -97,13 +99,17 @@ function buildCommands(collName) {
alwaysFail: true
},
{command: {create: "testCollection"}, alwaysFail: true},
+ {
+ command: {createIndexes: collName, indexes: [{key: {b: 1}, name: collName + "Idx_b"}]},
+ alwaysFail: false
+ },
];
return commands;
}
-function buildDDLCommands() {
+function buildDDLCommands(collName) {
const commands = [{
- command: {renameCollection: "testdb.unshardedFoo", to: "testdb.testCollection"},
+ command: {renameCollection: dbName + "." + collName, to: dbName + ".testCollection"},
alwaysFail: true
}];
return commands;
@@ -148,7 +154,7 @@ function testMovePrimary(failpoint, fromShard, toShard, db, shouldFail, sharded)
awaitShell();
}
-function testMovePrimaryDDL(failpoint, fromShard, toShard, db, shouldFail) {
+function testMovePrimaryDDL(failpoint, fromShard, toShard, db, shouldFail, sharded) {
let codeToRunInParallelShell = '{ db.getSiblingDB("admin").runCommand({movePrimary: "' +
dbName + '", to: "' + toShard.name + '"}); }';
@@ -160,7 +166,14 @@ function testMovePrimaryDDL(failpoint, fromShard, toShard, db, shouldFail) {
waitForFailpoint("Hit " + failpoint, 1);
clearRawMongoProgramOutput();
- buildDDLCommands().forEach(commandObj => {
+ let collName;
+ if (sharded) {
+ collName = "shardedBar";
+ } else {
+ collName = "unshardedFoo";
+ }
+
+ buildDDLCommands(collName).forEach(commandObj => {
if (shouldFail) {
jsTestLog("running command: " + tojson(commandObj.command) +
",\nshoudFail: " + shouldFail);
@@ -197,7 +210,7 @@ verifyDocuments(fromShard.getDB(dbName), 0);
createCollections();
fromShard = st.getPrimaryShard(dbName);
toShard = st.getOther(fromShard);
-testMovePrimaryDDL('hangInCloneStage', fromShard, toShard, st.s.getDB("admin"), true);
+testMovePrimaryDDL('hangInCloneStage', fromShard, toShard, st.s.getDB("admin"), false, true);
createCollections();
fromShard = st.getPrimaryShard(dbName);
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index ddf42dfdc00..abcf15f4681 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -55,6 +55,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
+#include "mongo/db/s/collection_sharding_runtime.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/database_sharding_state.h"
#include "mongo/db/server_options.h"
@@ -355,10 +356,33 @@ bool indexesAlreadyExist(OperationContext* opCtx,
/**
* Checks database sharding state. Throws exception on error.
*/
-void checkDatabaseShardingState(OperationContext* opCtx, StringData dbName) {
- auto dss = DatabaseShardingState::get(opCtx, dbName);
+void checkDatabaseShardingState(OperationContext* opCtx, const NamespaceString& ns) {
+ auto dss = DatabaseShardingState::get(opCtx, ns.db());
auto dssLock = DatabaseShardingState::DSSLock::lockShared(opCtx, dss);
dss->checkDbVersion(opCtx, dssLock);
+
+ Lock::CollectionLock collLock(opCtx, ns, MODE_IS);
+ try {
+ const auto collDesc =
+ CollectionShardingState::get(opCtx, ns)->getCollectionDescription(opCtx);
+ if (!collDesc.isSharded()) {
+ auto mpsm = dss->getMovePrimarySourceManager(dssLock);
+
+ if (mpsm) {
+ LOGV2(
+ 4909200, "assertMovePrimaryInProgress", "movePrimaryNss"_attr = ns.toString());
+
+ uasserted(ErrorCodes::MovePrimaryInProgress,
+ "movePrimary is in progress for namespace " + ns.toString());
+ }
+ }
+ } catch (const DBException& ex) {
+ if (ex.toStatus() != ErrorCodes::MovePrimaryInProgress) {
+            LOGV2(4909201, "Error when getting collection description", "what"_attr = ex.what());
+ return;
+ }
+ throw;
+ }
}
/**
@@ -487,7 +511,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
OptionalCollectionUUID collectionUUID;
{
Lock::DBLock dbLock(opCtx, ns.db(), MODE_IX);
- checkDatabaseShardingState(opCtx, ns.db());
+ checkDatabaseShardingState(opCtx, ns);
if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)) {
uasserted(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating indexes in " << ns.ns());
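The server-side check above only raises MovePrimaryInProgress for unsharded collections (sharded collections are not cloned by movePrimary), and the error is transient: once the primary move finishes, the same createIndexes succeeds. A hypothetical caller-side retry sketch, assuming the mongo shell helpers sleep(), assert.commandWorked(), and ErrorCodes; the helper name and retry parameters are illustrative and not part of this commit:

// Hypothetical helper: retry createIndexes while a movePrimary on the
// collection's database is still in progress, surfacing any other error.
function createIndexesWithRetry(targetDb, collName, indexes, maxAttempts = 10) {
    for (let attempt = 1; attempt <= maxAttempts; attempt++) {
        const res = targetDb.runCommand({createIndexes: collName, indexes: indexes});
        if (res.ok === 1) {
            return res;
        }
        if (res.code !== ErrorCodes.MovePrimaryInProgress) {
            assert.commandWorked(res);  // throws with the full command response
        }
        sleep(1000);  // give the movePrimary time to drain before retrying
    }
    throw new Error("createIndexes still blocked by movePrimary after " + maxAttempts + " attempts");
}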