author    Greg Studer <greg@10gen.com>  2014-04-02 10:24:27 -0400
committer Greg Studer <greg@10gen.com>  2014-04-02 11:04:58 -0400
commit    bafd9c7e0e39f86d251271517f518a24acb853c5 (patch)
tree      aba490de34a3ee99b1b31683e4b60c8974f85af1
parent    8e82f5620a230fe890d09872c48ca41734b66dca (diff)
download  mongo-bafd9c7e0e39f86d251271517f518a24acb853c5.tar.gz
SERVER-13459 migration rollback needs to use original metadata (tag: r2.6.0-rc3)
(cherry picked from commit 9b65ac6d4361c07374b9e95e41afd6c2a3199b8e)
-rw-r--r--  jstests/sharding/migration_failure.js  41
-rw-r--r--  src/mongo/s/d_migrate.cpp               31
2 files changed, 62 insertions(+), 10 deletions(-)
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
new file mode 100644
index 00000000000..65b21661a74
--- /dev/null
+++ b/jstests/sharding/migration_failure.js
@@ -0,0 +1,41 @@
+//
+// Tests that migration failures before and after commit correctly roll back
+// when possible
+//
+
+var options = { separateConfig : true };
+
+var st = new ShardingTest({ shards : 2, mongos : 1, other : options });
+st.stopBalancer();
+
+var mongos = st.s0;
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var coll = mongos.getCollection( "foo.bar" );
+
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+
+st.printShardingStatus();
+
+jsTest.log("Testing failed migrations...");
+
+assert.commandWorked(
+ st.shard0.getDB("admin").runCommand({
+ configureFailPoint : 'failMigrationConfigWritePrepare', mode : 'alwaysOn' }));
+
+var version = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+
+assert.commandFailed( admin.runCommand({ moveChunk : coll + "",
+ find : { _id : 0 },
+ to : shards[1]._id }) );
+
+var failVersion = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+
+assert.eq(version.global, failVersion.global);
+
+jsTest.log( "DONE!" );
+
+st.stop();
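
Note: the new test above only exercises the pre-commit fail point
(failMigrationConfigWritePrepare). Below is a minimal sketch, not part of this
commit, of how the companion failMigrationCommit fail point (declared in the
d_migrate.cpp hunk below) might be exercised in the same harness; that the
shard version is restored after a rejected commit is an assumption based on
the undoDonateChunk rollback path in the diff.

// Sketch only -- reuses st, admin, coll, shards, and version from the test above.
assert.commandWorked(
    st.shard0.getDB("admin").runCommand({
        configureFailPoint : 'failMigrationConfigWritePrepare', mode : 'off' }));
assert.commandWorked(
    st.shard0.getDB("admin").runCommand({
        configureFailPoint : 'failMigrationCommit', mode : 'alwaysOn' }));

// The TO-shard commit is rejected, so the migration should fail and the
// FROM-shard should reset its version via undoDonateChunk.
assert.commandFailed( admin.runCommand({ moveChunk : coll + "",
                                         find : { _id : 0 },
                                         to : shards[1]._id }) );

var commitFailVersion = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
assert.eq( version.global, commitFailVersion.global );

assert.commandWorked(
    st.shard0.getDB("admin").runCommand({
        configureFailPoint : 'failMigrationCommit', mode : 'off' }));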
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 46a5adcb8b0..c003a63a492 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -88,6 +88,9 @@ using namespace std;
namespace mongo {
+ MONGO_FP_DECLARE(failMigrationCommit);
+ MONGO_FP_DECLARE(failMigrationConfigWritePrepare);
+
Tee* migrateLog = RamLog::get("migrate");
class MoveTimingHelper {
@@ -1046,9 +1049,9 @@ namespace mongo {
// 3.
- CollectionMetadataPtr collMetadata = shardingState.getCollectionMetadata( ns );
- verify( collMetadata != NULL );
- BSONObj shardKeyPattern = collMetadata->getKeyPattern();
+ const CollectionMetadataPtr origCollMetadata( shardingState.getCollectionMetadata( ns ) );
+ verify( origCollMetadata != NULL );
+ BSONObj shardKeyPattern = origCollMetadata->getKeyPattern();
if ( shardKeyPattern.isEmpty() ){
errmsg = "no shard key found";
return false;
@@ -1255,7 +1258,7 @@ namespace mongo {
ok = false;
}
- if ( !ok ) {
+ if ( !ok || MONGO_FAIL_POINT(failMigrationCommit) ) {
log() << "moveChunk migrate commit not accepted by TO-shard: " << res
<< " resetting shard version to: " << startingVersion << migrateLog;
{
@@ -1266,7 +1269,7 @@ namespace mongo {
// revert the chunk manager back to the state before "forgetting" about the
// chunk
- shardingState.undoDonateChunk( ns, collMetadata );
+ shardingState.undoDonateChunk( ns, origCollMetadata );
}
log() << "Shard version successfully reset to clean up failed migration"
<< endl;
@@ -1321,13 +1324,14 @@ namespace mongo {
// if we have chunks left on the FROM shard, update the version of one of them as
// well. we can figure that out by grabbing the metadata installed on 5.a
- collMetadata = shardingState.getCollectionMetadata( ns );
- if( collMetadata->getNumChunks() > 0 ) {
+ const CollectionMetadataPtr bumpedCollMetadata( shardingState.getCollectionMetadata( ns ) );
+ if( bumpedCollMetadata->getNumChunks() > 0 ) {
// get another chunk on that shard
ChunkType bumpChunk;
- bool result = collMetadata->getNextChunk( collMetadata->getMinKey(),
- &bumpChunk );
+ bool result =
+ bumpedCollMetadata->getNextChunk( bumpedCollMetadata->getMinKey(),
+ &bumpChunk );
BSONObj bumpMin = bumpChunk.getMin();
BSONObj bumpMax = bumpChunk.getMax();
@@ -1391,6 +1395,13 @@ namespace mongo {
ok = false;
BSONObj cmdResult;
try {
+
+ // For testing migration failures
+ if ( MONGO_FAIL_POINT(failMigrationConfigWritePrepare) ) {
+ throw DBException( "mock migration failure before config write",
+ PrepareConfigsFailedCode );
+ }
+
ScopedDbConnection conn(shardingState.getConfigServer(), 10.0);
ok = conn->runCommand( "config" , cmd , cmdResult );
conn.done();
@@ -1421,7 +1432,7 @@ namespace mongo {
// Revert the metadata back to the state before "forgetting"
// about the chunk.
- shardingState.undoDonateChunk( ns, collMetadata );
+ shardingState.undoDonateChunk( ns, origCollMetadata );
}
log() << "Shard version successfully reset to clean up failed migration" << endl;
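
Note: fail points declared with MONGO_FP_DECLARE, like the two added above, are
toggled at runtime with the configureFailPoint command against the shard's
admin database. A minimal usage sketch follows; the modes shown are the
standard fail point modes, not something introduced by this commit.

// Enable until explicitly disabled:
db.adminCommand({ configureFailPoint : 'failMigrationConfigWritePrepare', mode : 'alwaysOn' });
// Trigger only the next time the fail point is evaluated:
db.adminCommand({ configureFailPoint : 'failMigrationConfigWritePrepare', mode : { times : 1 } });
// Disable:
db.adminCommand({ configureFailPoint : 'failMigrationConfigWritePrepare', mode : 'off' });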