author      Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2018-01-05 16:18:09 -0500
committer   Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2018-01-09 17:57:16 -0500
commit      a9d076d8642c13859588c3bc111b3c9af08cea2f (patch)
tree        66ec2f7f10ea318d6fc43d5d285e90ee1860c13d
parent      6e3b0deb789ec1e9bbdb78f42547278fb7b6b8f0 (diff)
SERVER-32568 Blacklist migration_sets_fromMigrate_flag.js from the CSRS continuous stepdown suite
* Make the test less resource intensive and verbose by lowering the number of spawned mongod instances
* Add logging to dump the contents of the oplog on failure
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml |  3
-rw-r--r--  jstests/sharding/migration_sets_fromMigrate_flag.js                       | 75
2 files changed, 43 insertions(+), 35 deletions(-)
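
The second bullet of the commit message (dumping the oplog on failure) boils down to the following diagnostic pattern. This is a minimal sketch, assuming a running ShardingTest named 'st' and a namespace string 'ns' as used in the test below; the exact helper added by the patch is shown in the second diff.

// Donor and recipient primaries of the two shard replica sets.
var donorLocal = st.rs0.getPrimary().getDB('local');
var recipientLocal = st.rs1.getPrimary().getDB('local');

// Dump every oplog entry that touched the namespace, on both sides, so a
// failing count assertion can be diagnosed from the test logs.
print('Oplog entries for ' + ns + ' on the donor:');
print(tojson(donorLocal.oplog.rs.find({ns: ns}).toArray()));
print('Oplog entries for ' + ns + ' on the recipient:');
print(tojson(recipientLocal.oplog.rs.find({ns: ns}).toArray()));
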
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
index 444cde68c15..35b739a2cba 100644
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
@@ -138,8 +138,9 @@ selector:
# Expects that connections to all shards/config servers will never close
- jstests/sharding/shard6.js
# Stepping down the primary can make the balancer rerun a migration that was designed to fail
- # earlier, but can potentially pass on the second try.
+ # earlier, but can potentially pass or have different side effects on the second try.
- jstests/sharding/migration_ignore_interrupts_1.js
+ - jstests/sharding/migration_sets_fromMigrate_flag.js
# Stepping down the config can cause moveChunks stopped on shards via killOp to be restarted.
- jstests/sharding/migration_ignore_interrupts_3.js
- jstests/sharding/migration_ignore_interrupts_4.js
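
The exclusion above protects the test's exact 'fromMigrate' oplog-count assertions: each migration attempt that clones a chunk writes its own fromMigrate inserts, so a balancer rerun triggered by a config stepdown changes the counts the test expects. A hypothetical, self-contained shell sketch of the baseline invariant follows; the setup and names here are illustrative and not part of the test itself.

(function() {
    'use strict';

    var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}});
    var coll = st.s.getDB('testDB').foo;

    assert.commandWorked(st.s.adminCommand({enableSharding: 'testDB'}));
    st.ensurePrimaryShard('testDB', st.shard0.shardName);
    assert.commandWorked(
        st.s.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
    assert.writeOK(coll.insert({_id: 2}));

    // Migrate the single chunk from shard0 (donor) to shard1 (recipient).
    assert.commandWorked(st.s.adminCommand(
        {moveChunk: coll.getFullName(), find: {_id: 2}, to: st.shard1.shardName}));

    // Exactly one cloned insert, flagged with fromMigrate, lands in the
    // recipient's oplog. A second cloning attempt (e.g. a balancer-driven
    // rerun after a failed migration) would add another such entry.
    var recipientOplog = st.rs1.getPrimary().getDB('local').oplog.rs;
    assert.eq(1, recipientOplog.find({op: 'i', fromMigrate: true, 'o._id': 2}).count());

    st.stop();
})();
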
diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js
index d6453c63676..5a19e5ede95 100644
--- a/jstests/sharding/migration_sets_fromMigrate_flag.js
+++ b/jstests/sharding/migration_sets_fromMigrate_flag.js
@@ -21,12 +21,7 @@ load('./jstests/libs/chunk_manipulation_util.js');
var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
- /**
- * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
- */
-
- var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 3}});
- st.stopBalancer();
+ var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 1}});
const dbName = "testDB";
const ns = dbName + ".foo";
@@ -135,52 +130,64 @@ load('./jstests/libs/chunk_manipulation_util.js');
jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');
+ function assertEqAndDumpOpLog(expected, actual, msg) {
+ if (expected === actual)
+ return;
+
+ print('Dumping oplog contents for', ns);
+ print('On donor:');
+ print(tojson(donorLocal.oplog.rs.find({ns: ns}).toArray()));
+
+ print('On recipient:');
+ print(tojson(recipientLocal.oplog.rs.find({ns: ns}).toArray()));
+
+ assert.eq(expected, actual, msg);
+ }
+
var donorOplogRes = donorLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
- assert.eq(1,
- donorOplogRes,
- "fromMigrate flag wasn't set on the donor shard's oplog for " +
- "migrating delete op on {_id: 2}! Test #2 failed.");
+ assertEqAndDumpOpLog(1,
+ donorOplogRes,
+ "fromMigrate flag wasn't set on the donor shard's oplog for " +
+ "migrating delete op on {_id: 2}! Test #2 failed.");
donorOplogRes =
donorLocal.oplog.rs.find({op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
- assert.eq(1,
- donorOplogRes,
- "Real delete of {_id: 4} on donor shard incorrectly set the " +
- "fromMigrate flag in the oplog! Test #5 failed.");
+ assertEqAndDumpOpLog(1,
+ donorOplogRes,
+ "Real delete of {_id: 4} on donor shard incorrectly set the " +
+ "fromMigrate flag in the oplog! Test #5 failed.");
// Expect to see two oplog entries for {_id: 2} with 'fromMigrate: true', because this doc was
// cloned as part of the first failed migration as well as the second successful migration.
var recipientOplogRes =
recipientLocal.oplog.rs.find({op: 'i', fromMigrate: true, 'o._id': 2}).count();
- assert.eq(2,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
+ assertEqAndDumpOpLog(2,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
recipientOplogRes =
recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
- assert.eq(1,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on the old {_id: 2} that overlapped " +
- "with the chunk about to be copied! Test #1 failed.");
+ assertEqAndDumpOpLog(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on the old {_id: 2} that overlapped " +
+ "with the chunk about to be copied! Test #1 failed.");
recipientOplogRes =
recipientLocal.oplog.rs.find({op: 'u', fromMigrate: true, 'o._id': 3}).count();
- assert.eq(1,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for update op on {_id: 3}! Test #4 failed.");
+ assertEqAndDumpOpLog(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for update op on {_id: 3}! Test #4 failed.");
recipientOplogRes =
recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 4}).count();
- assert.eq(1,
- recipientOplogRes,
- "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on {_id: 4} that occurred during " +
- "migration! Test #5 failed.");
+ assertEqAndDumpOpLog(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on {_id: 4} that occurred during " +
+ "migration! Test #5 failed.");
- jsTest.log('DONE!');
st.stop();
-
})();