author     Spencer T Brody <spencer@mongodb.com>    2015-10-06 15:01:57 -0400
committer  Spencer T Brody <spencer@mongodb.com>    2015-10-06 16:41:27 -0400
commit     6a462c52e99509c114fc14b5f3e089bd37a486f9 (patch)
tree       53c49542464f72d51ceba832305179b047488086
parent     7f62960716b37662070ba6fada747c60f5968c09 (diff)
download   mongo-6a462c52e99509c114fc14b5f3e089bd37a486f9.tar.gz
SERVER-20690 Test with multiple mongos can fail because one of them talked to a stale secondary
-rw-r--r--  jstests/gle/gle_after_split_failure_during_migration.js  136
-rw-r--r--  jstests/noPassthroughWithMongod/sharding_rs1.js             2
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd.js                    4
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_prereload.js          4
-rw-r--r--  jstests/sharding/fair_balancer_round.js                    12
-rw-r--r--  jstests/sharding/merge_chunks_test_with_md_ops.js           3
-rw-r--r--  jstests/sharding/rename_across_mongos.js                    6
-rw-r--r--  jstests/sharding/shard5.js                                  4
8 files changed, 21 insertions(+), 150 deletions(-)
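
Note on the fix: where these tests kept a second mongos only incidentally, the patch drops it; where a second mongos is the point of the test, the cross-mongos step is guarded by waiting for the config server replica set to commit its last write, so the other mongos cannot load metadata from a stale config secondary. A minimal sketch of that guard follows; it is not part of the patch, and the database/collection names ("testDB", "CollA") and the final listCollections call are illustrative assumptions.

    // Assumed setup: a cluster whose config servers form a replica set, so
    // ShardingTest exposes st.configRS; two mongos routers.
    var st = new ShardingTest({ shards: 2, mongos: 2 });

    // Make a metadata change through the first mongos.
    assert.commandWorked(st.s0.getDB("testDB").runCommand({ create: "CollA" }));

    if (st.configRS) {
        // Wait for the config replica set to replicate and commit the last write,
        // so a read through the second mongos cannot hit a stale secondary.
        st.configRS.awaitLastOpCommitted();
    }

    // Only now is it safe to route through the second mongos.
    assert.commandWorked(st.s1.getDB("testDB").runCommand({ listCollections: 1 }));

    st.stop();
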
diff --git a/jstests/gle/gle_after_split_failure_during_migration.js b/jstests/gle/gle_after_split_failure_during_migration.js
deleted file mode 100644
index 9d0a6a9ca2a..00000000000
--- a/jstests/gle/gle_after_split_failure_during_migration.js
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * SERVER-4987 This test checks that the getLastError call will still use
- * the same connection even if a chunk split triggered while doing inserts
- * failed (caused by a StaleConfigException).
- *
- * TODO: SERVER-5175
- * This test relies on the corresponding delays inside (1) WriteBackListener::run
- * and (2) ShardStrategy::_insert and (3) receivedInsert from instance.cpp
- * to make the bug easier to manifest.
- *
- * The purpose of (1) is to make the writebacks slower so the failed inserts won't
- * be reapplied on time.
- *
- * The purpose of (2) is to make it easier for the moveChunk command from the other
- * mongos to interleave between the moment the insert has set its shard version and
- * when it tries to autosplit (Note: the delay should be long enough to allow the moveChunk
- * to actually complete before it tries to proceed to autosplit).
- *
- * The purpose of (3) is to make sure that the insert won't get applied to the
- * shard right away so when a different connection is used to do the getLastError,
- * the write will still not be applied.
- */
-function testGleAfterSplitDuringMigration(){
-    var st = new ShardingTest({ shards: 2, verbose: 2, mongos: 2,
-                                other: { chunksize: 1 }});
-
-    // Stop the balancer to prevent it from contending with the distributed lock.
-    st.stopBalancer();
-
-    var DB_NAME = jsTest.name();
-    var COLL_NAME = "coll";
-
-    var mongos = st.s0;
-    var confDB = mongos.getDB( "config" );
-    var coll = mongos.getCollection( DB_NAME + "." + COLL_NAME );
-
-    var shardConn = st.d0;
-    var shardColl = shardConn.getCollection( coll.getFullName() );
-
-    var data = "x";
-    var dataSize = 1024 * 256; // bytes, must be power of 2
-    while( data.length < dataSize ) data += data;
-
-    // Shard collection
-    st.shardColl( coll, { _id : 1 }, false );
-
-    var docID = 0;
-
-    /**
-     * @return {Mongo} the connection object given the name of the shard.
-     */
-    var getShardConn = function( shardName ) {
-        var shardLoc = confDB.shards.findOne({ _id: shardName }).host;
-        return new Mongo( shardLoc );
-    };
-
-    /**
-     * Inserts enough documents, through a direct connection to the shard owning the
-     * max key chunk, to make sure that the next insert will trigger an auto split.
-     *
-     * variables from outer scope: docID, coll, confDB, data
-     */
-    var primeForSplitting = function() {
-        var topChunk = confDB.chunks.find().sort({ max: -1 }).limit( 1 ).next();
-        var shardLoc = getShardConn( topChunk.shard );
-        var testColl = shardLoc.getCollection( coll.getFullName() );
-
-        var superSaturatedChunkSize = 1024 * 1024 * 10; // 10MB
-        var docsToSaturateChunkSize = superSaturatedChunkSize / dataSize;
-
-        for ( var i = 0; i < docsToSaturateChunkSize; i++ ) {
-            testColl.insert({ _id: docID++, val: data });
-        }
-
-        assert.eq( null, testColl.getDB().getLastError() );
-    };
-
-    /**
-     * Moves a random chunk to a new shard using a different mongos.
-     *
-     * @param tries {Number} number of retry attempts when the moveChunk command
-     *     fails.
-     *
-     * variables from outer scope: coll, st
-     */
-    var moveRandomChunk = function( tries ) {
-        var otherConfDB = st.s1.getDB( "config" );
-        var chunksCursor = otherConfDB.chunks.find().sort({ max: 1 });
-        var chunkCount = chunksCursor.count();
-
-        var randIdx = Math.floor( Math.random() * chunkCount );
-        // Don't get the chunk with max/min key
-        randIdx = ( randIdx == chunkCount )? randIdx - 1 : randIdx;
-        randIdx = ( randIdx == 0 )? randIdx + 1 : randIdx;
-
-        var chunk = chunksCursor.arrayAccess( randIdx );
-        var chunkOwner = chunk.shard;
-        var newOwner = otherConfDB.shards.findOne({ _id: { $ne: chunkOwner }})._id;
-
-        var result = otherConfDB.adminCommand({ moveChunk: coll.getFullName(),
-                                                find: { _id: chunk.min._id },
-                                                to: newOwner });
-
-        jsTest.log( "moveChunk result: " + tojson( result ));
-        if( !result.ok && tries > 1 ) {
-            moveRandomChunk( tries - 1 );
-        }
-    };
-
-    var chunks = 0;
-    do {
-        coll.insert({ _id: docID++, val: data });
-        chunks = mongos.getDB( "config" ).chunks.find().count();
-    } while ( chunks < 5 );
-
-    primeForSplitting();
-
-    jsTest.log( "Starting the insert that should trigger auto-split." );
-
-    // TODO: SERVER-5175 Trigger delays here
-    coll.insert({ _id: docID++, val: data });
-    moveRandomChunk( 3 );
-
-    // getLastError should wait for all writes to this connection.
-    var errObj = coll.getDB().getLastErrorObj();
-    jsTest.log( "Last Error Object: " + tojson( errObj ));
-
-    assert.eq( docID, coll.find().itcount(), "Count does not match!" );
-
-    jsTest.log( "Finished counting." );
-
-    st.stop();
-}
-
-testGleAfterSplitDuringMigration();
-
diff --git a/jstests/noPassthroughWithMongod/sharding_rs1.js b/jstests/noPassthroughWithMongod/sharding_rs1.js
index b4db34d0d49..4c93b43dbfa 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs1.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs1.js
@@ -1,6 +1,6 @@
// tests sharding with replica sets
-var s = new ShardingTest({ shards: 3, verbose: 1, mongos: 2,
+var s = new ShardingTest({ shards: 3,
other: { rs: true , chunksize: 1, enableBalancer: true }});
s.adminCommand( { enablesharding : "test" } );
diff --git a/jstests/sharding/cleanup_orphaned_cmd.js b/jstests/sharding/cleanup_orphaned_cmd.js
index 795be09d3b7..deca5a19bc8 100644
--- a/jstests/sharding/cleanup_orphaned_cmd.js
+++ b/jstests/sharding/cleanup_orphaned_cmd.js
@@ -2,9 +2,7 @@
// Tests cleanup of orphaned data via the orphaned data cleanup command
//
-var options = { shardOptions : { verbose : 2 } };
-
-var st = new ShardingTest({ shards : 2, mongos : 2, other : options });
+var st = new ShardingTest({ shards: 2 });
st.stopBalancer();
var mongos = st.s0;
diff --git a/jstests/sharding/cleanup_orphaned_cmd_prereload.js b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
index 1e176ee412a..483c3d6de6b 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_prereload.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
@@ -2,9 +2,7 @@
// Tests failed cleanup of orphaned data when we have pending chunks
//
-var options = { shardOptions : { verbose : 2 } };
-
-var st = new ShardingTest({ shards : 2, mongos : 2, other : options });
+var st = new ShardingTest({ shards: 2 });
st.stopBalancer();
var mongos = st.s0;
diff --git a/jstests/sharding/fair_balancer_round.js b/jstests/sharding/fair_balancer_round.js
index d741a2aa2c6..8373d6fb0d6 100644
--- a/jstests/sharding/fair_balancer_round.js
+++ b/jstests/sharding/fair_balancer_round.js
@@ -9,8 +9,8 @@ var st = new ShardingTest({shards : 2, mongos : 2, other : options});
// Stop balancer initially
st.stopBalancer();
-var staleMongos = st.s0;
-var mongos = st.s1;
+var mongos = st.s0;
+var staleMongos = st.s1;
var coll = mongos.getCollection("foo.bar");
// Shard collection through first mongos
@@ -24,11 +24,13 @@ for ( var i = 0; i < numSplits; i++) {
assert(mongos.adminCommand({split : coll + "", middle : {_id : i}}).ok);
}
-// stop st.s1
-st.stopMongos(1);
+// Stop the first mongos, which set up the cluster.
+st.stopMongos(0);
// Start balancer, which lets the stale mongos balance
-st.startBalancer();
+assert.writeOK(staleMongos.getDB("config").settings.update({_id: "balancer"},
+ {$set: {stopped: false}},
+ true));
// Make sure we eventually start moving chunks
assert.soon(function() {
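
In fair_balancer_round.js the mongos that set up the cluster is now the one being stopped, so the test no longer calls the st.startBalancer() helper (presumably because that helper goes through the stopped router) and instead flips the balancer flag directly in the config database through the surviving, stale mongos. A rough sketch of that replacement, reusing the test's staleMongos connection, is below; the changelog-based wait at the end is an illustrative assumption, not the check the test actually performs.

    // Enable the balancer without st.startBalancer(): upsert the settings document
    // through the stale mongos so the next balancer round is allowed to run.
    assert.writeOK(staleMongos.getDB("config").settings.update(
        { _id: "balancer" },
        { $set: { stopped: false } },
        true /* upsert */));

    // One (assumed) way to wait for migrations to begin: poll the config changelog
    // for a moveChunk.start entry.
    assert.soon(function() {
        return staleMongos.getDB("config").changelog.findOne({ what: "moveChunk.start" }) != null;
    });
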
diff --git a/jstests/sharding/merge_chunks_test_with_md_ops.js b/jstests/sharding/merge_chunks_test_with_md_ops.js
index 143d3bc835c..accb32f4c33 100644
--- a/jstests/sharding/merge_chunks_test_with_md_ops.js
+++ b/jstests/sharding/merge_chunks_test_with_md_ops.js
@@ -4,11 +4,10 @@
var options = { shardOptions : { verbose : 0 } };
-var st = new ShardingTest({ shards : 2, mongos : 2, other : options });
+var st = new ShardingTest({ shards: 2, other: options });
st.stopBalancer();
var mongos = st.s0;
-var staleMongos = st.s1;
var admin = mongos.getDB( "admin" );
var shards = mongos.getCollection( "config.shards" ).find().toArray();
var coll = mongos.getCollection( "foo.bar" );
diff --git a/jstests/sharding/rename_across_mongos.js b/jstests/sharding/rename_across_mongos.js
index 71ab514e3d0..5d5dc1fcaf8 100644
--- a/jstests/sharding/rename_across_mongos.js
+++ b/jstests/sharding/rename_across_mongos.js
@@ -12,6 +12,12 @@ st.s1.getDB(dbName).dropDatabase();
assert.commandWorked(st.s0.getDB(dbName).runCommand({ create: 'CollNameBeforeRename' }));
assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({ Key: 1, Value: 1 }));
+if (st.configRS) {
+    // Ensure that the second mongos will see the newly created database metadata when
+    // it tries to do the collection rename.
+    st.configRS.awaitLastOpCommitted();
+}
+
// Rename collection on second mongos and ensure the document is found
assert.commandWorked(
st.s1.getDB(dbName).CollNameBeforeRename.renameCollection('CollNameAfterRename'));
diff --git a/jstests/sharding/shard5.js b/jstests/sharding/shard5.js
index 1822a3fcb81..6c3293c4ce9 100644
--- a/jstests/sharding/shard5.js
+++ b/jstests/sharding/shard5.js
@@ -10,6 +10,10 @@ s2 = s._mongos[1];
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+if (s.configRS) {
+    // Ensure that the second mongos will see the movePrimary
+    s.configRS.awaitLastOpCommitted();
+}
s.getDB( "test" ).foo.save( { num : 1 } );
s.getDB( "test" ).foo.save( { num : 2 } );