summaryrefslogtreecommitdiff
path: root/jstests/gle
diff options
context:
space:
mode:
authorRandolph Tan <randolph@10gen.com>2014-05-14 14:11:11 -0400
committerRandolph Tan <randolph@10gen.com>2014-05-15 14:04:11 -0400
commit0dabee8227d445a18fa5e8e49b2be60ba2a0beef (patch)
tree5cd9058a4483202c2f7e2ab6f5c44079a74ac157 /jstests/gle
parent4de88387eec6c0bb08b10d0ba1574a656f56232d (diff)
downloadmongo-0dabee8227d445a18fa5e8e49b2be60ba2a0beef.tar.gz
SERVER-13741 Migrate remaining tests to use write commands
Diffstat (limited to 'jstests/gle')
-rw-r--r--jstests/gle/block2.js58
-rw-r--r--jstests/gle/gle_after_split_failure_during_migration.js136
-rw-r--r--jstests/gle/opcounters_legacy.js174
-rw-r--r--jstests/gle/sync1.js57
-rw-r--r--jstests/gle/sync4.js26
-rw-r--r--jstests/gle/sync8.js21
6 files changed, 472 insertions, 0 deletions
diff --git a/jstests/gle/block2.js b/jstests/gle/block2.js
new file mode 100644
index 00000000000..142d51519b2
--- /dev/null
+++ b/jstests/gle/block2.js
@@ -0,0 +1,58 @@
+/**
+ * Basic gle testing for master/slave environment. Write command version also
+ * available at jstests/repl.
+ */
+
+var rt = new ReplTest( "block1" );
+
+var m = rt.start( true );
+var s = rt.start( false );
+
+if (m.writeMode() == 'commands') {
+ jsTest.log('Skipping test since commands mode is already tested in repl/');
+}
+else {
+
+ function setup(){
+
+ dbm = m.getDB( "foo" );
+ dbs = s.getDB( "foo" );
+
+ tm = dbm.bar;
+ ts = dbs.bar;
+ }
+ setup();
+
+ function check( msg ){
+ assert.eq( tm.count() , ts.count() , "check: " + msg );
+ }
+
+ function worked( w , wtimeout ){
+ var gle = dbm.getLastError( w , wtimeout );
+ if (gle != null) {
+ printjson(gle);
+ }
+ return gle == null;
+ }
+
+ check( "A" );
+
+ tm.save( { x : 1 } );
+ assert( worked( 2 ) , "B" );
+
+ tm.save( { x : 2 } );
+ assert( worked( 2 , 3000 ) , "C" );
+
+ rt.stop( false );
+ tm.save( { x : 3 } );
+ assert.eq( 3 , tm.count() , "D1" );
+ assert( ! worked( 2 , 3000 ) , "D2" );
+
+ s = rt.start( false );
+ setup();
+ assert( worked( 2 , 30000 ) , "E" );
+
+}
+
+rt.stop();
+
diff --git a/jstests/gle/gle_after_split_failure_during_migration.js b/jstests/gle/gle_after_split_failure_during_migration.js
new file mode 100644
index 00000000000..9d0a6a9ca2a
--- /dev/null
+++ b/jstests/gle/gle_after_split_failure_during_migration.js
@@ -0,0 +1,136 @@
+/**
+ * SERVER-4987 This test tries to check the getLastError call will still use
+ * the same connection even if a split chunk triggered while doing inserts
+ * failed (caused by StaleConfigException).
+ *
+ * TODO: SERVER-5175
+ * This test relies on the corresponding delays inside (1) WriteBackListener::run
+ * and (2) ShardStrategy::_insert and (3) receivedInsert from instance.cpp
+ * to make the bug easier to manifest.
+ *
+ * The purpose of (1) is to make the writebacks slower so the failed inserts won't
+ * be reapplied on time.
+ *
+ * The purpose of (2) is to make it easier for the moveChunk command from the other
+ * mongos to interleave in between the moment the insert has set its shard version and
+ * when in tries to autosplit (Note: it should be long enough to allow the moveChunk
+ * when it tries to autosplit (Note: it should be long enough to allow the moveChunk
+ *
+ * The purpose of (3) is to make sure that the insert won't get applied to the
+ * shard right away so when a different connection is used to do the getLastError,
+ * the write will still not be applied.
+ */
+function testGleAfterSplitDuringMigration(){
+ var st = new ShardingTest({ shards: 2, verbose: 2, mongos: 2,
+ other: { chunksize: 1 }});
+
+ // Stop the balancer to prevent it from contending with the distributed lock.
+ st.stopBalancer();
+
+ var DB_NAME = jsTest.name();
+ var COLL_NAME = "coll";
+
+ var mongos = st.s0;
+ var confDB = mongos.getDB( "config" );
+ var coll = mongos.getCollection( DB_NAME + "." + COLL_NAME );
+
+ var shardConn = st.d0;
+ var shardColl = shardConn.getCollection( coll.getFullName() );
+
+ var data = "x";
+ var dataSize = 1024 * 256; // bytes, must be power of 2
+ while( data.length < dataSize ) data += data;
+
+ // Shard collection
+ st.shardColl( coll, { _id : 1 }, false );
+
+ var docID = 0;
+
+ /**
+ * @return {Mongo} the connection object given the name of the shard.
+ */
+ var getShardConn = function( shardName ) {
+ var shardLoc = confDB.shards.findOne({ _id: shardName }).host;
+ return new Mongo( shardLoc );
+ };
+
+ /**
+ * Inserts enough documents, using a direct shard connection to the max key
+ * chunk, to make sure that the auto split will be triggered.
+ *
+ * variables from outer scope: docID, coll, confDB, data
+ */
+ var primeForSplitting = function() {
+ var topChunk = confDB.chunks.find().sort({ max: -1 }).limit( 1 ).next();
+ var shardLoc = getShardConn( topChunk.shard );
+ var testColl = shardLoc.getCollection( coll.getFullName() );
+
+ var superSaturatedChunkSize = 1024 * 1024 * 10; // 10MB
+ var docsToSaturateChunkSize = superSaturatedChunkSize / dataSize;
+
+ for ( var i = 0; i < docsToSaturateChunkSize; i++ ) {
+ testColl.insert({ _id: docID++, val: data });
+ }
+
+ assert.eq( null, testColl.getDB().getLastError() );
+ };
+
+ /**
+ * Moves a random chunk to a new shard using a different mongos.
+ *
+ * @param tries {Number} number of retry attempts when the moveChunk command
+ * fails.
+ *
+ * variables from outer scope: coll, st
+ */
+ var moveRandomChunk = function( tries ) {
+ var otherConfDB = st.s1.getDB( "config" );
+ var chunksCursor = otherConfDB.chunks.find().sort({ max: 1 });
+ var chunkCount = chunksCursor.count();
+
+ var randIdx = Math.floor( Math.random() * chunkCount );
+ // Don't get the chunk with max/min key
+ randIdx = ( randIdx == chunkCount )? randIdx - 1 : randIdx;
+ randIdx = ( randIdx == 0 )? randIdx + 1 : randIdx;
+
+ var chunk = chunksCursor.arrayAccess( randIdx );
+ var chunkOwner = chunk.shard;
+ var newOwner = otherConfDB.shards.findOne({ _id: { $ne: chunkOwner }})._id;
+
+ var result = otherConfDB.adminCommand({ moveChunk: coll.getFullName(),
+ find: { _id: chunk.min._id },
+ to: newOwner });
+
+ jsTest.log( "moveChunk result: " + tojson( result ));
+ if( !result.ok && tries > 1 ) {
+ moveRandomChunk( tries - 1 );
+ }
+ };
+
+ var chunks = 0;
+ do {
+ coll.insert({ _id: docID++, val: data });
+ chunks = mongos.getDB( "config" ).chunks.find().count();
+ } while ( chunks < 5 );
+
+ primeForSplitting();
+
+ jsTest.log( "Starting the insert that should trigger auto-split." );
+
+ // TODO: SERVER-5175 Trigger delays here
+ coll.insert({ _id: docID++, val: data });
+ moveRandomChunk( 3 );
+
+ // getLastError should wait for all writes to this connection.
+ var errObj = coll.getDB().getLastErrorObj();
+ jsTest.log( "Last Error Object: " + tojson( errObj ));
+
+ assert.eq( docID, coll.find().itcount(), "Count does not match!" );
+
+ jsTest.log( "Finished counting." );
+
+ st.stop();
+}
+
+testGleAfterSplitDuringMigration();
+
diff --git a/jstests/gle/opcounters_legacy.js b/jstests/gle/opcounters_legacy.js
new file mode 100644
index 00000000000..52e18c48643
--- /dev/null
+++ b/jstests/gle/opcounters_legacy.js
@@ -0,0 +1,174 @@
+// Test that opcounters get incremented properly.
+// Write command version also available at jstests/core.
+
+// Remember the global 'db' var
+var lastDB = db;
+var mongo = new Mongo(db.getMongo().host);
+mongo.writeMode = function() { return "legacy"; }
+db = mongo.getDB(db.toString());
+
+var t = db.opcounters;
+var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg);
+var opCounters;
+
+//
+// 1. Insert.
+//
+// - mongod, single insert:
+// counted as 1 op if successful, else 0
+// - mongod, bulk insert of N with continueOnError=true:
+// counted as N ops, regardless of errors
+// - mongod, bulk insert of N with continueOnError=false:
+// counted as K ops, where K is number of docs successfully inserted
+//
+// - mongos
+// count ops attempted like insert commands
+//
+
+t.drop();
+
+// Single insert, no error.
+opCounters = db.serverStatus().opcounters;
+t.insert({_id:0});
+assert(!db.getLastError());
+assert.eq(opCounters.insert + 1, db.serverStatus().opcounters.insert);
+
+// Bulk insert, no error.
+opCounters = db.serverStatus().opcounters;
+t.insert([{_id:1},{_id:2}])
+assert(!db.getLastError());
+assert.eq(opCounters.insert + 2, db.serverStatus().opcounters.insert);
+
+// Single insert, with error.
+opCounters = db.serverStatus().opcounters;
+t.insert({_id:0})
+print( db.getLastError() )
+assert(db.getLastError());
+assert.eq(opCounters.insert + (isMongos ? 1 : 0), db.serverStatus().opcounters.insert);
+
+// Bulk insert, with error, continueOnError=false.
+opCounters = db.serverStatus().opcounters;
+t.insert([{_id:3},{_id:3},{_id:4}])
+assert(db.getLastError());
+assert.eq(opCounters.insert + (isMongos ? 2 : 1), db.serverStatus().opcounters.insert);
+
+// Bulk insert, with error, continueOnError=true.
+var continueOnErrorFlag = 1;
+opCounters = db.serverStatus().opcounters;
+t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag)
+assert(db.getLastError());
+assert.eq(opCounters.insert + 3, db.serverStatus().opcounters.insert);
+
+//
+// 2. Update.
+//
+// - counted as 1 op, regardless of errors
+//
+
+t.drop();
+t.insert({_id:0});
+
+// Update, no error.
+opCounters = db.serverStatus().opcounters;
+t.update({_id:0}, {$set:{a:1}});
+assert(!db.getLastError());
+assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update);
+
+// Update, with error.
+opCounters = db.serverStatus().opcounters;
+t.update({_id:0}, {$set:{_id:1}});
+assert(db.getLastError());
+assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update);
+
+//
+// 3. Delete.
+//
+// - counted as 1 op, regardless of errors
+//
+
+t.drop();
+t.insert([{_id:0},{_id:1}]);
+
+// Delete, no error.
+opCounters = db.serverStatus().opcounters;
+t.remove({_id:0});
+assert(!db.getLastError());
+assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete);
+
+// Delete, with error.
+opCounters = db.serverStatus().opcounters;
+t.remove({_id:{$invalidOp:1}});
+assert(db.getLastError());
+assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete);
+
+//
+// 4. Query.
+//
+// - mongod: counted as 1 op, regardless of errors
+// - mongos: counted as 1 op if successful, else 0
+//
+
+t.drop();
+t.insert({_id:0});
+
+// Query, no error.
+opCounters = db.serverStatus().opcounters;
+t.findOne();
+assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query);
+
+// Query, with error.
+opCounters = db.serverStatus().opcounters;
+assert.throws(function() { t.findOne({_id:{$invalidOp:1}}) });
+assert.eq(opCounters.query + (isMongos ? 0 : 1), db.serverStatus().opcounters.query);
+
+//
+// 5. Getmore.
+//
+// - counted as 1 op per getmore issued, regardless of errors
+//
+
+t.drop();
+t.insert([{_id:0},{_id:1},{_id:2}]);
+
+// Getmore, no error.
+opCounters = db.serverStatus().opcounters;
+t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore
+assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query);
+assert.eq(opCounters.getmore + 1, db.serverStatus().opcounters.getmore);
+
+// Getmore, with error (TODO implement when SERVER-5813 is resolved).
+
+//
+// 6. Command.
+//
+// - unrecognized commands not counted
+// - recognized commands counted as 1 op, regardless of errors
+// - some (recognized) commands can suppress command counting (i.e. aren't counted as commands)
+//
+
+t.drop();
+t.insert({_id:0})
+
+// Command, recognized, no error.
+opCounters = db.serverStatus().opcounters;
+assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
+
+// Command, recognized, with error.
+opCounters = db.serverStatus().opcounters;
+res = t.runCommand("count", {query:{$invalidOp:1}});
+assert.eq(0, res.ok);
+assert.eq(opCounters.command + 2,
+ db.serverStatus().opcounters.command); // "serverStatus", "count" counted
+
+// Command, unrecognized.
+opCounters = db.serverStatus().opcounters;
+res = t.runCommand("command that doesn't exist");
+assert.eq(0, res.ok);
+//assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
+// TODO Replace below with above when SERVER-9038 is resolved (mongos counts unrecognized commands)
+assert.eq(opCounters.command + (isMongos ? 2 : 1), db.serverStatus().opcounters.command);
+
+// Command, recognized, counting suppressed (TODO implement when SERVER-9038 is resolved).
+
+// Restore 'db' var
+db = lastDB;
diff --git a/jstests/gle/sync1.js b/jstests/gle/sync1.js
new file mode 100644
index 00000000000..83d26d1e71f
--- /dev/null
+++ b/jstests/gle/sync1.js
@@ -0,0 +1,57 @@
+// TODO: remove test after we deprecate SyncClusterConnection
+
+var test = new SyncCCTest( "sync1" );
+
+if (test.conn.writeMode() == 'commands') {
+ jsTest.log('Skipping test not compatible with write commands');
+}
+else {
+
+ db = test.conn.getDB( "test" )
+ t = db.sync1
+ t.save( { x : 1 } )
+ assert.eq( 1 , t.find().itcount() , "A1" );
+ assert.eq( 1 , t.find().count() , "A2" );
+ t.save( { x : 2 } )
+ assert.eq( 2 , t.find().itcount() , "A3" );
+ assert.eq( 2 , t.find().count() , "A4" );
+
+ test.checkHashes( "test" , "A3" );
+
+ test.tempKill();
+ assert.throws( function(){ t.save( { x : 3 } ); } , null , "B1" );
+ // It's ok even for some of the mongod to be unreachable for read-only cmd
+ assert.eq( 2, t.find({}).count() );
+ // It's NOT ok for some of the mongod to be unreachable for write cmd
+ assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
+ assert.eq( 2 , t.find().itcount() , "B2" );
+ test.tempStart();
+ test.checkHashes( "test" , "B3" );
+
+ // Try killing the second mongod
+ test.tempKill( 1 );
+ assert.throws( function(){ t.save( { x : 3 } ); } );
+ // It's ok even for some of the mongod to be unreachable for read-only cmd
+ assert.eq( 2, t.find({}).count() );
+ // It's NOT ok for some of the mongod to be unreachable for write cmd
+ assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
+ assert.eq( 2 , t.find().itcount() );
+ test.tempStart( 1 );
+
+ assert.eq( 2 , t.find().itcount() , "C1" );
+ assert.soon( function(){
+ try {
+ t.remove( { x : 1 } )
+ return true;
+ }
+ catch ( e ){
+ print( e );
+ }
+ return false;
+ } )
+ t.find().forEach( printjson )
+ assert.eq( 1 , t.find().itcount() , "C2" );
+
+ test.stop();
+
+}
diff --git a/jstests/gle/sync4.js b/jstests/gle/sync4.js
new file mode 100644
index 00000000000..b6b1a777856
--- /dev/null
+++ b/jstests/gle/sync4.js
@@ -0,0 +1,26 @@
+// TODO: remove test after we deprecate SyncClusterConnection
+
+test = new SyncCCTest( "sync4" )
+
+if (test.conn.writeMode() == 'commands') {
+ jsTest.log('Skipping test not compatible with write commands');
+}
+else {
+
+ db = test.conn.getDB( "test" )
+ t = db.sync4
+
+ for ( i=0; i<1000; i++ ){
+ t.insert( { _id : i , x : "asdasdsdasdas" } )
+ }
+ db.getLastError();
+
+ test.checkHashes( "test" , "A0" );
+ assert.eq( 1000 , t.find().count() , "A1" )
+ assert.eq( 1000 , t.find().itcount() , "A2" )
+ assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A2" )
+
+ test.stop();
+
+}
+
diff --git a/jstests/gle/sync8.js b/jstests/gle/sync8.js
new file mode 100644
index 00000000000..81404785ac3
--- /dev/null
+++ b/jstests/gle/sync8.js
@@ -0,0 +1,21 @@
+// TODO: remove test after we deprecate SyncClusterConnection
+
+// Test for SERVER-11492 - make sure that upserting a new document reports n:1 in GLE
+
+var test = new SyncCCTest( "sync1" );
+
+if (test.conn.writeMode() == 'commands') {
+ jsTest.log('Skipping test not compatible with write commands');
+}
+else {
+ var db = test.conn.getDB( "test" );
+ var t = db.sync8;
+ t.remove({});
+
+ t.update({_id:1}, {$set:{a:1}}, true);
+ var le = db.getLastErrorObj();
+ assert.eq(1, le.n);
+
+ test.stop();
+
+}