author     Randolph Tan <randolph@10gen.com>  2014-03-04 17:41:56 -0500
committer  Randolph Tan <randolph@10gen.com>  2014-04-21 16:53:25 -0400
commit     7acafe85d9bdd63122c19ba1cca86a7f55174941 (patch)
tree       234effd8e5a5b6c63d8b12c74de2d9acb78a7509 /jstests/gle
parent     e87b42c4f13e48078f5c4aefba3caf18dcfba072 (diff)
download   mongo-7acafe85d9bdd63122c19ba1cca86a7f55174941.tar.gz
SERVER-13425 migrate sharding jstest suite to use write commands api
Diffstat (limited to 'jstests/gle')
-rw-r--r--  jstests/gle/gle_sharded_wc.js     | 123
-rw-r--r--  jstests/gle/gle_sharded_write.js  | 176
-rw-r--r--  jstests/gle/updated_existing.js   |  29
3 files changed, 328 insertions(+), 0 deletions(-)
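The commit subject references the write commands API. As a rough mongo-shell sketch of the difference between the legacy getLastError (GLE) style these tests exercise and the write-commands style the suite is being migrated toward (the collection name "coll" and the write-concern values below are illustrative, not taken from this diff):

// Legacy style: issue the write, then ask for its outcome with getLastError.
db.coll.insert({ _id : 1 });
var gle = db.runCommand({ getLastError : 1, w : 2, wtimeout : 5000 });

// Write commands style (MongoDB 2.6+): the documents and the write concern travel
// in a single command, and the result is returned directly instead of via GLE.
var res = db.runCommand({
    insert : "coll",
    documents : [{ _id : 2 }],
    ordered : true,
    writeConcern : { w : 2, wtimeout : 5000 }
});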
diff --git a/jstests/gle/gle_sharded_wc.js b/jstests/gle/gle_sharded_wc.js
new file mode 100644
index 00000000000..54050acb141
--- /dev/null
+++ b/jstests/gle/gle_sharded_wc.js
@@ -0,0 +1,123 @@
+//
+// Tests of sharded GLE enforcing write concern against operations in a cluster
+// Basic sharded GLE operation is tested elsewhere.
+//
+
+// Options for a cluster with two replica-set shards, the first with two nodes and the second
+// with one. This lets us try a number of GLE scenarios.
+var options = { separateConfig : true,
+ rs : true,
+ rsOptions : { nojournal : "" },
+ // Options for each replica set shard
+ rs0 : { nodes : 2 },
+ rs1 : { nodes : 1 } };
+
+var st = new ShardingTest({ shards : 2, mongos : 1, other : options });
+st.stopBalancer();
+
+var mongos = st.s0;
+var admin = mongos.getDB( "admin" );
+var config = mongos.getDB( "config" );
+var coll = mongos.getCollection( jsTestName() + ".coll" );
+var shards = config.shards.find().toArray();
+
+assert.commandWorked( admin.runCommand({ enableSharding : coll.getDB().toString() }) );
+printjson( admin.runCommand({ movePrimary : coll.getDB().toString(), to : shards[0]._id }) );
+assert.commandWorked( admin.runCommand({ shardCollection : coll.toString(), key : { _id : 1 } }) );
+assert.commandWorked( admin.runCommand({ split : coll.toString(), middle : { _id : 0 } }) );
+assert.commandWorked( admin.runCommand({ moveChunk : coll.toString(),
+ find : { _id : 0 },
+ to : shards[1]._id }) );
+
+st.printShardingStatus();
+
+var gle = null;
+
+//
+// Insert on a shard running with no journal; GLE with j : true fails
+coll.remove({});
+coll.insert({ _id : 1 });
+printjson(gle = coll.getDB().runCommand({ getLastError : 1, j : true }));
+assert(!gle.ok);
+assert(gle.errmsg);
+
+//
+// Successful insert, write concern mode invalid
+coll.remove({});
+coll.insert({ _id : -1 });
+printjson(gle = coll.getDB().runCommand({ getLastError : 1, w : 'invalid' }));
+assert(!gle.ok);
+assert(!gle.err);
+assert(gle.errmsg);
+assert.eq(gle.code, 79); // UnknownReplWriteConcern - needed for backwards compatibility
+assert.eq(coll.count(), 1);
+
+//
+// Error on insert (dup key), write concern error not reported
+coll.remove({});
+coll.insert({ _id : -1 });
+coll.insert({ _id : -1 });
+printjson(gle = coll.getDB().runCommand({ getLastError : 1, w : 'invalid' }));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert.eq(coll.count(), 1);
+
+//
+// Successful remove on two hosts, write concern timeout on one
+coll.remove({});
+st.rs0.awaitReplication(); // To ensure the first shard won't time out
+printjson(gle = coll.getDB().runCommand({ getLastError : 1, w : 2, wtimeout : 5 * 1000 }));
+assert(gle.ok);
+assert.eq(gle.err, 'timeout');
+assert(gle.wtimeout);
+assert(gle.shards);
+assert.eq(coll.count(), 0);
+
+//
+// Successful remove on two hosts, write concern timeout on both
+// We don't aggregate two timeouts together
+coll.remove({});
+st.rs0.awaitReplication();
+printjson(gle = coll.getDB().runCommand({ getLastError : 1, w : 3, wtimeout : 5 * 1000 }));
+assert(!gle.ok);
+assert(gle.errmsg);
+assert.eq(gle.code, 64); // WriteConcernFailed - needed for backwards compatibility
+assert(!gle.wtimeout);
+assert(gle.shards);
+assert(gle.errs);
+assert.eq(coll.count(), 0);
+
+//
+// First replica set with no primary
+//
+
+//
+// Successful bulk insert on two hosts, host changes before gle (error contacting host)
+coll.remove({});
+coll.insert([{ _id : 1 }, { _id : -1 }]);
+// Wait for the writes to reach the shards before shutting the primary down.
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+st.rs0.stop(st.rs0.getPrimary(), true); // wait for stop
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+// Should get an error about contacting dead host.
+assert(!gle.ok);
+assert(gle.errmsg);
+assert.eq(coll.count({ _id : 1 }), 1);
+
+//
+// Failed insert on two hosts, first replica set with no primary
+// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
+// successful writes from.
+coll.remove({ _id : 1 });
+coll.insert([{ _id : 1 }, { _id : -1 }]);
+
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert(gle.err);
+assert.eq(coll.count({ _id : 1 }), 1);
+
+jsTest.log( "DONE!" );
+
+st.stop();
diff --git a/jstests/gle/gle_sharded_write.js b/jstests/gle/gle_sharded_write.js
new file mode 100644
index 00000000000..0f602a5e4d7
--- /dev/null
+++ b/jstests/gle/gle_sharded_write.js
@@ -0,0 +1,176 @@
+//
+// Ensures GLE correctly reports basic write stats and failures
+// Note that this test should work correctly with and without write commands.
+//
+
+var options = { separateConfig : true };
+
+var st = new ShardingTest({ shards : 2, mongos : 1, other : options });
+st.stopBalancer();
+
+var mongos = st.s0;
+var admin = mongos.getDB( "admin" );
+var config = mongos.getDB( "config" );
+var coll = mongos.getCollection( jsTestName() + ".coll" );
+var shards = config.shards.find().toArray();
+
+assert.commandWorked( admin.runCommand({ enableSharding : coll.getDB().toString() }) );
+printjson( admin.runCommand({ movePrimary : coll.getDB().toString(), to : shards[0]._id }) );
+assert.commandWorked( admin.runCommand({ shardCollection : coll.toString(), key : { _id : 1 } }) );
+assert.commandWorked( admin.runCommand({ split : coll.toString(), middle : { _id : 0 } }) );
+assert.commandWorked( admin.runCommand({ moveChunk : coll.toString(),
+ find : { _id : 0 },
+ to : shards[1]._id }) );
+
+st.printShardingStatus();
+
+var gle = null;
+
+//
+// Successful insert
+coll.remove({});
+coll.insert({ _id : -1 });
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert.eq(coll.count(), 1);
+
+//
+// Successful update
+coll.remove({});
+coll.insert({ _id : 1 });
+coll.update({ _id : 1 }, { $set : { foo : "bar" } });
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert(gle.updatedExisting);
+assert.eq(gle.n, 1);
+assert.eq(coll.count(), 1);
+
+//
+// Successful multi-update
+coll.remove({});
+coll.insert({ _id : 1 });
+coll.update({ }, { $set : { foo : "bar" } }, false, true);
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert(gle.updatedExisting);
+assert.eq(gle.n, 1);
+assert.eq(coll.count(), 1);
+
+//
+// Successful upsert
+coll.remove({});
+coll.update({ _id : 1 }, { _id : 1 }, true);
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert(!gle.updatedExisting);
+assert.eq(gle.n, 1);
+assert.eq(gle.upserted, 1);
+assert.eq(coll.count(), 1);
+
+//
+// Successful upserts
+coll.remove({});
+coll.update({ _id : -1 }, { _id : -1 }, true);
+coll.update({ _id : 1 }, { _id : 1 }, true);
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert(!gle.updatedExisting);
+assert.eq(gle.n, 1);
+assert.eq(gle.upserted, 1);
+assert.eq(coll.count(), 2);
+
+//
+// Successful remove
+coll.remove({});
+coll.insert({ _id : 1 });
+coll.remove({ _id : 1 });
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert('err' in gle);
+assert(!gle.err);
+assert.eq(gle.n, 1);
+assert.eq(coll.count(), 0);
+
+//
+// Error on one host during update
+coll.remove({});
+coll.update({ _id : 1 }, { $invalid : "xxx" }, true);
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert(gle.singleShard);
+assert.eq(coll.count(), 0);
+
+//
+// Error on two hosts during remove
+coll.remove({});
+coll.remove({ $invalid : 'remove' });
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert(gle.shards);
+assert.eq(coll.count(), 0);
+
+//
+// Repeated calls to GLE should work
+coll.remove({});
+coll.update({ _id : 1 }, { $invalid : "xxx" }, true);
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert(gle.singleShard);
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert(gle.err);
+assert(gle.code);
+assert(!gle.errmsg);
+assert(gle.singleShard);
+assert.eq(coll.count(), 0);
+
+//
+// First shard down
+//
+
+//
+// Successful bulk insert on two hosts, host dies before gle (error contacting host)
+coll.remove({});
+coll.insert([{ _id : 1 }, { _id : -1 }]);
+// Wait for the writes to reach the shards before shutting the shard down.
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+MongoRunner.stopMongod( st.shard0 );
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+// Should get an error about contacting dead host.
+assert(!gle.ok);
+assert(gle.errmsg);
+
+//
+// Failed insert on two hosts, first host dead
+// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
+// successful writes from.
+coll.remove({ _id : 1 });
+coll.insert([{ _id : 1 }, { _id : -1 }]);
+printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+assert(gle.ok);
+assert(gle.err);
+assert.eq(coll.count({ _id : 1 }), 1);
+
+jsTest.log("DONE!");
+
+st.stop();
+
diff --git a/jstests/gle/updated_existing.js b/jstests/gle/updated_existing.js
new file mode 100644
index 00000000000..bd03c535099
--- /dev/null
+++ b/jstests/gle/updated_existing.js
@@ -0,0 +1,29 @@
+/**
+* SERVER-5872: This test checks that the "updatedExisting" field in the response to
+* an upsert is not missing when autosplit takes place.
+*/
+
+var st = new ShardingTest({ shards : 1, mongos : 1, verbose : 1, chunksize : 1 });
+
+var testDB = st.getDB("test");
+var coll = "foo";
+testDB[coll].drop();
+
+st.adminCommand({ enablesharding : 'test' });
+st.adminCommand({ shardcollection : 'test.' + coll, key : { "shardkey2" : 1, "shardkey1" : 1 } });
+
+var bigString = "";
+while ( bigString.length < 1024 * 50 )
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+
+
+
+for (var i = 0; i < 10000; ++i) {
+ testDB[coll].update({ "shardkey1" : "test" + i, "shardkey2" : "test" + i },
+ { $set : { "test_upsert": bigString } },
+ true, // upsert
+ false); // multi
+ assert.eq(testDB.getLastErrorObj().updatedExisting, false);
+}
+
+st.stop();