Diffstat (limited to 'jstests/sharding/sharding_rs2.js')
-rw-r--r--  jstests/sharding/sharding_rs2.js  377
1 file changed, 187 insertions, 190 deletions
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index affe175eaa4..7c323ac5d44 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -10,247 +10,244 @@
//
(function() {
-'use strict';
-
-// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
-// from stepping down during migrations on slow evergreen builders.
-var s = new ShardingTest({ shards: 2,
- other: {
- chunkSize: 1,
- rs0: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- },
- rs1: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- }
- } });
-
-var db = s.getDB("test");
-var t = db.foo;
-
-s.adminCommand({ enablesharding: "test" });
-s.ensurePrimaryShard('test', 'test-rs0');
-
-// -------------------------------------------------------------------------------------------
-// ---------- test that config server updates when replica set config changes ----------------
-// -------------------------------------------------------------------------------------------
-
-
-db.foo.save({ _id: 5,x: 17 });
-assert.eq(1, db.foo.count());
-
-s.config.databases.find().forEach(printjson);
-s.config.shards.find().forEach(printjson);
-
-var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
-
-function countNodes(){
- var x = s.config.shards.findOne({ _id: dbPrimaryShardId });
- return x.host.split(",").length;
-}
-
-assert.eq(2, countNodes(), "A1");
-
-var rs = s.getRSEntry(dbPrimaryShardId);
-rs.test.add();
-try {
- rs.test.reInitiate();
-}
-catch (e){
- // this is ok as rs's may close connections on a change of master
- print(e);
-}
-
-assert.soon(
- function(){
+ 'use strict';
+
+ // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
+ // from stepping down during migrations on slow evergreen builders.
+ var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ rs0: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ },
+ rs1: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ }
+ }
+ });
+
+ var db = s.getDB("test");
+ var t = db.foo;
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'test-rs0');
+
+ // -------------------------------------------------------------------------------------------
+ // ---------- test that config server updates when replica set config changes ----------------
+ // -------------------------------------------------------------------------------------------
+
+ db.foo.save({_id: 5, x: 17});
+ assert.eq(1, db.foo.count());
+
+ s.config.databases.find().forEach(printjson);
+ s.config.shards.find().forEach(printjson);
+
+ var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
+
+ function countNodes() {
+ var x = s.config.shards.findOne({_id: dbPrimaryShardId});
+ return x.host.split(",").length;
+ }
+
+ assert.eq(2, countNodes(), "A1");
+
+ var rs = s.getRSEntry(dbPrimaryShardId);
+ rs.test.add();
+ try {
+ rs.test.reInitiate();
+ } catch (e) {
+ // this is ok as rs's may close connections on a change of master
+ print(e);
+ }
+
+ assert.soon(function() {
try {
printjson(rs.test.getPrimary().getDB("admin").runCommand("isMaster"));
s.config.shards.find().forEach(printjsononeline);
return countNodes() == 3;
- }
- catch (e){
+ } catch (e) {
print(e);
}
}, "waiting for config server to update", 180 * 1000, 1000);
-// cleanup after adding node
-for (var i = 0; i < 5; i++) {
- try {
- db.foo.findOne();
- }
- catch (e) {
-
+ // cleanup after adding node
+ for (var i = 0; i < 5; i++) {
+ try {
+ db.foo.findOne();
+ } catch (e) {
+ }
}
-}
-
-jsTest.log("Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
-rs.test.awaitReplication();
-// Make sure we wait for secondaries here - otherwise a secondary could come online later and be used for the
-// count command before being fully replicated
-jsTest.log("Awaiting secondary status of all nodes");
-rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
-
-// -------------------------------------------------------------------------------------------
-// ---------- test routing to slaves ----------------
-// -------------------------------------------------------------------------------------------
-
-// --- not sharded ----
-var m = new Mongo(s.s.name);
-var ts = m.getDB("test").foo;
+ jsTest.log(
+ "Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
+ rs.test.awaitReplication();
+ // Make sure we wait for secondaries here - otherwise a secondary could come online later and be
+ // used for the
+ // count command before being fully replicated
+ jsTest.log("Awaiting secondary status of all nodes");
+ rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
-var before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ // -------------------------------------------------------------------------------------------
+ // ---------- test routing to slaves ----------------
+ // -------------------------------------------------------------------------------------------
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B1");
-}
+ // --- not sharded ----
-m.setSlaveOk();
+ var m = new Mongo(s.s.name);
+ var ts = m.getDB("test").foo;
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B2");
-}
+ var before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-var after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-
-printjson(before);
-printjson(after);
-
-assert.lte(before.query + 10, after.query, "B3");
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B1");
+ }
-// --- add more data ----
+ m.setSlaveOk();
-db.foo.ensureIndex({ x: 1 });
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B2");
+ }
-var bulk = db.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 100; i++) {
- if (i == 17) continue;
- bulk.insert({ x: i });
-}
-assert.writeOK(bulk.execute({ w: 3 }));
+ var after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-// Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
-// replication for this and future tests to pass
-rs.test.awaitReplication();
+ printjson(before);
+ printjson(after);
-assert.eq(100, ts.count(), "B4");
-assert.eq(100, ts.find().itcount(), "B5");
-assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
+ assert.lte(before.query + 10, after.query, "B3");
-t.find().batchSize(3).next();
-gc(); gc(); gc();
+ // --- add more data ----
-// --- sharded ----
+ db.foo.ensureIndex({x: 1});
-assert.eq(100, db.foo.count(), "C1");
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++) {
+ if (i == 17)
+ continue;
+ bulk.insert({x: i});
+ }
+ assert.writeOK(bulk.execute({w: 3}));
-assert.commandWorked(s.s0.adminCommand({ shardcollection: "test.foo", key: { x: 1 } }));
+ // Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
+ // replication for this and future tests to pass
+ rs.test.awaitReplication();
-// We're doing some manual chunk stuff, so stop the balancer first
-s.stopBalancer();
+ assert.eq(100, ts.count(), "B4");
+ assert.eq(100, ts.find().itcount(), "B5");
+ assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
-assert.eq(100, t.count(), "C2");
-assert.commandWorked(s.s0.adminCommand({ split: "test.foo", middle: { x: 50 } }));
+ t.find().batchSize(3).next();
+ gc();
+ gc();
+ gc();
-s.printShardingStatus();
+ // --- sharded ----
-var other = s.config.shards.findOne({ _id: { $ne: dbPrimaryShardId } });
-assert.commandWorked(s.getDB('admin').runCommand({ moveChunk: "test.foo",
- find: { x: 10 },
- to: other._id,
- _secondaryThrottle: true,
- writeConcern: { w: 2 },
- _waitForDelete: true }));
-assert.eq(100, t.count(), "C3");
+ assert.eq(100, db.foo.count(), "C1");
-assert.eq(50, rs.test.getPrimary().getDB("test").foo.count(), "C4");
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
-// by non-shard key
+ // We're doing some manual chunk stuff, so stop the balancer first
+ s.stopBalancer();
-m = new Mongo(s.s.name);
-ts = m.getDB("test").foo;
+ assert.eq(100, t.count(), "C2");
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 50}}));
-before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ s.printShardingStatus();
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({ _id: 5 }).x, "D1");
-}
+ var other = s.config.shards.findOne({_id: {$ne: dbPrimaryShardId}});
+ assert.commandWorked(s.getDB('admin').runCommand({
+ moveChunk: "test.foo",
+ find: {x: 10},
+ to: other._id,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ _waitForDelete: true
+ }));
+ assert.eq(100, t.count(), "C3");
-m.setSlaveOk();
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({ _id: 5 }).x, "D2");
-}
+ assert.eq(50, rs.test.getPrimary().getDB("test").foo.count(), "C4");
-after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ // by non-shard key
-assert.lte(before.query + 10, after.query, "D3");
+ m = new Mongo(s.s.name);
+ ts = m.getDB("test").foo;
-// by shard key
+ before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-m = new Mongo(s.s.name);
-m.forceWriteMode("commands");
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D1");
+ }
-s.printShardingStatus();
+ m.setSlaveOk();
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D2");
+ }
-ts = m.getDB("test").foo;
+ after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ assert.lte(before.query + 10, after.query, "D3");
-for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({ x: 57 }).x, "E1");
-}
+ // by shard key
-m.setSlaveOk();
-for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({ x: 57 }).x, "E2");
-}
+ m = new Mongo(s.s.name);
+ m.forceWriteMode("commands");
-after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ s.printShardingStatus();
-assert.lte(before.query + 10, after.query, "E3");
+ ts = m.getDB("test").foo;
-assert.eq(100, ts.count(), "E4");
-assert.eq(100, ts.find().itcount(), "E5");
-printjson(ts.find().batchSize(5).explain());
+ before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-// fsyncLock the secondaries
-rs.test.getSecondaries().forEach(function(secondary) {
- assert.commandWorked(secondary.getDB("test").fsyncLock());
-});
-// Modify data only on the primary replica of the primary shard.
-// { x: 60 } goes to the shard of "rs", which is the primary shard.
-assert.writeOK(ts.insert({ primaryOnly: true, x: 60 }));
-// Read from secondary through mongos, the doc is not there due to replication delay or fsync.
-// But we can guarantee not to read from primary.
-assert.eq(0, ts.find({ primaryOnly: true, x: 60 }).itcount());
-// Unlock the secondaries
-rs.test.getSecondaries().forEach(function(secondary) {
- secondary.getDB("test").fsyncUnlock();
-});
-// Clean up the data
-assert.writeOK(ts.remove({ primaryOnly: true, x: 60 }, { writeConcern: { w: 3 }}));
+ for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E1");
+ }
-for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
m.setSlaveOk();
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
-}
+ for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E2");
+ }
-for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
-}
+ after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+
+ assert.lte(before.query + 10, after.query, "E3");
+
+ assert.eq(100, ts.count(), "E4");
+ assert.eq(100, ts.find().itcount(), "E5");
+ printjson(ts.find().batchSize(5).explain());
+
+ // fsyncLock the secondaries
+ rs.test.getSecondaries().forEach(function(secondary) {
+ assert.commandWorked(secondary.getDB("test").fsyncLock());
+ });
+ // Modify data only on the primary replica of the primary shard.
+ // { x: 60 } goes to the shard of "rs", which is the primary shard.
+ assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
+ // Read from secondary through mongos, the doc is not there due to replication delay or fsync.
+ // But we can guarantee not to read from primary.
+ assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
+ // Unlock the secondaries
+ rs.test.getSecondaries().forEach(function(secondary) {
+ secondary.getDB("test").fsyncUnlock();
+ });
+ // Clean up the data
+ assert.writeOK(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
+
+ for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ m.setSlaveOk();
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
+ }
-printjson(db.adminCommand("getShardMap"));
+ printjson(db.adminCommand("getShardMap"));
-s.stop();
+ s.stop();
})();