summaryrefslogtreecommitdiff
path: root/jstests/sharding/balance_repl.js
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2016-07-15 14:00:33 -0400
committerKaloian Manassiev <kaloian.manassiev@mongodb.com>2016-07-19 18:03:45 -0400
commit971026616e9c96398a1e9c8af7107dfd48ff84e8 (patch)
treeeaf368ef1192898d73112ccf59b8a9a176cfe27a /jstests/sharding/balance_repl.js
parent89d75b1fe6bc12330b44f40507396c72da67b150 (diff)
downloadmongo-971026616e9c96398a1e9c8af7107dfd48ff84e8.tar.gz
SERVER-24853 Support parallel chunk migrations
This change connects the new policy, which supports returning multiple migrations for the same collection, with the MigrationManager, in order to allow chunks to migrate in parallel.
Diffstat (limited to 'jstests/sharding/balance_repl.js')
-rw-r--r--jstests/sharding/balance_repl.js35
1 file changed, 20 insertions, 15 deletions
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 43357ba3ca6..02e2c54bbc0 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -1,7 +1,7 @@
-//
-// Testing migrations are successful with secondaryThrottle.
-//
-
+/**
+ * Testing migrations are successful and immediately visible on the secondaries, when
+ * secondaryThrottle is used.
+ */
(function() {
'use strict';
@@ -26,30 +26,32 @@
}
});
- var coll = s.s0.getCollection("test.foo");
- var bulk = coll.initializeUnorderedBulkOp();
+ var bulk = s.s0.getDB('TestDB').TestColl.initializeUnorderedBulkOp();
for (var i = 0; i < 2100; i++) {
bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
- assert.commandWorked(s.s0.adminCommand({enablesharding: coll.getDB() + ""}));
- s.ensurePrimaryShard(coll.getDB() + "", s.shard0.shardName);
- assert.commandWorked(s.s0.adminCommand({shardcollection: coll + "", key: {_id: 1}}));
+ assert.commandWorked(s.s0.adminCommand({enablesharding: 'TestDB'}));
+ s.ensurePrimaryShard('TestDB', s.shard0.shardName);
+ assert.commandWorked(s.s0.adminCommand({shardcollection: 'TestDB.TestColl', key: {_id: 1}}));
for (i = 0; i < 20; i++) {
- assert.commandWorked(s.s0.adminCommand({split: coll + "", middle: {_id: i * 100}}));
+ assert.commandWorked(s.s0.adminCommand({split: 'TestDB.TestColl', middle: {_id: i * 100}}));
}
- assert.eq(2100, coll.find().itcount());
- coll.setSlaveOk();
- assert.eq(2100, coll.find().itcount());
+ var collPrimary = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
+ assert.eq(2100, collPrimary.find().itcount());
+
+ var collSlaveOk = (new Mongo(s.s0.host)).getDB('TestDB').TestColl;
+ collSlaveOk.setSlaveOk();
+ assert.eq(2100, collSlaveOk.find().itcount());
for (i = 0; i < 20; i++) {
// Needs to waitForDelete because we'll be performing a slaveOk query, and secondaries don't
// have a chunk manager so it doesn't know how to filter out docs it doesn't own.
assert.commandWorked(s.s0.adminCommand({
- moveChunk: "test.foo",
+ moveChunk: 'TestDB.TestColl',
find: {_id: i * 100},
to: s.shard1.shardName,
_secondaryThrottle: true,
@@ -57,7 +59,10 @@
_waitForDelete: true
}));
- assert.eq(2100, coll.find().itcount());
+ assert.eq(2100,
+ collSlaveOk.find().itcount(),
+ 'Incorrect count when reading from secondary. Count from primary is ' +
+ collPrimary.find().itcount());
}
s.stop();