author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-05-03 10:15:20 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-05-04 16:09:10 -0400
commit     29ff4c8033c8c22cf212e13bd27be05649c03f3e
tree       bb86f9263ccfa6ea13cb91f5bef3f2c4fb93bdcb
parent     7a014ed60bf96ee11ce11acfa931268422030ca3
SERVER-23733 Tests should not write chunkSize to config.settings directly
Instead, they should pass it as a parameter to ShardingTest. This change is in preparation for removing the chunkSize parameter from mongos.
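
In practice the migration looks roughly like this (a minimal sketch distilled from the hunks below; the shard and mongos counts are illustrative):

    // Before: the test wrote the chunk size (in MB) directly into config.settings.
    var st = new ShardingTest({shards: 2, mongos: 1});
    assert.writeOK(st.getDB("config").settings.update(
        {_id: "chunksize"}, {$set: {value: 1}}, {upsert: true}));

    // After: the chunk size is passed as an option to ShardingTest instead.
    var st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1}});

Note that the option is spelled chunkSize; several tests passed the lowercase chunksize, which this change also normalizes.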
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/sharding/auth.js                    9
-rw-r--r--  jstests/sharding/authCommands.js            4
-rw-r--r--  jstests/sharding/auto_rebalance.js         15
-rw-r--r--  jstests/sharding/autosplit_heuristics.js    4
-rw-r--r--  jstests/sharding/disable_autosplit.js       6
-rw-r--r--  jstests/sharding/findandmodify2.js        210
-rw-r--r--  jstests/sharding/large_chunk.js            37
-rw-r--r--  jstests/sharding/listDatabases.js           2
-rw-r--r--  jstests/sharding/localhostAuthBypass.js     2
-rw-r--r--  jstests/sharding/zbigMapReduce.js           1
10 files changed, 142 insertions, 148 deletions
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 7b8d55ee075..ea3ed974cc5 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -49,10 +49,7 @@
name: "auth",
mongos: 1,
shards: 0,
- other: {
- extraOptions: {"keyFile": "jstests/libs/key1"},
- noChunkSize: true,
- }
+ other: {extraOptions: {"keyFile": "jstests/libs/key1"}, chunkSize: 1},
});
if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
@@ -67,8 +64,6 @@
login(adminUser);
// Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
- assert.writeOK(
- s.getDB("config").settings.update({_id: "chunksize"}, {$set: {value: 1}}, {upsert: true}));
assert.writeOK(s.getDB("config").settings.update(
{_id: "balancer"},
{$set: {"_secondaryThrottle": false, "_waitForDelete": true}},
@@ -77,7 +72,7 @@
printjson(s.getDB("config").settings.find().toArray());
print("Restart mongos with different auth options");
- s.restartMongos(0, {v: 2, configdb: s._configDB, keyFile: "jstests/libs/key1", chunkSize: 1});
+ s.restartMongos(0);
login(adminUser);
var d1 = new ReplSetTest({name: "d1", nodes: 3, useHostName: true});
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index cb1887d4aae..03e77848974 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -10,9 +10,9 @@ var doTest = function() {
var st = new ShardingTest({
keyFile: 'jstests/libs/key1',
shards: 2,
- chunksize: 2,
+ chunkSize: 2,
rs: rsOpts,
- other: {nopreallocj: 1, useHostname: false}
+ other: {useHostname: false},
});
var mongos = st.s;
diff --git a/jstests/sharding/auto_rebalance.js b/jstests/sharding/auto_rebalance.js
index 4a6ce4c86c3..18374b59a4d 100644
--- a/jstests/sharding/auto_rebalance.js
+++ b/jstests/sharding/auto_rebalance.js
@@ -3,14 +3,15 @@
(function() {
'use strict';
- var st = new ShardingTest(
- {name: 'auto_rebalance_rs', mongos: 1, shards: 2, chunksize: 1, rs: {nodes: 3}});
+ var st = new ShardingTest({mongos: 1, shards: 2, chunkSize: 1, rs: {nodes: 3}});
assert.writeOK(st.getDB("config").settings.update(
{_id: "balancer"}, {$set: {"_secondaryThrottle": false}}, {upsert: true}));
- st.getDB("admin").runCommand({enableSharding: "TestDB_auto_rebalance_rs"});
- st.getDB("admin").runCommand({shardCollection: "TestDB_auto_rebalance_rs.foo", key: {x: 1}});
+ assert.commandWorked(
+ st.getDB("admin").runCommand({enableSharding: "TestDB_auto_rebalance_rs"}));
+ assert.commandWorked(st.getDB("admin").runCommand(
+ {shardCollection: "TestDB_auto_rebalance_rs.foo", key: {x: 1}}));
var dbTest = st.getDB("TestDB_auto_rebalance_rs");
@@ -23,11 +24,11 @@
assert.writeOK(bulk.execute());
// Wait for the rebalancing to kick in
- st.startBalancer(60000);
+ st.startBalancer();
assert.soon(function() {
- var s1Chunks = st.getDB("config").chunks.count({shard: "auto_rebalance_rs-rs0"});
- var s2Chunks = st.getDB("config").chunks.count({shard: "auto_rebalance_rs-rs1"});
+ var s1Chunks = st.getDB("config").chunks.count({shard: "test-rs0"});
+ var s2Chunks = st.getDB("config").chunks.count({shard: "test-rs1"});
var total = st.getDB("config").chunks.count({ns: "TestDB_auto_rebalance_rs.foo"});
print("chunks: " + s1Chunks + " " + s2Chunks + " " + total);
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index 38331352ff7..bfde45cfc67 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -5,7 +5,7 @@
(function() {
'use strict';
- var st = new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: {chunkSize: 1}}});
+ var st = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
// The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk
// moves/splits depending on the timing.
@@ -48,7 +48,7 @@
// if resetting the chunk size happens during reloads. If the size is
// reset, we'd expect to split less, since the first split would then
// disable further splits (statistically, since the decision is randomized).
- // We choose 1.4 since split attempts happen about once every 1/5 chunksize,
+ // We choose 1.4 since split attempts happen about once every 1/5 chunkSize,
// and we want to be sure we def get a split attempt at a full chunk.
var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
var totalInserts = insertsForSplit * numChunks;
diff --git a/jstests/sharding/disable_autosplit.js b/jstests/sharding/disable_autosplit.js
index c6b7b7d5e1f..db267cc28a0 100644
--- a/jstests/sharding/disable_autosplit.js
+++ b/jstests/sharding/disable_autosplit.js
@@ -2,13 +2,13 @@
(function() {
'use strict';
- var chunkSize = 1; // In MB
+ var chunkSizeMB = 1;
var st = new ShardingTest(
- {shards: 1, mongos: 1, other: {chunksize: chunkSize, mongosOptions: {noAutoSplit: ""}}});
+ {shards: 1, mongos: 1, other: {chunkSize: chunkSizeMB, mongosOptions: {noAutoSplit: ""}}});
var data = "x";
- while (data.length < chunkSize * 1024 * 1024) {
+ while (data.length < chunkSizeMB * 1024 * 1024) {
data += data;
}
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index afa727e77b9..6df89a73d04 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -1,115 +1,115 @@
-var s = new ShardingTest(
- {name: "find_and_modify_sharded_2", shards: 2, mongos: 1, other: {chunkSize: 1}});
-s.adminCommand({enablesharding: "test"});
-
-var db = s.getDB("test");
-s.ensurePrimaryShard('test', 'shard0001');
-var primary = s.getPrimaryShard("test").getDB("test");
-var secondary = s.getOther(primary).getDB("test");
-
-var n = 100;
-var collection = "stuff";
-var minChunks = 2;
-
-var col_update = collection + '_col_update';
-var col_update_upsert = col_update + '_upsert';
-var col_fam = collection + '_col_fam';
-var col_fam_upsert = col_fam + '_upsert';
-
-var big = "x";
-
-print("---------- Creating large payload...");
-for (var i = 0; i < 15; i++) {
- big += big;
-}
-print("---------- Done.");
-
-// drop the collection
-db[col_update].drop();
-db[col_update_upsert].drop();
-db[col_fam].drop();
-db[col_fam_upsert].drop();
-
-// shard the collection on _id
-s.adminCommand({shardcollection: 'test.' + col_update, key: {_id: 1}});
-s.adminCommand({shardcollection: 'test.' + col_update_upsert, key: {_id: 1}});
-s.adminCommand({shardcollection: 'test.' + col_fam, key: {_id: 1}});
-s.adminCommand({shardcollection: 'test.' + col_fam_upsert, key: {_id: 1}});
-
-// update via findAndModify
-function via_fam() {
- for (var i = 0; i < n; i++) {
- db[col_fam].save({_id: i});
- }
+(function() {
+ 'use strict';
+
+ var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1}});
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
+
+ var db = s.getDB("test");
+ s.ensurePrimaryShard('test', 'shard0001');
+ var primary = s.getPrimaryShard("test").getDB("test");
+ var secondary = s.getOther(primary).getDB("test");
+
+ var n = 100;
+ var collection = "stuff";
+ var minChunks = 2;
- for (var i = 0; i < n; i++) {
- db[col_fam].findAndModify({query: {_id: i}, update: {$set: {big: big}}});
+ var col_update = collection + '_col_update';
+ var col_update_upsert = col_update + '_upsert';
+ var col_fam = collection + '_col_fam';
+ var col_fam_upsert = col_fam + '_upsert';
+
+ var big = "x";
+ for (var i = 0; i < 15; i++) {
+ big += big;
}
-}
-// upsert via findAndModify
-function via_fam_upsert() {
- for (var i = 0; i < n; i++) {
- db[col_fam_upsert].findAndModify(
- {query: {_id: i}, update: {$set: {big: big}}, upsert: true});
+ // drop the collection
+ db[col_update].drop();
+ db[col_update_upsert].drop();
+ db[col_fam].drop();
+ db[col_fam_upsert].drop();
+
+ // shard the collection on _id
+ s.adminCommand({shardcollection: 'test.' + col_update, key: {_id: 1}});
+ s.adminCommand({shardcollection: 'test.' + col_update_upsert, key: {_id: 1}});
+ s.adminCommand({shardcollection: 'test.' + col_fam, key: {_id: 1}});
+ s.adminCommand({shardcollection: 'test.' + col_fam_upsert, key: {_id: 1}});
+
+ // update via findAndModify
+ function via_fam() {
+ for (var i = 0; i < n; i++) {
+ db[col_fam].save({_id: i});
+ }
+
+ for (var i = 0; i < n; i++) {
+ db[col_fam].findAndModify({query: {_id: i}, update: {$set: {big: big}}});
+ }
}
-}
-// update data using basic update
-function via_update() {
- for (var i = 0; i < n; i++) {
- db[col_update].save({_id: i});
+ // upsert via findAndModify
+ function via_fam_upsert() {
+ for (var i = 0; i < n; i++) {
+ db[col_fam_upsert].findAndModify(
+ {query: {_id: i}, update: {$set: {big: big}}, upsert: true});
+ }
}
- for (var i = 0; i < n; i++) {
- db[col_update].update({_id: i}, {$set: {big: big}});
+ // update data using basic update
+ function via_update() {
+ for (var i = 0; i < n; i++) {
+ db[col_update].save({_id: i});
+ }
+
+ for (var i = 0; i < n; i++) {
+ db[col_update].update({_id: i}, {$set: {big: big}});
+ }
}
-}
-// upsert data using basic update
-function via_update_upsert() {
- for (var i = 0; i < n; i++) {
- db[col_update_upsert].update({_id: i}, {$set: {big: big}}, true);
+ // upsert data using basic update
+ function via_update_upsert() {
+ for (var i = 0; i < n; i++) {
+ db[col_update_upsert].update({_id: i}, {$set: {big: big}}, true);
+ }
}
-}
-
-print("---------- Update via findAndModify...");
-via_fam();
-print("---------- Done.");
-
-print("---------- Upsert via findAndModify...");
-via_fam_upsert();
-print("---------- Done.");
-
-print("---------- Basic update...");
-via_update();
-print("---------- Done.");
-
-print("---------- Basic update with upsert...");
-via_update_upsert();
-print("---------- Done.");
-
-print("---------- Printing chunks:");
-s.printChunks();
-
-print("---------- Verifying that both codepaths resulted in splits...");
-assert.gte(s.config.chunks.count({"ns": "test." + col_fam}),
- minChunks,
- "findAndModify update code path didn't result in splits");
-assert.gte(s.config.chunks.count({"ns": "test." + col_fam_upsert}),
- minChunks,
- "findAndModify upsert code path didn't result in splits");
-assert.gte(s.config.chunks.count({"ns": "test." + col_update}),
- minChunks,
- "update code path didn't result in splits");
-assert.gte(s.config.chunks.count({"ns": "test." + col_update_upsert}),
- minChunks,
- "upsert code path didn't result in splits");
-
-printjson(db[col_update].stats());
-
-// ensure that all chunks are smaller than chunksize
-// make sure not teensy
-// test update without upsert and with upsert
-
-s.stop();
+
+ print("---------- Update via findAndModify...");
+ via_fam();
+ print("---------- Done.");
+
+ print("---------- Upsert via findAndModify...");
+ via_fam_upsert();
+ print("---------- Done.");
+
+ print("---------- Basic update...");
+ via_update();
+ print("---------- Done.");
+
+ print("---------- Basic update with upsert...");
+ via_update_upsert();
+ print("---------- Done.");
+
+ print("---------- Printing chunks:");
+ s.printChunks();
+
+ print("---------- Verifying that both codepaths resulted in splits...");
+ assert.gte(s.config.chunks.count({"ns": "test." + col_fam}),
+ minChunks,
+ "findAndModify update code path didn't result in splits");
+ assert.gte(s.config.chunks.count({"ns": "test." + col_fam_upsert}),
+ minChunks,
+ "findAndModify upsert code path didn't result in splits");
+ assert.gte(s.config.chunks.count({"ns": "test." + col_update}),
+ minChunks,
+ "update code path didn't result in splits");
+ assert.gte(s.config.chunks.count({"ns": "test." + col_update_upsert}),
+ minChunks,
+ "upsert code path didn't result in splits");
+
+ printjson(db[col_update].stats());
+
+ // ensure that all chunks are smaller than chunkSize
+ // make sure not teensy
+ // test update without upsert and with upsert
+
+ s.stop();
+})();
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index 3318142ecac..34b0cb1d1cf 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -1,30 +1,27 @@
// Where we test operations dealing with large chunks
(function() {
+ 'use strict';
- // Starts a new sharding environment limiting the chunksize to 1GB (highest value allowed).
+ // Starts a new sharding environment limiting the chunk size to 1GB (highest value allowed).
// Note that early splitting will start with a 1/4 of max size currently.
var s = new ShardingTest({name: 'large_chunk', shards: 2, other: {chunkSize: 1024}});
-
- // take the balancer out of the equation
- s.config.settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
- s.config.settings.find().forEach(printjson);
-
- db = s.getDB("test");
+ var db = s.getDB("test");
//
// Step 1 - Test moving a large chunk
//
// Turn on sharding on the 'test.foo' collection and generate a large chunk
- s.adminCommand({enablesharding: "test"});
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'shard0001');
- bigString = "";
- while (bigString.length < 10000)
+ var bigString = "";
+ while (bigString.length < 10000) {
bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+ }
- inserted = 0;
- num = 0;
+ var inserted = 0;
+ var num = 0;
var bulk = db.foo.initializeUnorderedBulkOp();
while (inserted < (400 * 1024 * 1024)) {
bulk.insert({_id: num++, s: bigString});
@@ -32,17 +29,17 @@
}
assert.writeOK(bulk.execute());
- s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
assert.eq(1, s.config.chunks.count(), "step 1 - need one large chunk");
- primary = s.getPrimaryShard("test").getDB("test");
- secondary = s.getOther(primary).getDB("test");
+ var primary = s.getPrimaryShard("test").getDB("test");
+ var secondary = s.getOther(primary).getDB("test");
// Make sure that we don't move that chunk if it goes past what we consider the maximum chunk
// size
print("Checkpoint 1a");
- max = 200 * 1024 * 1024;
+ var max = 200 * 1024 * 1024;
assert.throws(function() {
s.adminCommand({
movechunk: "test.foo",
@@ -54,9 +51,11 @@
// Move the chunk
print("checkpoint 1b");
- before = s.config.chunks.find().toArray();
- s.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name});
- after = s.config.chunks.find().toArray();
+ var before = s.config.chunks.find().toArray();
+ assert.commandWorked(
+ s.s0.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name}));
+
+ var after = s.config.chunks.find().toArray();
assert.neq(before[0].shard, after[0].shard, "move chunk did not work");
s.config.changelog.find().forEach(printjson);
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index f6281a2b025..0ed7cf2a286 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -1,6 +1,6 @@
// tests that listDatabases doesn't show config db on a shard, even if it is there
-var test = new ShardingTest({shards: 1, mongos: 1, other: {chunksize: 1}});
+var test = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
var mongos = test.s0;
var mongod = test.shard0;
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 448d40c5649..459e627fb3f 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -180,7 +180,7 @@ var start = function() {
auth: "",
keyFile: keyfile,
shards: numShards,
- chunksize: 1,
+ chunkSize: 1,
other: {
nopreallocj: 1,
useHostname: false // Must use localhost to take advantage of the localhost auth bypass
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index fda81e12df8..9853982d535 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -15,7 +15,6 @@ function setupTest() {
// Reduce chunk size to split
var config = s.getDB("config");
- config.settings.save({_id: "chunksize", value: 1});
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'test-rs0');