author     Kevin Pulo <kevin.pulo@mongodb.com>  2017-07-16 11:07:49 +1000
committer  Kevin Pulo <kevin.pulo@mongodb.com>  2017-08-25 11:09:24 +1000
commit     e1f5f40fc17f99fc06dda4621564db7e31be1132 (patch)
tree       f9d065919b002e2d7d944964e372de7eb9dd82c3 /jstests
parent     d78c225444bf4ab93e8cbe824f622f7d8940bd8d (diff)
SERVER-20392 remove early chunksize autosplit heuristic

Plus some additional 3.4-specific jstest fixes.

(cherry picked from commit ad6a668da49c61a4276749aef7529088dc3524ea)
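For context: the heuristic removed here made mongos split chunks well before they reached the configured chunk size while a collection still had only a few chunks, which could split a freshly sharded collection's pre-existing data far too aggressively. The sketch below only illustrates the shape of that kind of heuristic in shell-style JavaScript; the thresholds are invented for exposition and this is not the server's actual code.

    // Illustrative sketch only -- not the removed server implementation.
    // An "early split" heuristic lowers the split threshold while a
    // collection has few chunks, trading smaller chunks for faster spread.
    function splitThresholdBytes(numChunks, maxChunkSizeBytes) {
        var kMinThreshold = 1024 * 1024;  // hypothetical 1MB floor
        if (numChunks < 10) {
            // Hypothetical early-split rule: split at a quarter of the
            // configured maximum until the collection has more chunks.
            return Math.max(maxChunkSizeBytes / 4, kMinThreshold);
        }
        return maxChunkSizeBytes;
    }

With the heuristic gone, the threshold is simply the configured chunk size, so sharding a collection with pre-existing data no longer triggers a burst of early splits; the changed tests below account for that.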
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/sharding/auth.js                             |   3
-rw-r--r--  jstests/sharding/autosplit.js                        |   7
-rw-r--r--  jstests/sharding/shard_existing_coll_chunk_count.js  | 165
-rw-r--r--  jstests/sharding/write_cmd_auto_split.js             |   6
4 files changed, 176 insertions(+), 5 deletions(-)
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 61b25f10dde..037532acaec 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -38,7 +38,7 @@
name: "auth",
mongos: 1,
shards: 0,
- other: {keyFile: "jstests/libs/key1", chunkSize: 1, enableAutoSplit: true},
+ other: {keyFile: "jstests/libs/key1", chunkSize: 1, enableAutoSplit: false},
});
if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
@@ -167,6 +167,7 @@
s.getDB("test").foo.remove({});
var num = 10000;
+ assert.commandWorked(s.s.adminCommand({split: "test.foo", middle: {x: num / 2}}));
var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
for (i = 0; i < num; i++) {
bulk.insert(
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
index bb34021487f..0eba386b6a3 100644
--- a/jstests/sharding/autosplit.js
+++ b/jstests/sharding/autosplit.js
@@ -4,7 +4,12 @@
(function() {
'use strict';
- var s = new ShardingTest({name: "auto1", shards: 2, mongos: 1, other: {enableAutoSplit: true}});
+ var s = new ShardingTest({
+ name: "auto1",
+ shards: 2,
+ mongos: 1,
+ other: {enableAutoSplit: true, chunkSize: 10},
+ });
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'shard0001');
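The autosplit.js fixture change above pins the test's chunk size to 10MB up front. At runtime the same knob lives in the config database, which the new test below also updates directly; a minimal shell sketch of that manual route, assuming a shell connected to a mongos:

    // Minimal sketch: set the cluster-wide max chunk size (in MB) by hand,
    // mirroring the config.settings update used in the new test below.
    var conf = db.getSiblingDB("config");
    assert.writeOK(conf.getCollection("settings").update(
        {_id: "chunksize"}, {$set: {value: 10}}, {upsert: true}));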
diff --git a/jstests/sharding/shard_existing_coll_chunk_count.js b/jstests/sharding/shard_existing_coll_chunk_count.js
new file mode 100644
index 00000000000..60145fef712
--- /dev/null
+++ b/jstests/sharding/shard_existing_coll_chunk_count.js
@@ -0,0 +1,165 @@
+/**
+ * This test confirms that after sharding a collection with some pre-existing data,
+ * the resulting chunks aren't auto-split too aggressively.
+ */
+(function() {
+ 'use strict';
+
+ var s = new ShardingTest({
+ name: "shard_existing_coll_chunk_count",
+ shards: 1,
+ mongos: 1,
+ other: {enableAutoSplit: true},
+ });
+
+ assert.commandWorked(s.s.adminCommand({enablesharding: "test"}));
+
+ var collNum = 0;
+ var overhead = Object.bsonsize({_id: ObjectId(), i: 1, pad: ""});
+
+ var getNumberChunks = function(ns) {
+ return s.configRS.getPrimary().getDB("config").getCollection("chunks").count({ns});
+ };
+
+ var runCase = function(opts) {
+ // Expected options.
+ assert.gte(opts.docSize, 0);
+ assert.gte(opts.stages.length, 2);
+
+ // Compute padding.
+ if (opts.docSize < overhead) {
+ var pad = "";
+ } else {
+ var pad = (new Array(opts.docSize - overhead + 1)).join(' ');
+ }
+
+ collNum++;
+ var db = s.getDB("test");
+ var collName = "coll" + collNum;
+ var coll = db.getCollection(collName);
+ var i = 0;
+ var limit = 0;
+ var stageNum = 0;
+ var stage = opts.stages[stageNum];
+
+ // Insert initial docs.
+ var bulk = coll.initializeUnorderedBulkOp();
+ limit += stage.numDocsToInsert;
+ for (; i < limit; i++) {
+ bulk.insert({i, pad});
+ }
+ assert.writeOK(bulk.execute());
+
+ // Create shard key index.
+ assert.commandWorked(coll.createIndex({i: 1}));
+
+ // Shard collection.
+ assert.commandWorked(s.s.adminCommand({shardcollection: coll.getFullName(), key: {i: 1}}));
+
+ // Confirm initial number of chunks.
+ var numChunks = getNumberChunks(coll.getFullName());
+ assert.eq(numChunks,
+ stage.expectedNumChunks,
+ 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
+ ' initial chunks, but found ' + numChunks + '\nopts: ' + tojson(opts) +
+ '\nchunks:\n' + s.getChunksString(coll.getFullName()));
+
+ // Do the rest of the stages.
+ for (stageNum = 1; stageNum < opts.stages.length; stageNum++) {
+ stage = opts.stages[stageNum];
+
+ // Insert the later docs (one at a time, to maximise the autosplit effects).
+ limit += stage.numDocsToInsert;
+ for (; i < limit; i++) {
+ coll.insert({i, pad});
+ }
+
+ // Confirm number of chunks for this stage.
+ var numChunks = getNumberChunks(coll.getFullName());
+ assert.eq(numChunks,
+ stage.expectedNumChunks,
+ 'in ' + coll.getFullName() + ' expected ' + stage.expectedNumChunks +
+ ' chunks for stage ' + stageNum + ', but found ' + numChunks +
+ '\nopts: ' + tojson(opts) + '\nchunks:\n' +
+ s.getChunksString(coll.getFullName()));
+ }
+ };
+
+ // Original problematic case.
+ runCase({
+ docSize: 0,
+ stages: [
+ {numDocsToInsert: 20000, expectedNumChunks: 1},
+ {numDocsToInsert: 7, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+ });
+
+ // Original problematic case (worse).
+ runCase({
+ docSize: 0,
+ stages: [
+ {numDocsToInsert: 90000, expectedNumChunks: 1},
+ {numDocsToInsert: 7, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+ });
+
+ // Pathological case #1.
+ runCase({
+ docSize: 522,
+ stages: [
+ {numDocsToInsert: 8191, expectedNumChunks: 1},
+ {numDocsToInsert: 2, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+ });
+
+ // Pathological case #2.
+ runCase({
+ docSize: 522,
+ stages: [
+ {numDocsToInsert: 8192, expectedNumChunks: 1},
+ {numDocsToInsert: 8192, expectedNumChunks: 1},
+ ],
+ });
+
+ // Lower chunksize to 1MB, and restart the mongos for it to take effect.
+ assert.writeOK(
+ s.getDB("config").getCollection("settings").update({_id: "chunksize"}, {$set: {value: 1}}, {
+ upsert: true
+ }));
+ s.restartMongos(0);
+
+ // Original problematic case, scaled down to smaller chunksize.
+ runCase({
+ docSize: 0,
+ stages: [
+ {numDocsToInsert: 10000, expectedNumChunks: 1},
+ {numDocsToInsert: 10, expectedNumChunks: 1},
+ {numDocsToInsert: 20, expectedNumChunks: 1},
+ {numDocsToInsert: 40, expectedNumChunks: 1},
+ {numDocsToInsert: 1000, expectedNumChunks: 1},
+ ],
+ });
+
+ // Docs just smaller than half chunk size.
+ runCase({
+ docSize: 510 * 1024,
+ stages: [
+ {numDocsToInsert: 10, expectedNumChunks: 6},
+ {numDocsToInsert: 10, expectedNumChunks: 12},
+ ],
+ });
+
+ // Docs just larger than half chunk size.
+ runCase({
+ docSize: 514 * 1024,
+ stages: [
+ {numDocsToInsert: 10, expectedNumChunks: 10},
+ {numDocsToInsert: 10, expectedNumChunks: 20},
+ ],
+ });
+
+ s.stop();
+})();
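The getNumberChunks() helper in the new test reads chunk metadata straight from the config server. The same check works by hand from any mongos; a minimal sketch, with the namespace test.coll1 purely illustrative:

    // Count the chunks recorded for one namespace, as getNumberChunks() does.
    var conf = db.getSiblingDB("config");
    var n = conf.getCollection("chunks").count({ns: "test.coll1"});
    print("test.coll1 currently has " + n + " chunk(s)");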
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index 1cf9b5ab39a..95151b1e7e9 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -40,7 +40,7 @@
assert.eq(1, configDB.chunks.find().itcount());
- for (var x = 0; x < 1100; x++) {
+ for (var x = 0; x < 3100; x++) {
assert.writeOK(testDB.runCommand({
update: 'update',
updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
@@ -80,7 +80,7 @@
// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
// we are going to be conservative.
- for (var x = 0; x < 1100; x += 400) {
+ for (var x = 0; x < 3100; x += 400) {
var docs = [];
for (var y = 0; y < 400; y++) {
@@ -101,7 +101,7 @@
assert.eq(1, configDB.chunks.find().itcount());
- for (var x = 0; x < 1100; x += 400) {
+ for (var x = 0; x < 3100; x += 400) {
var docs = [];
for (var y = 0; y < 400; y++) {
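A note on the 1100 -> 3100 bumps in write_cmd_auto_split.js: with the early-split heuristic gone, a chunk is only considered for splitting once it approaches the full configured chunk size. Treating doc1k as a ~1KB document and this suite's chunk size as 1MB (both are assumptions from the names, not confirmed by the excerpt), the arithmetic works out as:

    // Back-of-envelope sizing for the raised loop bounds (figures approximate).
    var docBytes = 1024;             // doc1k: assumed ~1KB per document
    var chunkBytes = 1024 * 1024;    // assumed 1MB test chunk size
    print(1100 * docBytes / chunkBytes);  // ~1.07x chunk size: borderline split
    print(3100 * docBytes / chunkBytes);  // ~3.03x chunk size: splits reliably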