author     Dan Pasette <dan@mongodb.com>  2016-09-29 19:19:59 -0400
committer  Dan Pasette <dan@mongodb.com>  2016-09-29 19:19:59 -0400
commit     9a4474ea06409f729121283dadbdf3a17b131cdb (patch)
tree       8ec980a5569c8ab6dc3b5d4e93e0f9da29d2edc6 /jstests/sharding/autosplit.js
parent     4b6f1b99aea01016e875bb74872927b621c7321a (diff)
download   mongo-9a4474ea06409f729121283dadbdf3a17b131cdb.tar.gz
Revert "SERVER-26309 Disable auto splitting in ShardingTest by default"
This reverts commit c47eb7408d712ac75c09b54079abfd0c28346a07.
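
SERVER-26309 had made ShardingTest clusters start with auto splitting disabled unless a test opted in; reverting it makes auto splitting the default again. A minimal sketch of the opt-in that tests used while the flag was required (the option appears verbatim in the deleted test below; the cluster shape here is illustrative):

    // Request auto splitting explicitly; with this revert applied,
    // ShardingTest enables it by default and the option becomes redundant.
    var st = new ShardingTest({shards: 2, mongos: 1, other: {enableAutoSplit: true}});
    // ... run the sharded workload ...
    st.stop();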
Diffstat (limited to 'jstests/sharding/autosplit.js')
-rw-r--r--  jstests/sharding/autosplit.js  82
1 file changed, 0 insertions, 82 deletions
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
deleted file mode 100644
index bb34021487f..00000000000
--- a/jstests/sharding/autosplit.js
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * This test confirms that chunks get split as they grow due to data insertion.
- */
-(function() {
-    'use strict';
-
-    var s = new ShardingTest({name: "auto1", shards: 2, mongos: 1, other: {enableAutoSplit: true}});
-
-    assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
-    s.ensurePrimaryShard('test', 'shard0001');
-    assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
-
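-    // Build a filler string of roughly 50 KB so that each inserted document is
-    // large enough to make chunks grow and split quickly.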
-    var bigString = "";
-    while (bigString.length < 1024 * 50)
-        bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-
-    var db = s.getDB("test");
-    var coll = db.foo;
-
-    var i = 0;
-
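-    // Load the collection in successively larger unordered bulk batches,
-    // sampling the chunk count after each one.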
-    var bulk = coll.initializeUnorderedBulkOp();
-    for (; i < 100; i++) {
-        bulk.insert({num: i, s: bigString});
-    }
-    assert.writeOK(bulk.execute());
-
-    var primary = s.getPrimaryShard("test").getDB("test");
-
-    var counts = [];
-
-    s.printChunks();
-    counts.push(s.config.chunks.count());
-    assert.eq(100, db.foo.find().itcount());
-
-    print("datasize: " +
-          tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
-
-    bulk = coll.initializeUnorderedBulkOp();
-    for (; i < 200; i++) {
-        bulk.insert({num: i, s: bigString});
-    }
-    assert.writeOK(bulk.execute());
-
-    s.printChunks();
-    s.printChangeLog();
-    counts.push(s.config.chunks.count());
-
-    bulk = coll.initializeUnorderedBulkOp();
-    for (; i < 400; i++) {
-        bulk.insert({num: i, s: bigString});
-    }
-    assert.writeOK(bulk.execute());
-
-    s.printChunks();
-    s.printChangeLog();
-    counts.push(s.config.chunks.count());
-
-    bulk = coll.initializeUnorderedBulkOp();
-    for (; i < 700; i++) {
-        bulk.insert({num: i, s: bigString});
-    }
-    assert.writeOK(bulk.execute());
-
-    s.printChunks();
-    s.printChangeLog();
-    counts.push(s.config.chunks.count());
-
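-    // The chunk count must have grown overall and must never decrease
-    // between samples.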
-    assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
-    var sorted = counts.slice(0);
-    // Array.prototype.sort() compares elements as strings by default, which
-    // misorders numbers, so use a numeric comparator.
-    sorted.sort(function(a, b) {
-        return a - b;
-    });
-    assert.eq(counts, sorted, "counts 2 : " + tojson(counts));
-
-    print(counts);
-
-    printjson(db.stats());
-
-    s.stop();
-})();
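
The deleted test's core technique, sampling the chunk count from the config database through mongos, is a general way to observe auto splitting. A minimal standalone sketch, assuming a 2016-era cluster where each config.chunks document carries an "ns" field naming its collection:

    // Count the chunks currently recorded for one sharded collection.
    function chunkCount(mongos, ns) {
        return mongos.getDB("config").chunks.count({ns: ns});
    }

    // e.g. compare chunkCount(s.s0, "test.foo") before and after a bulk insert.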