author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2016-04-14 15:41:16 -0400
committer  Ramon Fernandez <ramon@mongodb.com>                 2016-04-15 10:55:17 +0100
commit     df35458a85c99c3838858cd3981fda1ba4ddd2a7 (patch)
tree       bd1720c1424bd1e6d6240a9666e49be5e2a1f9cc
parent     60681f21a65226d5a38ac632b7ec28d081c2671c (diff)
download   mongo-df35458a85c99c3838858cd3981fda1ba4ddd2a7.tar.gz
SERVER-23704 Make shard_keycount.js invoke the anonymous test function
(cherry picked from commit 8fc79125bb85c8939bd6aa4050fec29de79399d5)
-rw-r--r--  jstests/sharding/hash_basic.js      112
-rw-r--r--  jstests/sharding/shard_keycount.js   33
2 files changed, 56 insertions(+), 89 deletions(-)
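The heart of the shard_keycount.js change is in the commit subject: the test body was wrapped in an anonymous function that was never invoked, so its assertions never ran. A minimal sketch of the pattern (illustrative only, not the actual test body):

    // Defined but never invoked: the assertion inside is silently skipped.
    (function() {
        assert.eq(1, 1);
    });

    // Immediately-invoked function expression (IIFE), as applied here: the body
    // runs, and 'use strict' additionally catches accidental global variables.
    (function() {
        'use strict';
        assert.eq(1, 1);
    })();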
diff --git a/jstests/sharding/hash_basic.js b/jstests/sharding/hash_basic.js
index e4bf6ded27b..4e3d6806a30 100644
--- a/jstests/sharding/hash_basic.js
+++ b/jstests/sharding/hash_basic.js
@@ -1,88 +1,58 @@
-var st = new ShardingTest({shards: 2, chunkSize: 1});
-
-var testDB = st.s.getDB('test');
-testDB.adminCommand({enableSharding: 'test'});
-st.ensurePrimaryShard('test', 'shard0001');
-testDB.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}});
-
-var configDB = st.s.getDB('config');
-var chunkCountBefore = configDB.chunks.count();
-assert.gt(chunkCountBefore, 1);
-
-for (var x = 0; x < 1000; x++) {
-    testDB.user.insert({x: x});
-}
-
-// For debugging
 (function() {
-    var chunkList = configDB.chunks.find().sort({min: -1}).toArray();
-    chunkList.forEach(function(chunk) {
-        chunk.count = 0;
-    });
+    'use strict';
 
-    for (var x = 0; x < 1000; x++) {
-        var hashVal = testDB.adminCommand({_hashBSONElement: x}).out;
-        var countSet = false;
+    var st = new ShardingTest({shards: 2, chunkSize: 1});
 
-        for (var y = 0; y < chunkList.length - 2; y++) {
-            var chunkDoc = chunkList[y];
-            if (chunkDoc.min.x <= hashVal) {
-                countSet = true;
-                chunkDoc.count++;
+    var testDB = st.s.getDB('test');
+    assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+    st.ensurePrimaryShard('test', 'shard0001');
+    assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}}));
 
-                print('doc in chunk: x [' + x + '], h[' + hashVal + '], min[' + chunkDoc.min.x +
-                      '], max[' + chunkDoc.max.x + ']');
-                break;
-            }
-        }
+    var configDB = st.s.getDB('config');
+    var chunkCountBefore = configDB.chunks.count();
+    assert.gt(chunkCountBefore, 1);
 
-        if (!countSet) {
-            chunkDoc = chunkList[chunkList.length - 1];
-            print('doc in chunk: x [' + x + '], h[' + hashVal + '], min[' + chunkDoc.min.x +
-                  '], max[' + chunkDoc.max.x + ']');
-            chunkDoc.count++;
-        }
+    for (var x = 0; x < 1000; x++) {
+        testDB.user.insert({x: x});
     }
 
-    chunkList.forEach(function(chunkDoc) {
-        print('chunk details: ' + tojson(chunkDoc));
-    });
-});
-
-var chunkDoc = configDB.chunks.find().sort({min: 1}).next();
-var min = chunkDoc.min;
-var max = chunkDoc.max;
+    var chunkDoc = configDB.chunks.find().sort({min: 1}).next();
+    var min = chunkDoc.min;
+    var max = chunkDoc.max;
 
-// Assumption: There are documents in the MinKey chunk, otherwise, splitVector will
-// fail. Note: This chunk will have 267 documents if collection was presplit to 4.
-var cmdRes = testDB.adminCommand({split: 'test.user', bounds: [min, max]});
-assert(cmdRes.ok, 'split on bounds failed on chunk[' + tojson(chunkDoc) + ']: ' + tojson(cmdRes));
+    // Assumption: There are documents in the MinKey chunk, otherwise, splitVector will
+    // fail. Note: This chunk will have 267 documents if collection was presplit to 4.
+    var cmdRes = testDB.adminCommand({split: 'test.user', bounds: [min, max]});
+    assert(cmdRes.ok,
+           'split on bounds failed on chunk[' + tojson(chunkDoc) + ']: ' + tojson(cmdRes));
 
-chunkDoc = configDB.chunks.find().sort({min: 1}).skip(1).next();
-var middle = chunkDoc.min + 1000000;
+    chunkDoc = configDB.chunks.find().sort({min: 1}).skip(1).next();
+    var middle = chunkDoc.min + 1000000;
 
-cmdRes = testDB.adminCommand({split: 'test.user', middle: {x: middle}});
-assert(cmdRes.ok, 'split failed with middle [' + middle + ']: ' + tojson(cmdRes));
+    cmdRes = testDB.adminCommand({split: 'test.user', middle: {x: middle}});
+    assert(cmdRes.ok, 'split failed with middle [' + middle + ']: ' + tojson(cmdRes));
 
-cmdRes = testDB.adminCommand({split: 'test.user', find: {x: 7}});
-assert(cmdRes.ok, 'split failed with find: ' + tojson(cmdRes));
+    cmdRes = testDB.adminCommand({split: 'test.user', find: {x: 7}});
+    assert(cmdRes.ok, 'split failed with find: ' + tojson(cmdRes));
 
-var chunkList = configDB.chunks.find().sort({min: 1}).toArray();
-assert.eq(chunkCountBefore + 3, chunkList.length);
+    var chunkList = configDB.chunks.find().sort({min: 1}).toArray();
+    assert.eq(chunkCountBefore + 3, chunkList.length);
 
-chunkList.forEach(function(chunkToMove) {
-    var toShard = configDB.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
+    chunkList.forEach(function(chunkToMove) {
+        var toShard = configDB.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
 
-    print(jsTestName() + " - moving chunk " + chunkToMove._id + " from shard " + chunkToMove.shard +
-          " to " + toShard + "...");
+        print(jsTestName() + " - moving chunk " + chunkToMove._id + " from shard " +
+              chunkToMove.shard + " to " + toShard + "...");
 
-    var cmdRes = testDB.adminCommand({
-        moveChunk: 'test.user',
-        bounds: [chunkToMove.min, chunkToMove.max],
-        to: toShard,
-        _waitForDelete: true
+        var cmdRes = testDB.adminCommand({
+            moveChunk: 'test.user',
+            bounds: [chunkToMove.min, chunkToMove.max],
+            to: toShard,
+            _waitForDelete: true
+        });
+        print(jsTestName() + " - result from moving chunk " + chunkToMove._id + ": " +
+              tojson(cmdRes));
     });
-    print(jsTestName() + " - result from moving chunk " + chunkToMove._id + ": " + tojson(cmdRes));
-});
 
-st.stop();
+    st.stop();
+})();
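Beyond dropping the debugging block, the hash_basic.js rewrite stops discarding command results and wraps setup commands in assert.commandWorked(). A minimal sketch of the difference, assuming a shell connected to a mongos as in this test (the database name is illustrative):

    var testDB = db.getSiblingDB('test');

    // Old style: the result document is ignored, so a failing command goes unnoticed
    // and the test only breaks later, far from the real cause.
    testDB.adminCommand({enableSharding: 'test'});

    // New style: the test aborts immediately unless the command returns ok: 1.
    assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));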
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 9a63a2cfdce..ae4c1d58574 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -1,46 +1,43 @@
 // Tests splitting a chunk twice
 (function() {
+    'use strict';
 
     var s = new ShardingTest({name: "shard_keycount", shards: 2, mongos: 1, other: {chunkSize: 1}});
 
-    dbName = "test";
-    collName = "foo";
-    ns = dbName + "." + collName;
+    var dbName = "test";
+    var collName = "foo";
+    var ns = dbName + "." + collName;
 
-    db = s.getDB(dbName);
+    var db = s.getDB(dbName);
 
     for (var i = 0; i < 10; i++) {
         db.foo.insert({_id: i});
     }
 
     // Enable sharding on DB
-    s.adminCommand({enablesharding: dbName});
+    assert.commandWorked(s.s0.adminCommand({enablesharding: dbName}));
     s.ensurePrimaryShard(dbName, 'shard0001');
 
     // Enable sharding on collection
-    s.adminCommand({shardcollection: ns, key: {_id: 1}});
+    assert.commandWorked(s.s0.adminCommand({shardcollection: ns, key: {_id: 1}}));
 
     // Split into two chunks
-    s.adminCommand({split: ns, find: {_id: 3}});
+    assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
 
-    coll = db.getCollection(collName);
+    var coll = db.getCollection(collName);
 
     // Split chunk again
-    s.adminCommand({split: ns, find: {_id: 3}});
+    assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
 
-    coll.update({_id: 3}, {_id: 3});
+    assert.writeOK(coll.update({_id: 3}, {_id: 3}));
 
     // Split chunk again
-    s.adminCommand({split: ns, find: {_id: 3}});
+    assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
 
-    coll.update({_id: 3}, {_id: 3});
+    assert.writeOK(coll.update({_id: 3}, {_id: 3}));
 
     // Split chunk again
-    // FAILS since the key count is based on the full index, not the chunk itself
-    // i.e. Split point calc'd is 5 key offset (10 documents), but only four docs
-    // in chunk with bounds _id : 0 => 5
-    s.adminCommand({split: ns, find: {_id: 3}});
+    assert.commandWorked(s.s0.adminCommand({split: ns, find: {_id: 3}}));
 
     s.stop();
-
-});
+})();
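The updates in shard_keycount.js get the same treatment through assert.writeOK(), which inspects the WriteResult returned by update(). A short sketch under the same assumptions (coll stands for any collection handle):

    // update() returns a WriteResult rather than throwing on a write error.
    var res = coll.update({_id: 3}, {_id: 3});
    assert.writeOK(res);  // fails the test if the write reported an error

    // Equivalent inline form, as used in the diff:
    assert.writeOK(coll.update({_id: 3}, {_id: 3}));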