diff options
author | Randolph Tan <randolph@10gen.com> | 2013-01-25 18:35:51 -0500 |
---|---|---|
committer | Randolph Tan <randolph@10gen.com> | 2013-02-01 15:16:47 -0500 |
commit | 0a590e42086869bb05b2bff36fb104f0f91ba98a (patch) | |
tree | 95a194dcbc7fe5aad1d93cc2c047ca598588dd8a /jstests/sharding/hash_basic.js | |
parent | 6981863f1174a3c8d0b25a441ad82085cb11f32c (diff) | |
download | mongo-0a590e42086869bb05b2bff36fb104f0f91ba98a.tar.gz |
SERVER-8187 Usability issues with splitting + hashed shard keys
Added new option for splitting keys on a cluster using hashed shard keys.
Diffstat (limited to 'jstests/sharding/hash_basic.js')
-rw-r--r-- | jstests/sharding/hash_basic.js | 84 |
1 files changed, 84 insertions, 0 deletions
// jstests/sharding/hash_basic.js (SERVER-8187)
// Exercises manual chunk splitting on a collection sharded with a hashed
// shard key: set up a 2-shard cluster, shard test.user on { x: 'hashed' },
// and load 1000 documents before the split tests below.
var st = new ShardingTest({ shards: 2, chunkSize: 1, other: { shardOptions: { verbose: 1 }} });
st.stopBalancer();

var testDB = st.s.getDB('test');
testDB.adminCommand({ enableSharding: 'test' });
testDB.adminCommand({ shardCollection: 'test.user', key: { x: 'hashed' }});

var configDB = st.s.getDB('config');
// Hashed shard keys presplit the collection, so more than one chunk must exist.
var chunkCountBefore = configDB.chunks.count();
assert.gt(chunkCountBefore, 1);

for (var x = 0; x < 1000; x++) {
    testDB.user.insert({ x: x });
}

// For debugging: print which chunk each document's hashed key falls into.
// FIX: the original defined this closure but never invoked it (missing
// trailing '()'), so the debug output was never produced; it now runs.
(function() {
    // Sorted descending by min, so the first chunk whose min.x <= hash owns the doc.
    var chunkList = configDB.chunks.find().sort({ min: -1 }).toArray();
    chunkList.forEach(function(chunk) { chunk.count = 0; });

    for (var x = 0; x < 1000; x++) {
        var hashVal = testDB.adminCommand({ _hashBSONElement: x }).out;
        var countSet = false;

        // Scan every chunk except the last (MinKey) one, which is the fallback.
        // FIX: the original bound was 'chunkList.length - 2', which silently
        // skipped the second-to-last chunk before falling back.
        for (var y = 0; y < chunkList.length - 1; y++) {
            var chunkDoc = chunkList[y];
            if (chunkDoc.min.x <= hashVal) {
                countSet = true;
                chunkDoc.count++;

                print('doc in chunk: x [' + x + '], h[' + hashVal +
                      '], min[' + chunkDoc.min.x +
                      '], max[' + chunkDoc.max.x + ']');
                break;
            }
        }

        if (!countSet) {
            // No chunk matched: the doc belongs to the MinKey chunk (last in
            // this descending ordering).
            chunkDoc = chunkList[chunkList.length - 1];
            print('doc in chunk: x [' + x + '], h[' + hashVal +
                  '], min[' + chunkDoc.min.x +
                  '], max[' + chunkDoc.max.x + ']');
            chunkDoc.count++;
        }
    }

    chunkList.forEach(function(chunkDoc) {
        print('chunk details: ' + tojson(chunkDoc));
    });
})();

// Grab the first (MinKey) chunk's bounds for the split-by-bounds test below.
var chunkDoc = configDB.chunks.find().sort({ min: 1 }).next();
var min = chunkDoc.min;
var max = chunkDoc.max;

// Assumption: There are documents in the MinKey chunk, otherwise, splitVector will
// fail. Note: This chunk will have 267 documents if collection was presplit to 4.
// Split the MinKey chunk by explicit bounds.
var cmdRes = testDB.adminCommand({ split: 'test.user', bounds: [ min, max ]});
assert(cmdRes.ok, 'split on bounds failed on chunk[' + tojson(chunkDoc) +
       ']: ' + tojson(cmdRes));

// Split the second chunk at an explicit middle key inside it.
chunkDoc = configDB.chunks.find().sort({ min: 1 }).skip(1).next();
// FIX: chunkDoc.min is a document ({ x: <hash> }), so the original
// 'chunkDoc.min + 1000000' string-concatenated '[object Object]' with the
// number, yielding a string middle key instead of a hash inside this chunk
// (presumably splitting whichever chunk strings sort into — verify against
// server BSON ordering). Use the numeric hash value from the 'x' field.
var middle = chunkDoc.min.x + 1000000;

cmdRes = testDB.adminCommand({ split: 'test.user', middle: { x: middle }});
assert(cmdRes.ok, 'split failed with middle [' + middle + ']: ' + tojson(cmdRes));

// Split by 'find': the server hashes x:7 and splits the chunk that owns it.
cmdRes = testDB.adminCommand({ split: 'test.user', find: { x: 7 }});
assert(cmdRes.ok, 'split failed with find: ' + tojson(cmdRes));

// The three splits above should have added exactly one chunk each.
var chunkList = configDB.chunks.find().sort({ min: 1 }).toArray();
assert.eq(chunkCountBefore + 3, chunkList.length);

// Exercise moveChunk with explicit bounds: migrate every chunk to the other
// shard, waiting for the range deleter so migrations don't interfere.
chunkList.forEach(function(chunkToMove) {
    var toShard = configDB.shards.findOne({ _id: { $ne: chunkToMove.shard }})._id;

    var cmdRes = testDB.adminCommand({ moveChunk: 'test.user',
                                       bounds: [ chunkToMove.min, chunkToMove.max ],
                                       to: toShard, _waitForDelete: true });
    assert(cmdRes.ok, 'Cmd failed: ' + tojson(cmdRes));
});

st.stop();