author    Randolph Tan <randolph@10gen.com>  2014-02-05 17:43:15 -0500
committer Randolph Tan <randolph@10gen.com>  2014-02-12 15:21:45 -0500
commit    5e0365665f7244a9382b9bf078c890dce1cd5c03 (patch)
tree      bb6e4d5c62ba397726028ea197ad6f2a16656df9 /jstests
parent    e203fc775578a42c88494639b15f9f67f350c4c1 (diff)
download  mongo-5e0365665f7244a9382b9bf078c890dce1cd5c03.tar.gz
SERVER-9620 Index can be made multikey if a prefix of it is a shard key
Allowed sharded collections to have multikey indexes based on the invariant that shard key values can never be arrays.
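
To make the invariant concrete, here is a minimal shell sketch (not part of the
patch), assuming a collection sharded on { a: 1 } with an index on { a: 1, b: 1 }:

    // Rejected: a shard key field may never hold an array value.
    db.coll.insert({ a: [1, 2], b: 1 });
    // Accepted: the array is in 'b', so the { a: 1, b: 1 } index simply
    // becomes multikey without violating the shard key invariant.
    db.coll.insert({ a: 1, b: [1, 2] });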
Diffstat (limited to 'jstests')
 jstests/sharding/array_shard_key.js  | 23
 jstests/sharding/prefix_shard_key.js | 76
 2 files changed, 71 insertions(+), 28 deletions(-)
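
The assertions added to array_shard_key.js use the legacy write-check idiom of
the pre-write-command shell: writes do not report errors directly, so each one
is followed by a getLastError() call. A minimal sketch of the pattern (the
collection name is illustrative):

    coll.insert({ i: 1 });
    var error = coll.getDB().getLastError(); // null if the write succeeded
    assert.eq( null, error );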
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index 46ed72addd1..902c3227b8c 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -85,6 +85,29 @@ error = coll.getDB().getLastError()
assert.eq( error, null )
assert.eq( coll.find().itcount(), 0 )
+coll.ensureIndex({ _id : 1, i : 1, j : 1 });
+// Can insert documents that make the index multikey, as long as the array is not part of the shard key.
+coll.remove({});
+coll.insert({ i: 1, j: [1, 2] });
+error = coll.getDB().getLastError();
+assert.eq( error, null );
+assert.eq( coll.find().itcount(), 1 );
+
+// The same holds for updates.
+coll.remove({});
+coll.insert({ _id: 1, i: 1 });
+coll.update({ _id: 1, i: 1 }, { _id: 1, i: 1, j: [1, 2] });
+error = coll.getDB().getLastError();
+assert.eq( error, null );
+assert.eq( coll.find().itcount(), 1 );
+
+// And for upserts.
+coll.remove({});
+coll.update({ _id: 1, i: 1 }, { _id: 1, i: 1, j: [1, 2] }, true /* upsert */);
+error = coll.getDB().getLastError();
+assert.eq( error, null );
+assert.eq( coll.find().itcount(), 1 );
+
printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" )
// Insert a bunch of data then shard over key which is an array
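
The continuation of this test (elided in this view) covers the reverse order:
when documents already contain array values for the proposed key, the
supporting index is multikey and shardCollection must refuse. A minimal sketch
of that scenario, with illustrative names:

    db.foo2.insert({ k: [1, 2] });
    db.foo2.ensureIndex({ k: 1 }); // becomes multikey immediately
    var res = db.adminCommand({ shardCollection: 'test.foo2', key: { k: 1 } });
    assert.eq( 0, res.ok, 'sharding on a multikey index must fail' );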
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index 58091490b1c..4eed2e72159 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -2,8 +2,7 @@
//
// Part 1: Shard new collection on {num : 1} with an index on {num : 1, x : 1}.
// Test that you can split and move chunks around.
-// Part 2: Test that adding an array value for x makes it unusuable. Deleting the
-// array value and re-indexing makes it usable again.
+// Part 2: Test that adding an array value for x doesn't make the index unusable.
// Part 3: Shard new collection on {skey : 1} but with a longer index.
// Insert docs with same val for 'skey' but different vals for 'extra'.
// Move chunks around and check that [min,max) chunk boundaries are properly obeyed.
@@ -78,35 +77,56 @@ assert.eq( 1, result3.ok , "moveChunk didn't succeed");
//******************Part 2********************
-// Test that inserting array values fails because we don't support multi-key indexes for the shard key
-coll.save({ num : [1,2], x : 1});
-err = db.getLastError();
-print( err );
-assert.neq( null, err, "Inserting document with array value for shard key succeeded");
-
-// Because of SERVER-6095, making the index a multi-key index (on a value that *isn't* part of the
-// shard key) makes that index unusable for migrations. Test that removing the multi-key value and
-// rebuilding the index allows it to be used again
-coll.save({ num : 100, x : [1,2]});
-var result4 = admin.runCommand({ movechunk: coll.getFullName(), find: { num: 70 },
- to: s.getOther(s.getServer("test")).name, _waitForDelete: true });
-printjson( result4 );
-assert.eq( 0, result4.ok , "moveChunk succeeded without a usable index");
+// Migrations and splits will still work on a sharded collection whose only
+// shard-key index is multikey.
+db.user.ensureIndex({ num: 1, x: 1 });
+db.adminCommand({ shardCollection: 'test.user', key: { num: 1 }});
-coll.remove({ num : 100 });
-db.getLastError();
-coll.reIndex();
-db.getLastError();
-result4 = admin.runCommand({ movechunk: coll.getFullName(), find : { num : 70 },
+var indexCount = db.system.indexes.find({ ns: 'test.user' }).count();
+assert.eq(2, indexCount, // indexes for _id_ and num_1_x_1
+ 'index count not expected: ' + tojson(db.system.indexes.find().toArray()));
+
+var array = [];
+for (var item = 0; item < 50; item++) {
+ array.push(item);
+}
+
+for (var docs = 0; docs < 1000; docs++) {
+ db.user.insert({ num: docs, x: array });
+}
+
+assert.eq(1000, db.user.find().itcount());
+
+var result4 = admin.runCommand({ movechunk: 'test.user', find: { num: 70 },
to: s.getOther(s.getServer("test")).name, _waitForDelete: true });
-printjson( result4 );
-assert.eq( 1, result4.ok , "moveChunk failed after rebuilding index");
+assert.commandWorked(result4);
-// Make sure the previous migrates cleanup doesn't interfere with later tests
-assert.soon( function(){
- print( "Waiting for migration cleanup to occur..." );
- return coll.count() == coll.find().itcount();
-})
+var expectedShardCount = { shard0000: 0, shard0001: 0 };
+config.chunks.find({ ns: 'test.user' }).forEach(function(chunkDoc) {
+ var min = chunkDoc.min.num;
+ var max = chunkDoc.max.num;
+
+ if (min < 0 || min == MinKey) {
+ min = 0;
+ }
+
+ if (max > 1000 || max == MaxKey) {
+ max = 1000;
+ }
+
+ if (max > 0) {
+ expectedShardCount[chunkDoc.shard] += (max - min);
+ }
+});
+
+assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().count());
+
+result4 = admin.runCommand({ split: 'test.user', middle: { num: 70 }});
+assert.commandWorked(result4);
+
+assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
+assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().count());
//******************Part 3********************
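
As a closing note on the Part 2 verification above: the per-shard counts are
derived by clamping each chunk's [min, max) range to the inserted num range
[0, 1000). A self-contained restatement of that clamping logic (the two-chunk
layout shown is hypothetical):

    var clampedCount = function(min, max) {
        min = (min == MinKey || min < 0) ? 0 : min;
        max = (max == MaxKey || max > 1000) ? 1000 : max;
        return (max > 0) ? max - min : 0;
    };
    // e.g. chunks { MinKey..70 } and { 70..MaxKey } over docs num = 0..999:
    assert.eq( 70, clampedCount(MinKey, 70) );  // clamps to [0, 70)
    assert.eq( 930, clampedCount(70, MaxKey) ); // clamps to [70, 1000)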