summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGreg Studer <greg@10gen.com>2014-06-03 13:57:13 -0400
committerDan Pasette <dan@mongodb.com>2014-06-05 19:24:55 -0400
commit03c83b974897ce58d7cd6f69e27cae7bc1433e2f (patch)
tree999f0bb0fbb00cc2fb02c032638e7601a2bc531b
parent3b612717c5a5819dce90ccf6d02883108979488e (diff)
downloadmongo-03c83b974897ce58d7cd6f69e27cae7bc1433e2f.tar.gz
SERVER-14138 mongos incorrectly targets multiple shards for nested field shard keys
(cherry picked from commit 777de742ee578b62b12ded8381aadf98dfa9fa5f) Conflicts: jstests/sharding/hash_regex_targetting.js
-rw-r--r--jstests/sharding/exact_shard_key_target.js74
-rw-r--r--jstests/sharding/hash_regex_targetting.js21
-rw-r--r--jstests/sharding/regex_targeting.js257
-rw-r--r--src/mongo/s/chunk.cpp8
-rw-r--r--src/mongo/s/chunk.h4
-rw-r--r--src/mongo/s/chunk_manager_targeter.cpp58
-rw-r--r--src/mongo/s/chunk_manager_targeter.h5
-rw-r--r--src/mongo/s/shardkey.cpp33
-rw-r--r--src/mongo/s/shardkey.h8
9 files changed, 423 insertions, 45 deletions
diff --git a/jstests/sharding/exact_shard_key_target.js b/jstests/sharding/exact_shard_key_target.js
new file mode 100644
index 00000000000..bc416166866
--- /dev/null
+++ b/jstests/sharding/exact_shard_key_target.js
@@ -0,0 +1,74 @@
+//
+// Verifies that shard key targeted update/delete operations go to exactly one shard when targeted
+// by nested shard keys.
+// SERVER-14138
+//
+
+var st = new ShardingTest({ shards : 2, verbose : 4 });
+st.stopBalancer();
+
+var mongos = st.s0;
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+var shards = mongos.getDB("config").shards.find().toArray();
+
+assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().getName() }));
+printjson(admin.runCommand({ movePrimary : coll.getDB().getName(), to : shards[0]._id }));
+assert.commandWorked(admin.runCommand({ shardCollection: coll.getFullName(),
+ key: { "a.b": 1 } }));
+assert.commandWorked(admin.runCommand({ split: coll.getFullName(), middle: { "a.b": 0 } }));
+assert.commandWorked(admin.runCommand({ moveChunk: coll.getFullName(),
+ find: { "a.b": 0 },
+ to: shards[1]._id }));
+
+st.printShardingStatus();
+
+//
+// JustOne remove
+coll.remove({});
+assert.writeOK(coll.insert({ _id : 1, a : { b : -1 } }));
+assert.writeOK(coll.insert({ _id : 2, a : { b : 1 } }));
+// Need orphaned data to see the impact
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 3, a : { b : 1 } }));
+assert.eq(1, coll.remove({ a : { b : 1 } }, { justOne : true }).nRemoved);
+assert.eq(2, st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count() );
+
+//
+// Non-multi update
+coll.remove({});
+assert.writeOK(coll.insert({ _id : 1, a : { b : 1 } }));
+assert.writeOK(coll.insert({ _id : 2, a : { b : -1 } }));
+// Need orphaned data to see the impact
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 3, a : { b : 1 } }));
+assert.eq(1, coll.update({ a : { b : 1 } }, { $set : { updated : true } },
+ { multi : false }).nMatched);
+assert.eq(1, st.shard0.getCollection(coll.toString()).count({ updated : true }) +
+ st.shard1.getCollection(coll.toString()).count({ updated : true }) );
+
+//
+// Successive upserts (save()-style)
+coll.remove({});
+assert.writeOK(coll.update({ _id : 1 }, { _id : 1, a : { b : 1 } }, { upsert : true }));
+assert.writeOK(coll.update({ _id : 1 }, { _id : 1, a : { b : 1 } }, { upsert : true }));
+assert.eq(1, st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count() );
+
+//
+// Successive upserts (replacement-style)
+coll.remove({});
+assert.writeOK(coll.update({ a : { b : 1 } }, { a : { b : 1 } }, { upsert : true }));
+assert.writeOK(coll.update({ a : { b : 1 } }, { a : { b : 1 } }, { upsert : true }));
+assert.eq(1, st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count() );
+
+//
+// Successive upserts ($op-style)
+coll.remove({});
+assert.writeOK(coll.update({ a : { b : 1 } }, { $set : { upserted : true } }, { upsert : true }));
+assert.writeOK(coll.update({ a : { b : 1 } }, { $set : { upserted : true } }, { upsert : true }));
+assert.eq(1, st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count() );
+
+jsTest.log("DONE!");
+st.stop(); \ No newline at end of file
diff --git a/jstests/sharding/hash_regex_targetting.js b/jstests/sharding/hash_regex_targetting.js
deleted file mode 100644
index 3e139496d94..00000000000
--- a/jstests/sharding/hash_regex_targetting.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// This checks to make sure that the query is untargetted when using a regex
-var st = new ShardingTest({ shards: 3});
-
-st.adminCommand({ enablesharding: "test" });
-st.adminCommand({ shardcollection: "test.server11430", key: { "path" : "hashed" } });
-
-var col = st.s.getDB('test').getCollection('server11430');
-
-var doc1 = { path: "thisisastring", val: true }
-var doc2 = { path: "thisisabigString", val: true }
-
-col.insert([doc1, doc2])
-printjson(col.find({ path : /isa/ }).explain());
-col.update({ path : /isa/ }, { "$set" : { val : false } }, {multi:true});
-var leObj = col.getDB().getLastErrorObj();
-var result = col.findOne();
-
-assert.eq(false, result.val);
-assert.eq(2, leObj['n']);
-
-st.stop();
diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
new file mode 100644
index 00000000000..777d1c184c7
--- /dev/null
+++ b/jstests/sharding/regex_targeting.js
@@ -0,0 +1,257 @@
+//
+// This checks to make sure that sharded regex queries behave the same as unsharded regex queries
+//
+
+var options = { mongosOptions : { binVersion : "" },
+ shardOptions : { binVersion : "" },
+ configOptions : { binVersion : "" },
+ separateConfig : true };
+
+var st = new ShardingTest({ shards : 2, other : options });
+st.stopBalancer();
+
+var mongos = st.s0;
+var admin = mongos.getDB("admin");
+var shards = mongos.getDB("config").shards.find().toArray();
+
+//
+// Set up multiple collections to target with regex shard keys on two shards
+//
+
+var coll = mongos.getCollection("foo.bar");
+var collSharded = mongos.getCollection("foo.barSharded");
+var collCompound = mongos.getCollection("foo.barCompound");
+var collNested = mongos.getCollection("foo.barNested");
+var collHashed = mongos.getCollection("foo.barHashed");
+
+assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().toString() }));
+admin.runCommand({ movePrimary : coll.getDB().toString(), to : shards[0]._id });
+
+//
+// Split the collection so that "abcde-0" and "abcde-1" go on different shards when possible
+//
+
+assert.commandWorked(admin.runCommand({ shardCollection : collSharded.toString(),
+ key: { a : 1 } }));
+assert.commandWorked(admin.runCommand({ split : collSharded.toString(),
+ middle : { a : "abcde-1" } }));
+assert.commandWorked(admin.runCommand({ moveChunk : collSharded.toString(),
+ find : { a : 0 },
+ to : shards[1]._id,
+ _waitForDelete : true }));
+
+assert.commandWorked(admin.runCommand({ shardCollection : collCompound.toString(),
+ key: { a : 1, b : 1 } }));
+assert.commandWorked(admin.runCommand({ split : collCompound.toString(),
+ middle : { a : "abcde-1", b : 0 } }));
+assert.commandWorked(admin.runCommand({ moveChunk : collCompound.toString(),
+ find : { a : 0, b : 0 },
+ to : shards[1]._id,
+ _waitForDelete : true }));
+
+assert.commandWorked(admin.runCommand({ shardCollection : collNested.toString(),
+ key : { 'a.b' : 1 } }));
+assert.commandWorked(admin.runCommand({ split : collNested.toString(),
+ middle : { 'a.b' : "abcde-1" } }));
+assert.commandWorked(admin.runCommand({ moveChunk : collNested.toString(),
+ find : { a : { b : 0 } },
+ to : shards[1]._id,
+ _waitForDelete : true }));
+
+assert.commandWorked(admin.runCommand({ shardCollection : collHashed.toString(),
+ key: { hash : "hashed" } }));
+
+st.printShardingStatus();
+
+//
+//
+// Cannot insert regex _id
+assert.writeError(coll.insert({ _id : /regex value/ }));
+assert.writeError(collSharded.insert({ _id : /regex value/, a : 0 }));
+assert.writeError(collCompound.insert({ _id : /regex value/, a : 0, b : 0 }));
+assert.writeError(collNested.insert({ _id : /regex value/, a : { b : 0 } }));
+assert.writeError(collHashed.insert({ _id : /regex value/, hash : 0 }));
+
+
+//
+//
+// (For now) we can insert a regex shard key
+assert.writeOK(collSharded.insert({ a : /regex value/ }));
+assert.writeOK(collCompound.insert({ a : /regex value/, b : "other value" }));
+assert.writeOK(collNested.insert({ a : { b : /regex value/ } }));
+assert.writeOK(collHashed.insert({ hash : /regex value/ }));
+
+
+//
+//
+// Query by regex should hit all matching keys, across all shards if applicable
+coll.remove({});
+assert.writeOK(coll.insert({ a : "abcde-0" }));
+assert.writeOK(coll.insert({ a : "abcde-1" }));
+assert.writeOK(coll.insert({ a : /abcde.*/ }));
+assert.eq(coll.find().itcount(), coll.find({ a : /abcde.*/ }).itcount());
+
+collSharded.remove({});
+assert.writeOK(collSharded.insert({ a : "abcde-0" }));
+assert.writeOK(collSharded.insert({ a : "abcde-1" }));
+assert.writeOK(collSharded.insert({ a : /abcde.*/ }));
+assert.eq(collSharded.find().itcount(), collSharded.find({ a : /abcde.*/ }).itcount());
+
+collCompound.remove({});
+assert.writeOK(collCompound.insert({ a : "abcde-0", b : 0 }));
+assert.writeOK(collCompound.insert({ a : "abcde-1", b : 0 }));
+assert.writeOK(collCompound.insert({ a : /abcde.*/, b : 0 }));
+assert.eq(collCompound.find().itcount(), collCompound.find({ a : /abcde.*/ }).itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
+assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
+assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
+assert.eq(collNested.find().itcount(), collNested.find({ 'a.b' : /abcde.*/ }).itcount());
+
+collHashed.remove({});
+while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
+ st.shard1.getCollection(collHashed.toString()).count() == 0) {
+ assert.writeOK(collHashed.insert({ hash : "abcde-" + ObjectId().toString() }));
+}
+assert.writeOK(collHashed.insert({ hash : /abcde.*/ }));
+assert.eq(collHashed.find().itcount(), collHashed.find({ hash : /abcde.*/ }).itcount());
+
+
+//
+//
+// Update by regex should hit all matching keys, across all shards if applicable
+coll.remove({});
+assert.writeOK(coll.insert({ a : "abcde-0" }));
+assert.writeOK(coll.insert({ a : "abcde-1" }));
+assert.writeOK(coll.insert({ a : /abcde.*/ }));
+assert.writeOK(coll.update({ a : /abcde.*/ },
+ { $set : { updated : true } },
+ { multi : true }));
+assert.eq(coll.find().itcount(), coll.find({ updated : true }).itcount());
+
+collSharded.remove({});
+assert.writeOK(collSharded.insert({ a : "abcde-0" }));
+assert.writeOK(collSharded.insert({ a : "abcde-1" }));
+assert.writeOK(collSharded.insert({ a : /abcde.*/ }));
+assert.writeOK(collSharded.update({ a : /abcde.*/ },
+ { $set : { updated : true } },
+ { multi : true }));
+assert.eq(collSharded.find().itcount(), collSharded.find({ updated : true }).itcount());
+
+collCompound.remove({});
+assert.writeOK(collCompound.insert({ a : "abcde-0", b : 0 }));
+assert.writeOK(collCompound.insert({ a : "abcde-1", b : 0 }));
+assert.writeOK(collCompound.insert({ a : /abcde.*/, b : 0 }));
+assert.writeOK(collCompound.update({ a : /abcde.*/ },
+ { $set : { updated : true } },
+ { multi : true }));
+assert.eq(collCompound.find().itcount(), collCompound.find({ updated : true }).itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
+assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
+assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
+assert.writeOK(collNested.update({ 'a.b' : /abcde.*/ },
+ { $set : { updated : true } },
+ { multi : true }));
+assert.eq(collNested.find().itcount(), collNested.find({ updated : true }).itcount());
+
+collHashed.remove({});
+while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
+ st.shard1.getCollection(collHashed.toString()).count() == 0) {
+ assert.writeOK(collHashed.insert({ hash : "abcde-" + ObjectId().toString() }));
+}
+assert.writeOK(collHashed.insert({ hash : /abcde.*/ }));
+assert.writeOK(collHashed.update({ hash : /abcde.*/ },
+ { $set : { updated : true } },
+ { multi : true }));
+assert.eq(collHashed.find().itcount(), collHashed.find({ updated : true }).itcount());
+
+
+//
+//
+// Upsert by regex should fail on sharded collections
+// Regex is not targeted in queries, so can't be targeted for updates
+collSharded.remove({});
+assert.writeError(collSharded.update({ a : /abcde.*/ }, { a : /abcde.*/ }, { upsert : true }));
+assert.writeError(collCompound.update({ a : /abcde.*/ }, { a : /abcde.*/ }, { upsert : true }));
+assert.writeError(collSharded.update({ a : /abcde.*/ }, { a : /abcde.*/ }, { upsert : true }));
+assert.writeError(collNested.update({ a : { b : /abcde.*/ } }, { a : { b : /abcde.*/ } },
+ { upsert : true }));
+assert.writeError(collNested.update({ c : 1 }, { a : { b : /abcde.*/ } },
+ { upsert : true }));
+
+
+//
+//
+// Remove by regex should hit all matching keys, across all shards if applicable
+coll.remove({});
+assert.writeOK(coll.insert({ a : "abcde-0" }));
+assert.writeOK(coll.insert({ a : "abcde-1" }));
+assert.writeOK(coll.insert({ a : /abcde.*/ }));
+assert.writeOK(coll.remove({ a : /abcde.*/ }));
+assert.eq(0, coll.find({}).itcount());
+
+
+collSharded.remove({});
+assert.writeOK(collSharded.insert({ a : "abcde-0" }));
+assert.writeOK(collSharded.insert({ a : "abcde-1" }));
+assert.writeOK(collSharded.insert({ a : /abcde.*/ }));
+assert.writeOK(collSharded.remove({ a : /abcde.*/ }));
+assert.eq(0, collSharded.find({}).itcount());
+
+collCompound.remove({});
+assert.writeOK(collCompound.insert({ a : "abcde-0", b : 0 }));
+assert.writeOK(collCompound.insert({ a : "abcde-1", b : 0 }));
+assert.writeOK(collCompound.insert({ a : /abcde.*/, b : 0 }));
+assert.writeOK(collCompound.remove({ a : /abcde.*/ }));
+assert.eq(0, collCompound.find({}).itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
+assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
+assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
+assert.writeOK(collNested.remove({ 'a.b' : /abcde.*/ }));
+assert.eq(0, collNested.find({}).itcount());
+
+collHashed.remove({});
+while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
+ st.shard1.getCollection(collHashed.toString()).count() == 0) {
+ assert.writeOK(collHashed.insert({ hash : "abcde-" + ObjectId().toString() }));
+}
+assert.writeOK(collHashed.insert({ hash : /abcde.*/ }));
+assert.writeOK(collHashed.remove({ hash : /abcde.*/ }));
+assert.eq(0, collHashed.find({}).itcount());
+
+
+//
+//
+// Query/Update/Remove by nested regex is different depending on how the nested regex is specified
+coll.remove({});
+assert.writeOK(coll.insert({ a : { b : "abcde-0" } }));
+assert.writeOK(coll.insert({ a : { b : "abcde-1" } }));
+assert.writeOK(coll.insert({ a : { b : /abcde.*/ } }));
+assert.eq(1, coll.find({ a : { b : /abcde.*/ } }).itcount());
+assert.writeOK(coll.update({ a : { b : /abcde.*/ } },
+ { $set : { updated : true } },
+ { multi : true }));
+assert.eq(1, coll.find({ updated : true }).itcount());
+assert.writeOK(coll.remove({ a : { b : /abcde.*/ } }));
+assert.eq(2, coll.find().itcount());
+
+collNested.remove({});
+assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
+assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
+assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
+assert.eq(1, collNested.find({ a : { b : /abcde.*/ } }).itcount());
+assert.writeOK(collNested.update({ a : { b : /abcde.*/ } },
+ { $set : { updated : true } },
+ { multi : true }));
+assert.eq(1, collNested.find({ updated : true }).itcount());
+assert.writeOK(collNested.remove({ a : { b : /abcde.*/ } }));
+assert.eq(2, collNested.find().itcount());
+
+jsTest.log("DONE!");
+st.stop();
+
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 98e70935898..8c806918e07 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -957,8 +957,12 @@ namespace mongo {
}
}
- bool ChunkManager::hasShardKey( const BSONObj& obj ) const {
- return _key.hasShardKey( obj );
+ bool ChunkManager::hasShardKey(const BSONObj& doc) const {
+ return _key.hasShardKey(doc);
+ }
+
+ bool ChunkManager::hasTargetableShardKey(const BSONObj& doc) const {
+ return _key.hasTargetableShardKey(doc);
}
void ChunkManager::calcInitSplitsAndShards( const Shard& primary,
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
index b0d5492f090..0b3019ccbdc 100644
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
@@ -361,7 +361,9 @@ namespace mongo {
const ShardKeyPattern& getShardKey() const { return _key; }
- bool hasShardKey( const BSONObj& obj ) const;
+ bool hasShardKey(const BSONObj& doc) const;
+
+ bool hasTargetableShardKey(const BSONObj& doc) const;
bool isUnique() const { return _unique; }
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index 19295a3b6bf..4b0ce033287 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -179,7 +179,6 @@ namespace mongo {
}
}
-
Status ChunkManagerTargeter::targetUpdate( const BatchedUpdateDocument& updateDoc,
vector<ShardEndpoint*>* endpoints ) const {
@@ -208,8 +207,8 @@ namespace mongo {
}
BSONObj targetedDoc = updateType == UpdateType_OpStyle ? query : updateExpr;
- Status result = targetQuery( targetedDoc, endpoints );
- if ( !result.isOK() ) return result;
+
+ bool exactShardKeyQuery = false;
if ( _manager ) {
@@ -220,7 +219,7 @@ namespace mongo {
// Non-multi updates must be targeted exactly by shard key *or* exact _id.
//
- bool exactShardKeyQuery = _manager->hasShardKey( targetedDoc );
+ exactShardKeyQuery = _manager->hasTargetableShardKey(targetedDoc);
if ( updateDoc.getUpsert() && !exactShardKeyQuery ) {
return Status( ErrorCodes::ShardKeyNotFound,
@@ -239,22 +238,35 @@ namespace mongo {
}
// Track autosplit stats for sharded collections
+ // Note: this is only best effort accounting and is not accurate.
if ( exactShardKeyQuery ) {
- // Note: this is only best effort accounting and is not accurate.
- ChunkPtr chunk = _manager->findChunkForDoc( targetedDoc );
+ ChunkPtr chunk = _manager->findChunkForDoc(targetedDoc);
_stats->chunkSizeDelta[chunk->getMin()] +=
( query.objsize() + updateExpr.objsize() );
}
}
+ Status result = Status::OK();
+ if (exactShardKeyQuery) {
+            // Query-based targeting is not guaranteed to be exact, so target the single
+            // owning shard directly by shard key instead
+ ShardEndpoint* endpoint = NULL;
+ result = targetShardKey(targetedDoc, &endpoint);
+ endpoints->push_back(endpoint);
+
+ invariant(result.isOK());
+ invariant(NULL != endpoint);
+ }
+ else {
+ result = targetQuery(targetedDoc, endpoints);
+ }
+
return result;
}
Status ChunkManagerTargeter::targetDelete( const BatchedDeleteDocument& deleteDoc,
vector<ShardEndpoint*>* endpoints ) const {
- Status result = targetQuery( deleteDoc.getQuery(), endpoints );
- if ( !result.isOK() ) return result;
+ bool exactShardKeyQuery = false;
if ( _manager ) {
@@ -264,7 +276,7 @@ namespace mongo {
// Limit-1 deletes must be targeted exactly by shard key *or* exact _id
//
- bool exactShardKeyQuery = _manager->hasShardKey( deleteDoc.getQuery() );
+ exactShardKeyQuery = _manager->hasTargetableShardKey(deleteDoc.getQuery());
bool exactIdQuery = isExactIdQuery( deleteDoc.getQuery() );
if ( deleteDoc.getLimit() == 1 && !exactShardKeyQuery && !exactIdQuery ) {
@@ -275,10 +287,23 @@ namespace mongo {
}
}
+ Status result = Status::OK();
+ if (exactShardKeyQuery) {
+            // Query-based targeting is not guaranteed to be exact, so target the single
+            // owning shard directly by shard key instead
+ ShardEndpoint* endpoint = NULL;
+ result = targetShardKey(deleteDoc.getQuery(), &endpoint);
+ endpoints->push_back(endpoint);
+
+ invariant(result.isOK());
+ invariant(NULL != endpoint);
+ }
+ else {
+ result = targetQuery(deleteDoc.getQuery(), endpoints);
+ }
+
return result;
}
-
Status ChunkManagerTargeter::targetQuery( const BSONObj& query,
vector<ShardEndpoint*>* endpoints ) const {
@@ -311,7 +336,20 @@ namespace mongo {
return Status::OK();
}
+ Status ChunkManagerTargeter::targetShardKey(const BSONObj& doc,
+ ShardEndpoint** endpoint) const {
+
+ invariant(NULL != _manager);
+ dassert(_manager->hasShardKey(doc));
+ ChunkPtr chunk = _manager->findChunkForDoc(doc);
+
+ Shard shard = chunk->getShard();
+ *endpoint = new ShardEndpoint(shard.getName(),
+ _manager->getVersion(StringData(shard.getName())));
+
+ return Status::OK();
+ }
Status ChunkManagerTargeter::targetCollection( vector<ShardEndpoint*>* endpoints ) const {
diff --git a/src/mongo/s/chunk_manager_targeter.h b/src/mongo/s/chunk_manager_targeter.h
index bd9e46adb0a..3d3d4ea8995 100644
--- a/src/mongo/s/chunk_manager_targeter.h
+++ b/src/mongo/s/chunk_manager_targeter.h
@@ -122,6 +122,11 @@ namespace mongo {
*/
Status targetQuery( const BSONObj& query, std::vector<ShardEndpoint*>* endpoints ) const;
+ /**
+ * Returns a ShardEndpoint for an exact shard key query.
+ */
+ Status targetShardKey( const BSONObj& doc, ShardEndpoint** endpoint ) const;
+
NamespaceString _nss;
// Zero or one of these are filled at all times
diff --git a/src/mongo/s/shardkey.cpp b/src/mongo/s/shardkey.cpp
index 3b838b9fc6e..027de1bc2fa 100644
--- a/src/mongo/s/shardkey.cpp
+++ b/src/mongo/s/shardkey.cpp
@@ -56,16 +56,21 @@ namespace mongo {
gMax = max.obj();
}
- bool ShardKeyPattern::hasShardKey( const BSONObj& obj ) const {
- /* this is written s.t. if obj has lots of fields, if the shard key fields are early,
- it is fast. so a bit more work to try to be semi-fast.
- */
-
- for(set<string>::const_iterator it = patternfields.begin(); it != patternfields.end(); ++it) {
- BSONElement e = obj.getFieldDotted(it->c_str());
- if( e.eoo() ||
- e.type() == Array ||
- (e.type() == Object && !e.embeddedObject().okForStorage())) {
+ static bool _hasShardKey(const BSONObj& doc,
+ const set<string>& patternFields,
+ bool allowRegex) {
+
+        // This is written such that if the doc has many fields and the shard key fields
+        // appear early, the check is fast -- hence a bit of extra work to be semi-fast.
+
+ for (set<string>::const_iterator it = patternFields.begin(); it != patternFields.end();
+ ++it) {
+ BSONElement shardKeyField = doc.getFieldDotted(it->c_str());
+ if (shardKeyField.eoo()
+ || shardKeyField.type() == Array
+ || (!allowRegex && shardKeyField.type() == RegEx)
+ || (shardKeyField.type() == Object &&
+ !shardKeyField.embeddedObject().okForStorage())) {
// Don't allow anything for a shard key we can't store -- like $gt/$lt ops
return false;
}
@@ -73,6 +78,14 @@ namespace mongo {
return true;
}
+ bool ShardKeyPattern::hasShardKey(const BSONObj& doc) const {
+ return _hasShardKey(doc, patternfields, true);
+ }
+
+ bool ShardKeyPattern::hasTargetableShardKey(const BSONObj& doc) const {
+ return _hasShardKey(doc, patternfields, false);
+ }
+
bool ShardKeyPattern::isUniqueIndexCompatible( const KeyPattern& uniqueIndexPattern ) const {
return mongo::isUniqueIndexCompatible( pattern.toBSON(), uniqueIndexPattern.toBSON() );
}
diff --git a/src/mongo/s/shardkey.h b/src/mongo/s/shardkey.h
index aa63fe9ec3c..1378614f421 100644
--- a/src/mongo/s/shardkey.h
+++ b/src/mongo/s/shardkey.h
@@ -86,7 +86,13 @@ namespace mongo {
see unit test for more examples
*/
- bool hasShardKey( const BSONObj& obj ) const;
+ bool hasShardKey( const BSONObj& doc ) const;
+
+ /**
+     * Same as the above, but disallows shard key values which are interpreted for
+     * targeting as a multi-shard query (e.g. regexes)
+ */
+ bool hasTargetableShardKey( const BSONObj& doc ) const;
BSONObj key() const { return pattern.toBSON(); }