author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2018-04-26 16:00:43 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2018-04-30 12:00:48 -0400
commit     cc7a103bd71ee98ad85e1b5dcca8763b4eb16679
tree       79d69534528aab8438b6f9bd84bf84a92d4b2ccb
parent     bd93ed7abe5b4fd862065e240be493702ad4d790
SERVER-34644 Only explicitly do $-prefix check when writing sharded metadata
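In other words, the $-prefixed-field validation moves out of the generic parsing and shard-key extraction paths and into the code that persists sharding metadata (chunk splits and zone ranges). Below is a minimal sketch of the intended call pattern; the wrapper name validateSplitPointForMetadata is hypothetical, and only the two ShardKeyPattern static checks come from the change itself (the real call sites are commitChunkSplit and assignKeyRangeToZone in the diff that follows). Includes beyond shard_key_pattern.h are omitted.

    // Hypothetical wrapper mirroring the new commitChunkSplit logic.
    #include "mongo/s/shard_key_pattern.h"

    namespace mongo {
    Status validateSplitPointForMetadata(const BSONObj& splitPoint) {
        // Existing check: reject shard keys that are too large to store.
        Status sizeStatus = ShardKeyPattern::checkShardKeySize(splitPoint);
        if (!sizeStatus.isOK()) {
            return sizeStatus;
        }
        // New check: reject BSON that is not valid for storage (e.g. $-prefixed
        // field names inside embedded objects), performed explicitly here rather
        // than implicitly while parsing ChunkType documents.
        return ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(splitPoint);
    }
    }  // namespace mongo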
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp  52
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp               13
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp              87
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp               46
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp                11
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp                                                  5
-rw-r--r--  src/mongo/s/request_types/update_zone_key_range_request_test.cpp                  20
-rw-r--r--  src/mongo/s/shard_key_pattern.cpp                                                  30
-rw-r--r--  src/mongo/s/shard_key_pattern.h                                                     6
-rw-r--r--  src/mongo/s/shard_key_pattern_test.cpp                                             20
10 files changed, 229 insertions(+), 61 deletions(-)
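The flip side of the change is the relaxed read/routing behavior that the shard_key_pattern_test.cpp hunks below assert: normalization and document key extraction now pass $-prefixed embedded values through instead of rejecting them. A short sketch of that behavior, assembled only from calls visible in this diff (normalizeShardKey, checkShardKeyIsValidForMetadataStorage); not part of the commit, and includes other than shard_key_pattern.h are omitted:

    #include "mongo/s/shard_key_pattern.h"

    namespace mongo {
    Status demoDollarPrefixedShardKey() {
        ShardKeyPattern pattern(BSON("a" << 1));
        BSONObj key = BSON("a" << BSON("$dollarPrefixKey" << true));

        // Routing-side normalization now returns the $-prefixed value unchanged;
        // before this commit it returned an empty BSONObj.
        BSONObj normalized = pattern.normalizeShardKey(key);

        // The $-prefix rejection now happens only when writing sharding metadata.
        return ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(normalized);  // BadValue
    }
    }  // namespace mongo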
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
index 89b64867b99..8f80c6bfd8e 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
@@ -189,6 +189,56 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MaxWithInvalidShardKeyShouldFail) {
assertNoZoneDoc();
}
+TEST_F(AssignKeyRangeToZoneTestFixture, AssignZoneWithDollarPrefixedShardKeysShouldFail) {
+ ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
+ ->assignKeyRangeToZone(
+ operationContext(),
+ shardedNS(),
+ ChunkRange(BSON("x" << BSON("$A" << 1)), BSON("x" << BSON("$B" << 1))),
+ zoneName()));
+ assertNoZoneDoc();
+
+ ASSERT_NOT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->assignKeyRangeToZone(operationContext(),
+ shardedNS(),
+ ChunkRange(BSON("x" << 0), BSON("x" << BSON("$maxKey" << 1))),
+ zoneName()));
+ assertNoZoneDoc();
+}
+
+TEST_F(AssignKeyRangeToZoneTestFixture, RemoveZoneWithDollarPrefixedShardKeysShouldFail) {
+ ChunkRange zoneWithDollarKeys(BSON("x" << BSON("$A" << 1)), BSON("x" << BSON("$B" << 1)));
+
+ // Manually insert a zone with illegal keys in order to bypass the checks performed by
+ // assignKeyRangeToZone
+ BSONObj updateQuery(BSON("_id" << BSON(TagsType::ns(shardedNS().ns())
+ << TagsType::min(zoneWithDollarKeys.getMin()))));
+
+ BSONObjBuilder updateBuilder;
+ updateBuilder.append(
+ "_id", BSON(TagsType::ns(shardedNS().ns()) << TagsType::min(zoneWithDollarKeys.getMin())));
+ updateBuilder.append(TagsType::ns(), shardedNS().ns());
+ updateBuilder.append(TagsType::min(), zoneWithDollarKeys.getMin());
+ updateBuilder.append(TagsType::max(), zoneWithDollarKeys.getMax());
+ updateBuilder.append(TagsType::tag(), "TestZone");
+
+ ASSERT_OK(Grid::get(operationContext())
+ ->catalogClient()
+ ->updateConfigDocument(
+ operationContext(),
+ TagsType::ConfigNS,
+ updateQuery,
+ updateBuilder.obj(),
+ true,
+ WriteConcernOptions(1, WriteConcernOptions::SyncMode::UNSET, Seconds(0))));
+ assertOnlyZone(shardedNS(), zoneWithDollarKeys, "TestZone");
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->removeKeyRangeFromZone(operationContext(), shardedNS(), zoneWithDollarKeys));
+ assertNoZoneDoc();
+}
+
TEST_F(AssignKeyRangeToZoneTestFixture, MinThatIsAShardKeyPrefixShouldConvertToFullShardKey) {
NamespaceString ns("compound.shard");
CollectionType shardedCollection;
@@ -721,5 +771,5 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMaxPrefixOfExistingSho
}
}
-} // unnamed namespace
+} // namespace
} // namespace mongo
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index e4a73bc6d54..bb2e5ece687 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -330,9 +330,16 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
}
// verify that splits don't create too-big shard keys
- Status shardKeyStatus = ShardKeyPattern::checkShardKeySize(endKey);
- if (!shardKeyStatus.isOK()) {
- return shardKeyStatus;
+ Status shardKeySizeStatus = ShardKeyPattern::checkShardKeySize(endKey);
+ if (!shardKeySizeStatus.isOK()) {
+ return shardKeySizeStatus;
+ }
+
+ // verify that splits don't use disallowed BSON object format
+ Status shardKeyStorageStatus =
+ ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(endKey);
+ if (!shardKeyStorageStatus.isOK()) {
+ return shardKeyStorageStatus;
}
// splits only update the 'minor' portion of version
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
index fee827a420f..f4dc7f93b13 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp
@@ -71,7 +71,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
chunkBoundaries,
"shard0000",
@@ -142,7 +142,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
chunkBoundaries,
"shard0000",
@@ -217,7 +217,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
collEpoch,
chunkBoundaries,
"shard0000",
@@ -288,7 +288,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
chunkBoundaries,
"shard0000",
@@ -391,7 +391,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
auto mergeStatus = ShardingCatalogManager::get(operationContext())
->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
OID::gen(),
chunkBoundaries,
"shard0000",
@@ -435,7 +435,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
ASSERT_EQ(ErrorCodes::BadValue,
ShardingCatalogManager::get(operationContext())
->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
chunkBoundaries,
"shard0000",
@@ -496,14 +496,81 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
Timestamp validAfter{1};
- ASSERT_EQ(ErrorCodes::InvalidOptions,
- ShardingCatalogManager::get(operationContext())
+ ASSERT_EQ(
+ ErrorCodes::InvalidOptions,
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), kNamespace, epoch, chunkBoundaries, "shard0000", validAfter));
+}
+
+TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
+ ChunkType chunk1;
+ chunk1.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk1.setVersion(origVersion);
+ chunk1.setShard(ShardId("shard0000"));
+
+ auto chunk2(chunk1);
+ auto chunk3(chunk1);
+
+ auto chunkMin = BSON("a" << kMinBSONKey);
+ auto chunkBound1 = BSON("a" << BSON("$maxKey" << 1));
+ auto chunkBound2 = BSON("a" << BSON("$mixKey" << 1));
+ auto chunkMax = BSON("a" << kMaxBSONKey);
+
+ // first chunk boundaries
+ chunk1.setMin(chunkMin);
+ chunk1.setMax(chunkBound1);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound1);
+ chunk2.setMax(chunkBound2);
+ // third chunk boundaries
+ chunk3.setMin(chunkBound2);
+ chunk3.setMax(chunkMax);
+
+ ASSERT_OK(setupChunks({chunk1, chunk2, chunk3}));
+
+ // Record chunk boundaries for passing into commitChunkMerge
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax};
+ Timestamp validAfter{100, 0};
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
- epoch,
+ kNamespace,
+ origVersion.epoch(),
chunkBoundaries,
"shard0000",
validAfter));
+
+ auto findResponse = uassertStatusOK(
+ getConfigShard()->exhaustiveFindOnConfig(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ ChunkType::ConfigNS,
+ BSON(ChunkType::ns() << "TestDB.TestColl"),
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ {
+ // Check for increment on mergedChunk's minor version
+ ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
+ }
+
+ // Make sure history is there
+ ASSERT_EQ(1UL, mergedChunk.getHistory().size());
+ ASSERT_EQ(validAfter, mergedChunk.getHistory().front().getValidAfter());
}
} // namespace
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
index df7734f0372..58cebd7ef26 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp
@@ -63,7 +63,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -124,7 +124,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -208,7 +208,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
collEpoch,
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -258,7 +258,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, BSON("a" << 7)),
splitPoints,
@@ -312,7 +312,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
OID::gen(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -339,7 +339,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -366,7 +366,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -393,7 +393,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -401,5 +401,35 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
}
+TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
+ ChunkType chunk;
+ chunk.setNS(kNamespace);
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ auto chunkMin = BSON("a" << kMinBSONKey);
+ auto chunkMax = BSON("a" << kMaxBSONKey);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ ASSERT_OK(setupChunks({chunk}));
+
+ ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ ChunkRange(chunkMin, chunkMax),
+ {BSON("a" << BSON("$minKey" << 1))},
+ "shard0000"));
+ ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ ChunkRange(chunkMin, chunkMax),
+ {BSON("a" << BSON("$maxKey" << 1))},
+ "shard0000"));
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index 37d783a9a02..a54e30824e7 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -313,6 +313,17 @@ Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkRange& givenRange,
const std::string& zoneName) {
+ auto zoneBoundsStorageStatus =
+ ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(givenRange.getMin());
+ if (zoneBoundsStorageStatus.isOK()) {
+ zoneBoundsStorageStatus =
+ ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(givenRange.getMax());
+ }
+
+ if (!zoneBoundsStorageStatus.isOK()) {
+ return zoneBoundsStorageStatus;
+ }
+
Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 8757022002b..57c610e23ad 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -76,11 +76,6 @@ Status extractObject(const BSONObj& obj, const std::string& fieldName, BSONEleme
str::stream() << "The field '" << fieldName << "' cannot be empty"};
}
- auto validForStorageStatus = bsonElement->Obj().storageValidEmbedded();
- if (!validForStorageStatus.isOK()) {
- return validForStorageStatus;
- }
-
return Status::OK();
}
diff --git a/src/mongo/s/request_types/update_zone_key_range_request_test.cpp b/src/mongo/s/request_types/update_zone_key_range_request_test.cpp
index e687c06c590..0a4fa4452fb 100644
--- a/src/mongo/s/request_types/update_zone_key_range_request_test.cpp
+++ b/src/mongo/s/request_types/update_zone_key_range_request_test.cpp
@@ -196,26 +196,6 @@ TEST(UpdateZoneKeyRangeRequest, WrongMaxRangeTypeErrors) {
ASSERT_EQ(ErrorCodes::TypeMismatch, request.getStatus());
}
-TEST(UpdateZoneKeyRangeRequest, WrongPrefixCharacterMinErrors) {
- auto request = UpdateZoneKeyRangeRequest::parseFromMongosCommand(fromjson(R"BSON({
- updateZoneKeyRange: "foo.bar",
- min: { $x: 1 },
- max: { x: 100 },
- zone: "z"
- })BSON"));
- ASSERT_EQ(ErrorCodes::DollarPrefixedFieldName, request.getStatus());
-}
-
-TEST(UpdateZoneKeyRangeRequest, WrongPrefixCharacterMaxErrors) {
- auto request = UpdateZoneKeyRangeRequest::parseFromMongosCommand(fromjson(R"BSON({
- updateZoneKeyRange: "foo.bar",
- min: { x: 1 },
- max: { $x: 100 },
- zone: "z"
- })BSON"));
- ASSERT_EQ(ErrorCodes::DollarPrefixedFieldName, request.getStatus());
-}
-
TEST(UpdateZoneKeyRangeRequest, WrongZoneNameTypeErrors) {
auto request = UpdateZoneKeyRangeRequest::parseFromMongosCommand(fromjson(R"BSON({
updateZoneKeyRange: "foo.bar",
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index b2ce514472e..d37df0cceb2 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -102,12 +102,15 @@ std::vector<std::unique_ptr<FieldRef>> parseShardKeyPattern(const BSONObj& keyPa
return parsedPaths;
}
-bool isShardKeyElement(const BSONElement& element, bool allowRegex) {
- if (element.eoo() || element.type() == Array)
+bool isValidShardKeyElement(const BSONElement& element) {
+ return !element.eoo() && element.type() != Array;
+}
+
+bool isValidShardKeyElementForStorage(const BSONElement& element) {
+ if (!isValidShardKeyElement(element))
return false;
- // TODO: Disallow regex all the time
- if (!allowRegex && element.type() == RegEx)
+ if (element.type() == RegEx)
return false;
if (element.type() == Object && !element.embeddedObject().storageValidEmbedded().isOK())
@@ -166,6 +169,17 @@ Status ShardKeyPattern::checkShardKeySize(const BSONObj& shardKey) {
<< " bytes"};
}
+Status ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(const BSONObj& shardKey) {
+ for (const auto& elem : shardKey) {
+ if (!isValidShardKeyElementForStorage(elem)) {
+ return {ErrorCodes::BadValue,
+ str::stream() << "Shard key element " << elem << " is not valid for storage"};
+ }
+ }
+
+ return Status::OK();
+}
+
ShardKeyPattern::ShardKeyPattern(const BSONObj& keyPattern)
: _keyPattern(keyPattern),
_keyPatternPaths(parseShardKeyPattern(keyPattern)),
@@ -200,7 +214,7 @@ bool ShardKeyPattern::isShardKey(const BSONObj& shardKey) const {
for (const auto& patternEl : keyPatternBSON) {
BSONElement keyEl = shardKey[patternEl.fieldNameStringData()];
- if (!isShardKeyElement(keyEl, true))
+ if (!isValidShardKeyElement(keyEl))
return false;
}
@@ -219,7 +233,7 @@ BSONObj ShardKeyPattern::normalizeShardKey(const BSONObj& shardKey) const {
BSONElement keyEl = shardKey[patternEl.fieldNameStringData()];
- if (!isShardKeyElement(keyEl, true))
+ if (!isValidShardKeyElement(keyEl))
return BSONObj();
keyBuilder.appendAs(keyEl, patternEl.fieldName());
@@ -238,7 +252,7 @@ BSONObj ShardKeyPattern::extractShardKeyFromMatchable(const MatchableDocument& m
BSONElement matchEl =
extractKeyElementFromMatchable(matchable, patternEl.fieldNameStringData());
- if (!isShardKeyElement(matchEl, true))
+ if (!isValidShardKeyElement(matchEl))
return BSONObj();
if (isHashedPatternEl(patternEl)) {
@@ -304,7 +318,7 @@ BSONObj ShardKeyPattern::extractShardKeyFromQuery(const CanonicalQuery& query) c
const FieldRef& patternPath = **it;
BSONElement equalEl = findEqualityElement(equalities, patternPath);
- if (!isShardKeyElement(equalEl, false))
+ if (!isValidShardKeyElementForStorage(equalEl))
return BSONObj();
if (isHashedPattern()) {
diff --git a/src/mongo/s/shard_key_pattern.h b/src/mongo/s/shard_key_pattern.h
index 5b6ec73e876..fe01252f61a 100644
--- a/src/mongo/s/shard_key_pattern.h
+++ b/src/mongo/s/shard_key_pattern.h
@@ -74,6 +74,12 @@ public:
static Status checkShardKeySize(const BSONObj& shardKey);
/**
+ * Validates whether the specified shard key is valid to be written as part of the sharding
+ * metadata.
+ */
+ static Status checkShardKeyIsValidForMetadataStorage(const BSONObj& shardKey);
+
+ /**
* Constructs a shard key pattern from a BSON pattern document. If the document is not a
* valid shard key pattern, !isValid() will be true and key extraction will fail.
*/
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index 18bbe3233f5..c288853464c 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -109,10 +109,13 @@ TEST(ShardKeyPattern, NormalizeShardKey) {
BSON("a.b" << 10 << "c" << 30));
ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("c" << 30 << "a.b" << 10)),
BSON("a.b" << 10 << "c" << 30));
+ ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a.b" << BSON("$notAndOperator" << 10) << "c" << 30)),
+ BSON("a.b" << BSON("$notAndOperator" << 10) << "c" << 30));
+ ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a.b" << BSON("$gt" << 10) << "c" << 30)),
+ BSON("a.b" << BSON("$gt" << 10) << "c" << 30));
ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("b" << 10)), BSONObj());
ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a" << 10 << "c" << 30)), BSONObj());
- ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a.b" << BSON("$gt" << 10) << "c" << 30)), BSONObj());
}
static BSONObj docKey(const ShardKeyPattern& pattern, const BSONObj& doc) {
@@ -138,13 +141,16 @@ TEST(ShardKeyPattern, ExtractDocShardKeySingle) {
<< "$id"
<< 1);
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << ref)), BSON("a" << ref));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$dollarPrefixKey:true}}")),
+ fromjson("{a:{$dollarPrefixKey:true}}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$gt:10}}")), fromjson("{a:{$gt:10}}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$gt:{$dollarPrefixKey:10}}}}")),
+ fromjson("{a:{$gt:{$dollarPrefixKey:10}}}}"));
ASSERT_BSONOBJ_EQ(docKey(pattern, BSONObj()), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{b:10}")), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("" << 10)), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:[1,2]}")), BSONObj());
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$invalid:true}}")), BSONObj());
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$gt:10}}")), BSONObj());
// BSONObjIterator breaks this for now
// ASSERT_EQUALS(docKey(pattern, BSON("a" << 10 << "a" << 20)), BSONObj());
}
@@ -164,15 +170,17 @@ TEST(ShardKeyPattern, ExtractDocShardKeyCompound) {
<< "a"
<< 10)),
fromjson("{a:10, b:'20'}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$dollarPrefixKey:true}}")),
+ fromjson("{a:10, b:{$dollarPrefixKey:true}}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$gt:20}}")),
+ fromjson("{a:10, b:{$gt:20}}"));
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$invalid:true}}")), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{b:20}")), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern,
BSON("" << 10 << "b"
<< "20")),
BSONObj());
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$gt:20}}")), BSONObj());
// Ordering
ASSERT_EQUALS(docKey(pattern, BSON("b" << 20 << "a" << 10)).firstElement().numberInt(), 10);
@@ -265,7 +273,7 @@ TEST(ShardKeyPattern, ExtractQueryShardKeySingle) {
ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10,b:{$invalid:'20'}}")), BSONObj());
// Doc key extraction shouldn't work with query
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$eq:[10, 20]}, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{$eq:[10, 20]}, c:30}")), BSONObj());
// $eq/$or/$and/$all
ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{$eq:10}}")), fromjson("{a:10}"));