summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2018-04-25 20:14:42 -0400
committerKaloian Manassiev <kaloian.manassiev@mongodb.com>2018-05-01 13:19:56 -0400
commite843a232d6be0089abc8457e3f5a1dd349605c6e (patch)
treea6e440eca1795aa91d6d9fdda5ef92b76e357574
parentc78a7b639334c44c31dea771c6338ea430da73e7 (diff)
downloadmongo-e843a232d6be0089abc8457e3f5a1dd349605c6e.tar.gz
SERVER-34644 Only explicitly do $-prefix check when writing sharded metadata
(cherry picked from commit cc7a103bd71ee98ad85e1b5dcca8763b4eb16679) (cherry picked from commit b8485e13095318f40474885b8b3e7625adee59eb) (cherry picked from commit bd93ed7abe5b4fd862065e240be493702ad4d790)
-rw-r--r--src/mongo/db/dbhelpers.cpp3
-rw-r--r--src/mongo/db/s/config/configsvr_shard_collection_command.cpp8
-rw-r--r--src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp54
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp13
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp11
-rw-r--r--src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp128
-rw-r--r--src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp66
-rw-r--r--src/mongo/s/shard_key_pattern.cpp266
-rw-r--r--src/mongo/s/shard_key_pattern.h17
-rw-r--r--src/mongo/s/shard_key_pattern_test.cpp133
10 files changed, 408 insertions, 291 deletions
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 158e4b727a6..abddfd6a112 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -35,7 +35,6 @@
#include <boost/filesystem/operations.hpp>
#include <fstream>
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/index_create.h"
#include "mongo/db/db.h"
#include "mongo/db/db_raii.h"
@@ -56,7 +55,6 @@
#include "mongo/db/query/query_planner.h"
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/repl/repl_client_info.h"
-#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
@@ -66,7 +64,6 @@
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/write_concern.h"
#include "mongo/db/write_concern_options.h"
-#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 49d09e326e5..1426e84117b 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -139,14 +139,6 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
uassert(
ErrorCodes::InvalidOptions, "cannot have empty shard key", !request->getKey().isEmpty());
- // Ensure the proposed shard key is valid.
- uassert(ErrorCodes::InvalidOptions,
- str::stream()
- << "Unsupported shard key pattern "
- << shardKeyPattern.toString()
- << ". Pattern must either be a single hashed field, or a list of ascending fields",
- shardKeyPattern.isValid());
-
// Ensure that hashed and unique are not both set.
uassert(ErrorCodes::InvalidOptions,
"Hashed shard keys cannot be declared unique. It's possible to ensure uniqueness on "
diff --git a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
index f4ff94429c7..2109161fb28 100644
--- a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
@@ -31,6 +31,7 @@
#include "mongo/bson/json.h"
#include "mongo/client/read_preference.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/sharding_catalog_manager.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
@@ -38,6 +39,7 @@
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/config_server_test_fixture.h"
+#include "mongo/s/grid.h"
namespace mongo {
namespace {
@@ -191,6 +193,56 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MaxWithInvalidShardKeyShouldFail) {
assertNoZoneDoc();
}
+TEST_F(AssignKeyRangeToZoneTestFixture, AssignZoneWithDollarPrefixedShardKeysShouldFail) {
+ ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
+ ->assignKeyRangeToZone(
+ operationContext(),
+ shardedNS(),
+ ChunkRange(BSON("x" << BSON("$A" << 1)), BSON("x" << BSON("$B" << 1))),
+ zoneName()));
+ assertNoZoneDoc();
+
+ ASSERT_NOT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->assignKeyRangeToZone(operationContext(),
+ shardedNS(),
+ ChunkRange(BSON("x" << 0), BSON("x" << BSON("$maxKey" << 1))),
+ zoneName()));
+ assertNoZoneDoc();
+}
+
+TEST_F(AssignKeyRangeToZoneTestFixture, RemoveZoneWithDollarPrefixedShardKeysShouldFail) {
+ ChunkRange zoneWithDollarKeys(BSON("x" << BSON("$A" << 1)), BSON("x" << BSON("$B" << 1)));
+
+ // Manually insert a zone with illegal keys in order to bypass the checks performed by
+ // assignKeyRangeToZone
+ BSONObj updateQuery(BSON("_id" << BSON(TagsType::ns(shardedNS().ns())
+ << TagsType::min(zoneWithDollarKeys.getMin()))));
+
+ BSONObjBuilder updateBuilder;
+ updateBuilder.append(
+ "_id", BSON(TagsType::ns(shardedNS().ns()) << TagsType::min(zoneWithDollarKeys.getMin())));
+ updateBuilder.append(TagsType::ns(), shardedNS().ns());
+ updateBuilder.append(TagsType::min(), zoneWithDollarKeys.getMin());
+ updateBuilder.append(TagsType::max(), zoneWithDollarKeys.getMax());
+ updateBuilder.append(TagsType::tag(), "TestZone");
+
+ ASSERT_OK(Grid::get(operationContext())
+ ->catalogClient()
+ ->updateConfigDocument(
+ operationContext(),
+ TagsType::ConfigNS,
+ updateQuery,
+ updateBuilder.obj(),
+ true,
+ WriteConcernOptions(1, WriteConcernOptions::SyncMode::UNSET, Seconds(0))));
+ assertOnlyZone(shardedNS(), zoneWithDollarKeys, "TestZone");
+
+ ASSERT_OK(ShardingCatalogManager::get(operationContext())
+ ->removeKeyRangeFromZone(operationContext(), shardedNS(), zoneWithDollarKeys));
+ assertNoZoneDoc();
+}
+
TEST_F(AssignKeyRangeToZoneTestFixture, MinThatIsAShardKeyPrefixShouldConvertToFullShardKey) {
NamespaceString ns("compound.shard");
CollectionType shardedCollection;
@@ -733,5 +785,5 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMaxPrefixOfExistingSho
}
}
-} // unnamed namespace
+} // namespace
} // namespace mongo
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp
index e60a6e951d8..fe3594ef193 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations.cpp
@@ -313,9 +313,16 @@ Status ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx,
}
// verify that splits don't create too-big shard keys
- Status shardKeyStatus = ShardKeyPattern::checkShardKeySize(endKey);
- if (!shardKeyStatus.isOK()) {
- return shardKeyStatus;
+ Status shardKeySizeStatus = ShardKeyPattern::checkShardKeySize(endKey);
+ if (!shardKeySizeStatus.isOK()) {
+ return shardKeySizeStatus;
+ }
+
+ // verify that splits don't use disallowed BSON object format
+ Status shardKeyStorageStatus =
+ ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(endKey);
+ if (!shardKeyStorageStatus.isOK()) {
+ return shardKeyStorageStatus;
}
// splits only update the 'minor' portion of version
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp
index 5b21ab927ab..057943027ea 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations.cpp
@@ -313,6 +313,17 @@ Status ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& givenRange,
const std::string& zoneName) {
+ auto zoneBoundsStorageStatus =
+ ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(givenRange.getMin());
+ if (zoneBoundsStorageStatus.isOK()) {
+ zoneBoundsStorageStatus =
+ ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(givenRange.getMax());
+ }
+
+ if (!zoneBoundsStorageStatus.isOK()) {
+ return zoneBoundsStorageStatus;
+ }
+
Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
diff --git a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
index 4e02dd07dfd..cb9d036280d 100644
--- a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
@@ -40,6 +40,8 @@ namespace {
using MergeChunkTest = ConfigServerTestFixture;
+const NamespaceString kNamespace("TestDB.TestColl");
+
TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
ChunkType chunk;
chunk.setNS("TestDB.TestColl");
@@ -65,12 +67,10 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
setupChunks({chunk, chunk2}).transitional_ignore();
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000"));
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), kNamespace, origVersion.epoch(), chunkBoundaries, "shard0000"));
auto findResponse = uassertStatusOK(
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
@@ -129,12 +129,10 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
setupChunks({chunk, chunk2, chunk3}).transitional_ignore();
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000"));
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), kNamespace, origVersion.epoch(), chunkBoundaries, "shard0000"));
auto findResponse = uassertStatusOK(
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
@@ -198,11 +196,8 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
- collEpoch,
- chunkBoundaries,
- "shard0000"));
+ ->commitChunkMerge(
+ operationContext(), kNamespace, collEpoch, chunkBoundaries, "shard0000"));
auto findResponse = uassertStatusOK(
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
@@ -261,12 +256,10 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
- ASSERT_OK(ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000"));
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), kNamespace, origVersion.epoch(), chunkBoundaries, "shard0000"));
auto findResponse = uassertStatusOK(
getConfigShard()->exhaustiveFindOnConfig(operationContext(),
@@ -358,12 +351,10 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
setupChunks({chunk, chunk2}).transitional_ignore();
- auto mergeStatus = ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
- OID::gen(),
- chunkBoundaries,
- "shard0000");
+ auto mergeStatus =
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), kNamespace, OID::gen(), chunkBoundaries, "shard0000");
ASSERT_EQ(ErrorCodes::StaleEpoch, mergeStatus);
}
@@ -398,13 +389,11 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
setupChunks({mergedChunk}).transitional_ignore();
- ASSERT_EQ(ErrorCodes::BadValue,
- ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
- origVersion.epoch(),
- chunkBoundaries,
- "shard0000"));
+ ASSERT_EQ(
+ ErrorCodes::BadValue,
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), kNamespace, origVersion.epoch(), chunkBoundaries, "shard0000"));
// Verify that no change to config.chunks happened.
auto findResponse = uassertStatusOK(
@@ -461,11 +450,68 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
ASSERT_EQ(ErrorCodes::InvalidOptions,
ShardingCatalogManager::get(operationContext())
- ->commitChunkMerge(operationContext(),
- NamespaceString("TestDB.TestColl"),
- epoch,
- chunkBoundaries,
- "shard0000"));
+ ->commitChunkMerge(
+ operationContext(), kNamespace, epoch, chunkBoundaries, "shard0000"));
+}
+
+TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) {
+ ChunkType chunk1;
+ chunk1.setNS(kNamespace.ns());
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk1.setVersion(origVersion);
+ chunk1.setShard(ShardId("shard0000"));
+
+ auto chunk2(chunk1);
+ auto chunk3(chunk1);
+
+ auto chunkMin = BSON("a" << kMinBSONKey);
+ auto chunkBound1 = BSON("a" << BSON("$maxKey" << 1));
+ auto chunkBound2 = BSON("a" << BSON("$mixKey" << 1));
+ auto chunkMax = BSON("a" << kMaxBSONKey);
+
+ // first chunk boundaries
+ chunk1.setMin(chunkMin);
+ chunk1.setMax(chunkBound1);
+ // second chunk boundaries
+ chunk2.setMin(chunkBound1);
+ chunk2.setMax(chunkBound2);
+ // third chunk boundaries
+ chunk3.setMin(chunkBound2);
+ chunk3.setMax(chunkMax);
+
+ ASSERT_OK(setupChunks({chunk1, chunk2, chunk3}));
+
+ // Record chunk boundaries for passing into commitChunkMerge
+ std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound1, chunkBound2, chunkMax};
+
+ ASSERT_OK(
+ ShardingCatalogManager::get(operationContext())
+ ->commitChunkMerge(
+ operationContext(), kNamespace, origVersion.epoch(), chunkBoundaries, "shard0000"));
+
+ auto findResponse = uassertStatusOK(
+ getConfigShard()->exhaustiveFindOnConfig(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(ChunkType::ConfigNS),
+ BSON(ChunkType::ns() << "TestDB.TestColl"),
+ BSON(ChunkType::lastmod << -1),
+ boost::none));
+
+ const auto& chunksVector = findResponse.docs;
+
+ // There should be exactly one chunk left in the collection
+ ASSERT_EQ(1u, chunksVector.size());
+
+ // MergedChunk should have range [chunkMin, chunkMax]
+ auto mergedChunk = uassertStatusOK(ChunkType::fromConfigBSON(chunksVector.front()));
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
+
+ // Check for increment on mergedChunk's minor version
+ ASSERT_EQ(origVersion.majorVersion(), mergedChunk.getVersion().majorVersion());
+ ASSERT_EQ(origVersion.minorVersion() + 1, mergedChunk.getVersion().minorVersion());
}
} // namespace
diff --git a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
index 1a9d8eb857c..60f3cf5ae84 100644
--- a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
@@ -39,6 +39,8 @@ namespace {
using SplitChunkTest = ConfigServerTestFixture;
+const NamespaceString kNamespace("TestDB.TestColl");
+
TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ChunkType chunk;
chunk.setNS("TestDB.TestColl");
@@ -55,11 +57,11 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
auto chunkSplitPoint = BSON("a" << 5);
std::vector<BSONObj> splitPoints{chunkSplitPoint};
- setupChunks({chunk}).transitional_ignore();
+ ASSERT_OK(setupChunks({chunk}));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -105,11 +107,11 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
auto chunkSplitPoint2 = BSON("a" << 7);
std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
- setupChunks({chunk}).transitional_ignore();
+ ASSERT_OK(setupChunks({chunk}));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -176,11 +178,11 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
chunk2.setMin(BSON("a" << 10));
chunk2.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2}).transitional_ignore();
+ ASSERT_OK(setupChunks({chunk, chunk2}));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
collEpoch,
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -226,11 +228,11 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
auto chunkSplitPoint = BSON("a" << 5);
splitPoints.push_back(chunkSplitPoint);
- setupChunks({chunk}).transitional_ignore();
+ ASSERT_OK(setupChunks({chunk}));
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, BSON("a" << 7)),
splitPoints,
@@ -253,7 +255,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- setupChunks({chunk}).transitional_ignore();
+ ASSERT_OK(setupChunks({chunk}));
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
@@ -280,11 +282,11 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- setupChunks({chunk}).transitional_ignore();
+ ASSERT_OK(setupChunks({chunk}));
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
OID::gen(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -307,11 +309,11 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)};
- setupChunks({chunk}).transitional_ignore();
+ ASSERT_OK(setupChunks({chunk}));
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -334,11 +336,11 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)};
- setupChunks({chunk}).transitional_ignore();
+ ASSERT_OK(setupChunks({chunk}));
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -361,11 +363,11 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)};
- setupChunks({chunk}).transitional_ignore();
+ ASSERT_OK(setupChunks({chunk}));
auto splitStatus = ShardingCatalogManager::get(operationContext())
->commitChunkSplit(operationContext(),
- NamespaceString("TestDB.TestColl"),
+ kNamespace,
origVersion.epoch(),
ChunkRange(chunkMin, chunkMax),
splitPoints,
@@ -373,5 +375,35 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
ASSERT_EQ(ErrorCodes::InvalidOptions, splitStatus);
}
+TEST_F(SplitChunkTest, SplitPointsWithDollarPrefixShouldFail) {
+ ChunkType chunk;
+ chunk.setNS("TestDB.TestColl");
+
+ auto origVersion = ChunkVersion(1, 0, OID::gen());
+ chunk.setVersion(origVersion);
+ chunk.setShard(ShardId("shard0000"));
+
+ auto chunkMin = BSON("a" << kMinBSONKey);
+ auto chunkMax = BSON("a" << kMaxBSONKey);
+ chunk.setMin(chunkMin);
+ chunk.setMax(chunkMax);
+ ASSERT_OK(setupChunks({chunk}));
+
+ ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ ChunkRange(chunkMin, chunkMax),
+ {BSON("a" << BSON("$minKey" << 1))},
+ "shard0000"));
+ ASSERT_NOT_OK(ShardingCatalogManager::get(operationContext())
+ ->commitChunkSplit(operationContext(),
+ kNamespace,
+ origVersion.epoch(),
+ ChunkRange(chunkMin, chunkMax),
+ {BSON("a" << BSON("$maxKey" << 1))},
+ "shard0000"));
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index 8a57289ca8c..adbfbebfaab 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -44,18 +44,15 @@
namespace mongo {
-using std::shared_ptr;
-using std::string;
-using std::unique_ptr;
-using std::vector;
-
using pathsupport::EqualityMatches;
-const int ShardKeyPattern::kMaxShardKeySizeBytes = 512;
-const unsigned int ShardKeyPattern::kMaxFlattenedInCombinations = 4000000;
-
namespace {
+// Maximum number of intervals produced by $in queries
+constexpr size_t kMaxFlattenedInCombinations = 4000000;
+
+constexpr auto kIdField = "_id"_sd;
+
bool isHashedPatternEl(const BSONElement& el) {
return el.type() == String && el.String() == IndexNames::HASHED;
}
@@ -66,32 +63,38 @@ bool isHashedPatternEl(const BSONElement& el) {
* ii) a compound list of ascending, potentially-nested field paths, e.g. { a : 1 , b.c : 1 }
*/
std::vector<std::unique_ptr<FieldRef>> parseShardKeyPattern(const BSONObj& keyPattern) {
+ uassert(ErrorCodes::BadValue, "Shard key is empty", !keyPattern.isEmpty());
+
std::vector<std::unique_ptr<FieldRef>> parsedPaths;
for (const auto& patternEl : keyPattern) {
auto newFieldRef(stdx::make_unique<FieldRef>(patternEl.fieldNameStringData()));
// Empty path
- if (newFieldRef->numParts() == 0)
- return {};
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "Field " << patternEl.fieldNameStringData() << " is empty",
+ newFieldRef->numParts() > 0);
// Extra "." in path?
- if (newFieldRef->dottedField() != patternEl.fieldNameStringData())
- return {};
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "Field " << patternEl.fieldNameStringData()
+ << " contains extra dot",
+ newFieldRef->dottedField() == patternEl.fieldNameStringData());
// Empty parts of the path, ".."?
for (size_t i = 0; i < newFieldRef->numParts(); ++i) {
- if (newFieldRef->getPart(i).empty())
- return {};
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "Field " << patternEl.fieldNameStringData()
+ << " contains empty parts",
+ !newFieldRef->getPart(i).empty());
}
// Numeric and ascending (1.0), or "hashed" and single field
- if (!patternEl.isNumber()) {
- if (keyPattern.nFields() != 1 || !isHashedPatternEl(patternEl))
- return {};
- } else if (patternEl.numberInt() != 1) {
- return {};
- }
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "Field " << patternEl.fieldNameStringData()
+ << " can only be 1 or 'hashed'",
+ (patternEl.isNumber() && patternEl.numberInt() == 1) ||
+ (keyPattern.nFields() == 1 && isHashedPatternEl(patternEl)));
parsedPaths.emplace_back(std::move(newFieldRef));
}
@@ -99,12 +102,15 @@ std::vector<std::unique_ptr<FieldRef>> parseShardKeyPattern(const BSONObj& keyPa
return parsedPaths;
}
-bool isShardKeyElement(const BSONElement& element, bool allowRegex) {
- if (element.eoo() || element.type() == Array)
+bool isValidShardKeyElement(const BSONElement& element) {
+ return !element.eoo() && element.type() != Array;
+}
+
+bool isValidShardKeyElementForStorage(const BSONElement& element) {
+ if (!isValidShardKeyElement(element))
return false;
- // TODO: Disallow regex all the time
- if (!allowRegex && element.type() == RegEx)
+ if (element.type() == RegEx)
return false;
if (element.type() == Object && !element.embeddedObject().storageValidEmbedded().isOK())
@@ -113,8 +119,43 @@ bool isShardKeyElement(const BSONElement& element, bool allowRegex) {
return true;
}
+BSONElement extractKeyElementFromMatchable(const MatchableDocument& matchable, StringData pathStr) {
+ ElementPath path;
+ path.init(pathStr).transitional_ignore();
+ path.setLeafArrayBehavior(ElementPath::LeafArrayBehavior::kNoTraversal);
+ path.setNonLeafArrayBehavior(ElementPath::NonLeafArrayBehavior::kNoTraversal);
+
+ MatchableDocument::IteratorHolder matchIt(&matchable, &path);
+ if (!matchIt->more())
+ return BSONElement();
+
+ BSONElement matchEl = matchIt->next().element();
+ // We shouldn't have more than one element - we don't expand arrays
+ dassert(!matchIt->more());
+
+ return matchEl;
+}
+
+BSONElement findEqualityElement(const EqualityMatches& equalities, const FieldRef& path) {
+ int parentPathPart;
+ const BSONElement parentEl =
+ pathsupport::findParentEqualityElement(equalities, path, &parentPathPart);
+
+ if (parentPathPart == static_cast<int>(path.numParts()))
+ return parentEl;
+
+ if (parentEl.type() != Object)
+ return BSONElement();
+
+ StringData suffixStr = path.dottedSubstring(parentPathPart, path.numParts());
+ BSONMatchableDocument matchable(parentEl.Obj());
+ return extractKeyElementFromMatchable(matchable, suffixStr);
+}
+
} // namespace
+constexpr int ShardKeyPattern::kMaxShardKeySizeBytes;
+
Status ShardKeyPattern::checkShardKeySize(const BSONObj& shardKey) {
if (shardKey.objsize() <= kMaxShardKeySizeBytes)
return Status::OK();
@@ -128,18 +169,25 @@ Status ShardKeyPattern::checkShardKeySize(const BSONObj& shardKey) {
<< " bytes"};
}
+Status ShardKeyPattern::checkShardKeyIsValidForMetadataStorage(const BSONObj& shardKey) {
+ for (const auto& elem : shardKey) {
+ if (!isValidShardKeyElementForStorage(elem)) {
+ return {ErrorCodes::BadValue,
+ str::stream() << "Shard key element " << elem << " is not valid for storage"};
+ }
+ }
+
+ return Status::OK();
+}
+
ShardKeyPattern::ShardKeyPattern(const BSONObj& keyPattern)
- : _keyPatternPaths(parseShardKeyPattern(keyPattern)),
- _keyPattern(_keyPatternPaths.empty() ? BSONObj() : keyPattern),
+ : _keyPattern(keyPattern),
+ _keyPatternPaths(parseShardKeyPattern(keyPattern)),
_hasId(keyPattern.hasField("_id"_sd)) {}
ShardKeyPattern::ShardKeyPattern(const KeyPattern& keyPattern)
: ShardKeyPattern(keyPattern.toBSON()) {}
-bool ShardKeyPattern::isValid() const {
- return !_keyPattern.toBSON().isEmpty();
-}
-
bool ShardKeyPattern::isHashedPattern() const {
return isHashedPatternEl(_keyPattern.toBSON().firstElement());
}
@@ -156,22 +204,17 @@ const BSONObj& ShardKeyPattern::toBSON() const {
return _keyPattern.toBSON();
}
-string ShardKeyPattern::toString() const {
+std::string ShardKeyPattern::toString() const {
return toBSON().toString();
}
bool ShardKeyPattern::isShardKey(const BSONObj& shardKey) const {
- // Shard keys are always of the form: { 'nested.path' : value, 'nested.path2' : value }
-
- if (!isValid())
- return false;
-
const auto& keyPatternBSON = _keyPattern.toBSON();
for (const auto& patternEl : keyPatternBSON) {
BSONElement keyEl = shardKey[patternEl.fieldNameStringData()];
- if (!isShardKeyElement(keyEl, true))
+ if (!isValidShardKeyElement(keyEl))
return false;
}
@@ -179,12 +222,6 @@ bool ShardKeyPattern::isShardKey(const BSONObj& shardKey) const {
}
BSONObj ShardKeyPattern::normalizeShardKey(const BSONObj& shardKey) const {
- // Shard keys are always of the form: { 'nested.path' : value, 'nested.path2' : value }
- // and in the same order as the key pattern
-
- if (!isValid())
- return BSONObj();
-
// We want to return an empty key if users pass us something that's not a shard key
if (shardKey.nFields() > _keyPattern.toBSON().nFields())
return BSONObj();
@@ -196,7 +233,7 @@ BSONObj ShardKeyPattern::normalizeShardKey(const BSONObj& shardKey) const {
BSONElement keyEl = shardKey[patternEl.fieldNameStringData()];
- if (!isShardKeyElement(keyEl, true))
+ if (!isValidShardKeyElement(keyEl))
return BSONObj();
keyBuilder.appendAs(keyEl, patternEl.fieldName());
@@ -206,28 +243,7 @@ BSONObj ShardKeyPattern::normalizeShardKey(const BSONObj& shardKey) const {
return keyBuilder.obj();
}
-static BSONElement extractKeyElementFromMatchable(const MatchableDocument& matchable,
- StringData pathStr) {
- ElementPath path;
- path.init(pathStr).transitional_ignore();
- path.setLeafArrayBehavior(ElementPath::LeafArrayBehavior::kNoTraversal);
- path.setNonLeafArrayBehavior(ElementPath::NonLeafArrayBehavior::kNoTraversal);
-
- MatchableDocument::IteratorHolder matchIt(&matchable, &path);
- if (!matchIt->more())
- return BSONElement();
-
- BSONElement matchEl = matchIt->next().element();
- // We shouldn't have more than one element - we don't expand arrays
- dassert(!matchIt->more());
-
- return matchEl;
-}
-
BSONObj ShardKeyPattern::extractShardKeyFromMatchable(const MatchableDocument& matchable) const {
- if (!isValid())
- return BSONObj();
-
BSONObjBuilder keyBuilder;
BSONObjIterator patternIt(_keyPattern.toBSON());
@@ -236,7 +252,7 @@ BSONObj ShardKeyPattern::extractShardKeyFromMatchable(const MatchableDocument& m
BSONElement matchEl =
extractKeyElementFromMatchable(matchable, patternEl.fieldNameStringData());
- if (!isShardKeyElement(matchEl, true))
+ if (!isValidShardKeyElement(matchEl))
return BSONObj();
if (isHashedPatternEl(patternEl)) {
@@ -259,27 +275,8 @@ BSONObj ShardKeyPattern::extractShardKeyFromDoc(const BSONObj& doc) const {
return extractShardKeyFromMatchable(matchable);
}
-static BSONElement findEqualityElement(const EqualityMatches& equalities, const FieldRef& path) {
- int parentPathPart;
- const BSONElement parentEl =
- pathsupport::findParentEqualityElement(equalities, path, &parentPathPart);
-
- if (parentPathPart == static_cast<int>(path.numParts()))
- return parentEl;
-
- if (parentEl.type() != Object)
- return BSONElement();
-
- StringData suffixStr = path.dottedSubstring(parentPathPart, path.numParts());
- BSONMatchableDocument matchable(parentEl.Obj());
- return extractKeyElementFromMatchable(matchable, suffixStr);
-}
-
StatusWith<BSONObj> ShardKeyPattern::extractShardKeyFromQuery(OperationContext* opCtx,
const BSONObj& basicQuery) const {
- if (!isValid())
- return StatusWith<BSONObj>(BSONObj());
-
auto qr = stdx::make_unique<QueryRequest>(NamespaceString(""));
qr->setFilter(basicQuery);
@@ -291,17 +288,13 @@ StatusWith<BSONObj> ShardKeyPattern::extractShardKeyFromQuery(OperationContext*
ExtensionsCallbackNoop(),
MatchExpressionParser::kAllowAllSpecialFeatures);
if (!statusWithCQ.isOK()) {
- return StatusWith<BSONObj>(statusWithCQ.getStatus());
+ return statusWithCQ.getStatus();
}
- unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue());
- return extractShardKeyFromQuery(*query);
+ return extractShardKeyFromQuery(*statusWithCQ.getValue());
}
BSONObj ShardKeyPattern::extractShardKeyFromQuery(const CanonicalQuery& query) const {
- if (!isValid())
- return BSONObj();
-
// Extract equalities from query.
EqualityMatches equalities;
// TODO: Build the path set initially?
@@ -325,7 +318,7 @@ BSONObj ShardKeyPattern::extractShardKeyFromQuery(const CanonicalQuery& query) c
const FieldRef& patternPath = **it;
BSONElement equalEl = findEqualityElement(equalities, patternPath);
- if (!isShardKeyElement(equalEl, false))
+ if (!isValidShardKeyElementForStorage(equalEl))
return BSONObj();
if (isHashedPattern()) {
@@ -333,8 +326,8 @@ BSONObj ShardKeyPattern::extractShardKeyFromQuery(const CanonicalQuery& query) c
patternPath.dottedField(),
BSONElementHasher::hash64(equalEl, BSONElementHasher::DEFAULT_HASH_SEED));
} else {
- // NOTE: The equal element may *not* have the same field name as the path -
- // nested $and, $eq, for example
+ // NOTE: The equal element may *not* have the same field name as the path - nested $and,
+ // $eq, for example
keyBuilder.appendAs(equalEl, patternPath.dottedField());
}
}
@@ -346,8 +339,7 @@ BSONObj ShardKeyPattern::extractShardKeyFromQuery(const CanonicalQuery& query) c
bool ShardKeyPattern::isUniqueIndexCompatible(const BSONObj& uniqueIndexPattern) const {
dassert(!KeyPattern::isHashedKeyPattern(uniqueIndexPattern));
- if (!uniqueIndexPattern.isEmpty() &&
- string("_id") == uniqueIndexPattern.firstElementFieldName()) {
+ if (!uniqueIndexPattern.isEmpty() && uniqueIndexPattern.firstElementFieldName() == kIdField) {
return true;
}
@@ -358,10 +350,8 @@ BoundList ShardKeyPattern::flattenBounds(const IndexBounds& indexBounds) const {
invariant(indexBounds.fields.size() == (size_t)_keyPattern.toBSON().nFields());
// If any field is unsatisfied, return empty bound list.
- for (vector<OrderedIntervalList>::const_iterator it = indexBounds.fields.begin();
- it != indexBounds.fields.end();
- it++) {
- if (it->intervals.size() == 0) {
+ for (const auto& field : indexBounds.fields) {
+ if (field.intervals.empty()) {
return BoundList();
}
}
@@ -374,77 +364,75 @@ BoundList ShardKeyPattern::flattenBounds(const IndexBounds& indexBounds) const {
// in another iteration of the loop. We define these partially constructed intervals using pairs
// of BSONObjBuilders (shared_ptrs, since after one iteration of the loop they still must exist
// outside their scope).
- typedef vector<std::pair<std::shared_ptr<BSONObjBuilder>, std::shared_ptr<BSONObjBuilder>>>
- BoundBuilders;
+ using BoundBuilders = std::vector<std::pair<BSONObjBuilder, BSONObjBuilder>>;
BoundBuilders builders;
- builders.emplace_back(shared_ptr<BSONObjBuilder>(new BSONObjBuilder()),
- shared_ptr<BSONObjBuilder>(new BSONObjBuilder()));
+ builders.emplace_back();
+
BSONObjIterator keyIter(_keyPattern.toBSON());
- // until equalityOnly is false, we are just dealing with equality (no range or $in queries).
+ // Until equalityOnly is false, we are just dealing with equality (no range or $in queries).
bool equalityOnly = true;
- for (size_t i = 0; i < indexBounds.fields.size(); i++) {
+ for (size_t i = 0; i < indexBounds.fields.size(); ++i) {
BSONElement e = keyIter.next();
StringData fieldName = e.fieldNameStringData();
- // get the relevant intervals for this field, but we may have to transform the
- // list of what's relevant according to the expression for this field
+ // Get the relevant intervals for this field, but we may have to transform the list of
+ // what's relevant according to the expression for this field
const OrderedIntervalList& oil = indexBounds.fields[i];
- const vector<Interval>& intervals = oil.intervals;
+ const auto& intervals = oil.intervals;
if (equalityOnly) {
if (intervals.size() == 1 && intervals.front().isPoint()) {
- // this field is only a single point-interval
- BoundBuilders::const_iterator j;
- for (j = builders.begin(); j != builders.end(); ++j) {
- j->first->appendAs(intervals.front().start, fieldName);
- j->second->appendAs(intervals.front().end, fieldName);
+ // This field is only a single point-interval
+ for (auto& builder : builders) {
+ builder.first.appendAs(intervals.front().start, fieldName);
+ builder.second.appendAs(intervals.front().end, fieldName);
}
} else {
- // This clause is the first to generate more than a single point.
- // We only execute this clause once. After that, we simplify the bound
- // extensions to prevent combinatorial explosion.
+ // This clause is the first to generate more than a single point. We only execute
+ // this clause once. After that, we simplify the bound extensions to prevent
+ // combinatorial explosion.
equalityOnly = false;
BoundBuilders newBuilders;
- for (BoundBuilders::const_iterator it = builders.begin(); it != builders.end();
- ++it) {
- BSONObj first = it->first->obj();
- BSONObj second = it->second->obj();
+ for (auto& builder : builders) {
+ BSONObj first = builder.first.obj();
+ BSONObj second = builder.second.obj();
- for (vector<Interval>::const_iterator interval = intervals.begin();
- interval != intervals.end();
- ++interval) {
+ for (const auto& interval : intervals) {
uassert(17439,
"combinatorial limit of $in partitioning of results exceeded",
newBuilders.size() < kMaxFlattenedInCombinations);
- newBuilders.emplace_back(shared_ptr<BSONObjBuilder>(new BSONObjBuilder()),
- shared_ptr<BSONObjBuilder>(new BSONObjBuilder()));
- newBuilders.back().first->appendElements(first);
- newBuilders.back().second->appendElements(second);
- newBuilders.back().first->appendAs(interval->start, fieldName);
- newBuilders.back().second->appendAs(interval->end, fieldName);
+
+ newBuilders.emplace_back();
+
+ newBuilders.back().first.appendElements(first);
+ newBuilders.back().first.appendAs(interval.start, fieldName);
+
+ newBuilders.back().second.appendElements(second);
+ newBuilders.back().second.appendAs(interval.end, fieldName);
}
}
- builders = newBuilders;
+
+ builders = std::move(newBuilders);
}
} else {
- // if we've already generated a range or multiple point-intervals
- // just extend what we've generated with min/max bounds for this field
- BoundBuilders::const_iterator j;
- for (j = builders.begin(); j != builders.end(); ++j) {
- j->first->appendAs(intervals.front().start, fieldName);
- j->second->appendAs(intervals.back().end, fieldName);
+ // If we've already generated a range or multiple point-intervals just extend what we've
+ // generated with min/max bounds for this field
+ for (auto& builder : builders) {
+ builder.first.appendAs(intervals.front().start, fieldName);
+ builder.second.appendAs(intervals.back().end, fieldName);
}
}
}
BoundList ret;
- for (BoundBuilders::const_iterator i = builders.begin(); i != builders.end(); ++i)
- ret.emplace_back(i->first->obj(), i->second->obj());
+ for (auto& builder : builders) {
+ ret.emplace_back(builder.first.obj(), builder.second.obj());
+ }
return ret;
}
diff --git a/src/mongo/s/shard_key_pattern.h b/src/mongo/s/shard_key_pattern.h
index 1ceed1a8147..fe01252f61a 100644
--- a/src/mongo/s/shard_key_pattern.h
+++ b/src/mongo/s/shard_key_pattern.h
@@ -66,10 +66,7 @@ typedef std::vector<std::pair<BSONObj, BSONObj>> BoundList;
class ShardKeyPattern {
public:
// Maximum size of shard key
- static const int kMaxShardKeySizeBytes;
-
- // Maximum number of intervals produced by $in queries.
- static const unsigned int kMaxFlattenedInCombinations;
+ static constexpr int kMaxShardKeySizeBytes = 512;
/**
* Helper to check shard key size and generate an appropriate error message.
@@ -77,6 +74,12 @@ public:
static Status checkShardKeySize(const BSONObj& shardKey);
/**
+ * Validates whether the specified shard key is valid to be written as part of the sharding
+ * metadata.
+ */
+ static Status checkShardKeyIsValidForMetadataStorage(const BSONObj& shardKey);
+
+ /**
* Constructs a shard key pattern from a BSON pattern document. If the document is not a
* valid shard key pattern, !isValid() will be true and key extraction will fail.
*/
@@ -87,8 +90,6 @@ public:
*/
explicit ShardKeyPattern(const KeyPattern& keyPattern);
- bool isValid() const;
-
bool isHashedPattern() const;
const KeyPattern& getKeyPattern() const;
@@ -233,11 +234,11 @@ public:
};
private:
+ KeyPattern _keyPattern;
+
// Ordered, parsed paths
std::vector<std::unique_ptr<FieldRef>> _keyPatternPaths;
- KeyPattern _keyPattern;
-
bool _hasId;
};
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index 2b4ddb22fae..c288853464c 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -25,6 +25,8 @@
* then also delete it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/s/shard_key_pattern.h"
#include "mongo/db/hasher.h"
@@ -32,78 +34,57 @@
#include "mongo/db/query/query_test_service_context.h"
#include "mongo/unittest/unittest.h"
+namespace mongo {
namespace {
using std::string;
-using namespace mongo;
-
-TEST(ShardKeyPattern, ValidShardKeyPatternSingle) {
- BSONObj empty;
- ASSERT(!ShardKeyPattern(empty).isValid());
-
- //
- // Single field ShardKeyPatterns
- //
-
- ASSERT(ShardKeyPattern(BSON("a" << 1)).isValid());
- ASSERT(ShardKeyPattern(BSON("a" << 1)).isValid());
- ASSERT(ShardKeyPattern(BSON("a" << 1.0f)).isValid());
- ASSERT(ShardKeyPattern(BSON("a" << (long long)1L)).isValid());
-
- ASSERT(!ShardKeyPattern(BSON("a" << -1)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a" << -1.0)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a"
- << "1"))
- .isValid());
-
- ASSERT(ShardKeyPattern(BSON("a"
- << "hashed"))
- .isValid());
- ASSERT(!ShardKeyPattern(BSON("a"
- << "hash"))
- .isValid());
- ASSERT(!ShardKeyPattern(BSON("" << 1)).isValid());
- ASSERT(!ShardKeyPattern(BSON("." << 1)).isValid());
+TEST(ShardKeyPattern, SingleFieldShardKeyPatternsValidityCheck) {
+ ShardKeyPattern(BSON("a" << 1));
+ ShardKeyPattern(BSON("a" << 1.0f));
+ ShardKeyPattern(BSON("a" << (long long)1L));
+ ShardKeyPattern(BSON("a"
+ << "hashed"));
+
+ ASSERT_THROWS(ShardKeyPattern({}), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a" << -1)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a" << -1.0)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a"
+ << "1")),
+ DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a"
+ << "hash")),
+ DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("" << 1)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("." << 1)), DBException);
}
-TEST(ShardKeyPattern, ValidShardKeyPatternComposite) {
- //
- // Composite ShardKeyPatterns
- //
-
- ASSERT(ShardKeyPattern(BSON("a" << 1 << "b" << 1)).isValid());
- ASSERT(ShardKeyPattern(BSON("a" << 1.0f << "b" << 1.0)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a" << 1 << "b" << -1)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a" << 1 << "b"
- << "1"))
- .isValid());
-
- ASSERT(ShardKeyPattern(BSON("a" << 1 << "b" << 1.0 << "c" << 1.0f)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a" << 1 << "b." << 1.0)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a" << 1 << "" << 1.0)).isValid());
+TEST(ShardKeyPattern, CompositeShardKeyPatternsValidityCheck) {
+ ShardKeyPattern(BSON("a" << 1 << "b" << 1));
+ ShardKeyPattern(BSON("a" << 1.0f << "b" << 1.0));
+ ShardKeyPattern(BSON("a" << 1 << "b" << 1.0 << "c" << 1.0f));
+
+ ASSERT_THROWS(ShardKeyPattern(BSON("a" << 1 << "b" << -1)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a" << 1 << "b"
+ << "1")),
+ DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a" << 1 << "b." << 1.0)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a" << 1 << "" << 1.0)), DBException);
}
-TEST(ShardKeyPattern, ValidShardKeyPatternNested) {
- //
- // Nested ShardKeyPatterns
- //
-
- ASSERT(ShardKeyPattern(BSON("a.b" << 1)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a.b" << -1)).isValid());
- ASSERT(ShardKeyPattern(BSON("a.b.c.d" << 1.0)).isValid());
-
- ASSERT(!ShardKeyPattern(BSON("a" << BSON("b" << 1))).isValid());
-
- ASSERT(!ShardKeyPattern(BSON("a.b." << 1)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a.b.." << 1)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a..b" << 1)).isValid());
-
- ASSERT(ShardKeyPattern(BSON("a" << 1 << "c.d" << 1.0 << "e.f.g" << 1.0f)).isValid());
- ASSERT(ShardKeyPattern(BSON("a" << 1 << "a.b" << 1.0 << "a.b.c" << 1.0f)).isValid());
-
- ASSERT(!ShardKeyPattern(BSON("a" << 1 << "a.b." << 1.0)).isValid());
- ASSERT(!ShardKeyPattern(BSON("a" << BSON("b" << 1) << "c.d" << 1.0)).isValid());
+TEST(ShardKeyPattern, NestedShardKeyPatternsValidityCheck) {
+ ShardKeyPattern(BSON("a.b" << 1));
+ ShardKeyPattern(BSON("a.b.c.d" << 1.0));
+ ShardKeyPattern(BSON("a" << 1 << "c.d" << 1.0 << "e.f.g" << 1.0f));
+ ShardKeyPattern(BSON("a" << 1 << "a.b" << 1.0 << "a.b.c" << 1.0f));
+
+ ASSERT_THROWS(ShardKeyPattern(BSON("a.b" << -1)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a" << BSON("b" << 1))), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a.b." << 1)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a.b.." << 1)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a..b" << 1)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a" << 1 << "a.b." << 1.0)), DBException);
+ ASSERT_THROWS(ShardKeyPattern(BSON("a" << BSON("b" << 1) << "c.d" << 1.0)), DBException);
}
TEST(ShardKeyPattern, IsShardKey) {
@@ -128,10 +109,13 @@ TEST(ShardKeyPattern, NormalizeShardKey) {
BSON("a.b" << 10 << "c" << 30));
ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("c" << 30 << "a.b" << 10)),
BSON("a.b" << 10 << "c" << 30));
+ ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a.b" << BSON("$notAndOperator" << 10) << "c" << 30)),
+ BSON("a.b" << BSON("$notAndOperator" << 10) << "c" << 30));
+ ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a.b" << BSON("$gt" << 10) << "c" << 30)),
+ BSON("a.b" << BSON("$gt" << 10) << "c" << 30));
ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("b" << 10)), BSONObj());
ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a" << 10 << "c" << 30)), BSONObj());
- ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a.b" << BSON("$gt" << 10) << "c" << 30)), BSONObj());
}
static BSONObj docKey(const ShardKeyPattern& pattern, const BSONObj& doc) {
@@ -157,13 +141,16 @@ TEST(ShardKeyPattern, ExtractDocShardKeySingle) {
<< "$id"
<< 1);
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << ref)), BSON("a" << ref));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$dollarPrefixKey:true}}")),
+ fromjson("{a:{$dollarPrefixKey:true}}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$gt:10}}")), fromjson("{a:{$gt:10}}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$gt:{$dollarPrefixKey:10}}}}")),
+ fromjson("{a:{$gt:{$dollarPrefixKey:10}}}}"));
ASSERT_BSONOBJ_EQ(docKey(pattern, BSONObj()), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{b:10}")), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("" << 10)), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:[1,2]}")), BSONObj());
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$invalid:true}}")), BSONObj());
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$gt:10}}")), BSONObj());
// BSONObjIterator breaks this for now
// ASSERT_EQUALS(docKey(pattern, BSON("a" << 10 << "a" << 20)), BSONObj());
}
@@ -183,15 +170,17 @@ TEST(ShardKeyPattern, ExtractDocShardKeyCompound) {
<< "a"
<< 10)),
fromjson("{a:10, b:'20'}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$dollarPrefixKey:true}}")),
+ fromjson("{a:10, b:{$dollarPrefixKey:true}}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$gt:20}}")),
+ fromjson("{a:10, b:{$gt:20}}"));
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$invalid:true}}")), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{b:20}")), BSONObj());
ASSERT_BSONOBJ_EQ(docKey(pattern,
BSON("" << 10 << "b"
<< "20")),
BSONObj());
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$gt:20}}")), BSONObj());
// Ordering
ASSERT_EQUALS(docKey(pattern, BSON("b" << 20 << "a" << 10)).firstElement().numberInt(), 10);
@@ -284,7 +273,7 @@ TEST(ShardKeyPattern, ExtractQueryShardKeySingle) {
ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10,b:{$invalid:'20'}}")), BSONObj());
// Doc key extraction shouldn't work with query
- ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$eq:[10, 20]}, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{$eq:[10, 20]}, c:30}")), BSONObj());
// $eq/$or/$and/$all
ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{$eq:10}}")), fromjson("{a:10}"));
@@ -501,4 +490,6 @@ TEST(ShardKeyPattern, UniqueIndexCompatibleHashed) {
ASSERT(!indexComp(pattern, BSON("c" << 1)));
ASSERT(!indexComp(pattern, BSON("c" << -1 << "a.b" << 1)));
}
-}
+
+} // namespace
+} // namespace mongo