author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2018-11-05 05:50:20 -0500
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2018-11-05 11:11:54 -0500
commit    b0c9a1c8419d7a343c2a0496f2cd6a7c0c4d8c92
tree      b6dab9746ad486d4396cf8b1e981efbb0d8942ee /src/mongo/db/s
parent    c7553b861796c1581c85b3cbc4e55dcc9666aa0f
SERVER-37918 Make sure tags passed to the fast initial split algorithm are in sorted order
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp       | 44
-rw-r--r--  src/mongo/db/s/config/initial_split_policy_test.cpp  | 10
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection.cpp         | 43
3 files changed, 20 insertions(+), 77 deletions(-)
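
Context for the diffs below: the fast initial split algorithm consumes the collection's zone (tag) documents in ascending shard-key order, so the caller must hand them over already sorted by min key; this commit routes the tag lookup through the catalog client, which returns them in that order. A minimal standalone sketch of the ordering invariant (the Tag struct and string keys are hypothetical stand-ins for TagsType and its BSON min key):

    #include <algorithm>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for TagsType; only the min key matters here.
    struct Tag {
        std::string name;
        std::string minKey;
    };

    // The zoned-chunk generator walks tags front to back and assumes each
    // tag's range starts at or after the previous tag's range.
    bool tagsSortedByMinKey(const std::vector<Tag>& tags) {
        return std::is_sorted(
            tags.begin(), tags.end(),
            [](const Tag& a, const Tag& b) { return a.minKey < b.minKey; });
    }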
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index c7056295592..f46418fae41 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -72,36 +72,27 @@ StringMap<std::vector<ShardId>> getTagToShardIds(OperationContext* opCtx,
return tagToShardIds;
}
- // get all docs in config.shards
- auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- auto findShardsStatus =
+ // Get all docs in config.shards through a query instead of going through the shard registry
+ // because we need the zones as well
+ const auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ const auto shardDocs = uassertStatusOK(
configServer->exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting(ReadPreference::Nearest),
repl::ReadConcernLevel::kMajorityReadConcern,
ShardType::ConfigNS,
BSONObj(),
BSONObj(),
- 0);
- uassertStatusOK(findShardsStatus);
- uassert(ErrorCodes::InternalError,
- str::stream() << "cannot find any shard documents",
- !findShardsStatus.getValue().docs.empty());
+ 0));
+ uassert(50986, str::stream() << "Could not find any shard documents", !shardDocs.docs.empty());
for (const auto& tag : tags) {
tagToShardIds[tag.getTag()] = {};
}
- const auto& shardDocList = findShardsStatus.getValue().docs;
-
- for (const auto& shardDoc : shardDocList) {
- auto shardParseStatus = ShardType::fromBSON(shardDoc);
- uassertStatusOK(shardParseStatus);
- auto parsedShard = shardParseStatus.getValue();
+ for (const auto& shardDoc : shardDocs.docs) {
+ auto parsedShard = uassertStatusOK(ShardType::fromBSON(shardDoc));
for (const auto& tag : parsedShard.getTags()) {
- auto it = tagToShardIds.find(tag);
- if (it != tagToShardIds.end()) {
- it->second.push_back(parsedShard.getName());
- }
+ tagToShardIds[tag].push_back(parsedShard.getName());
}
}
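
The hunk above drops the find-then-insert dance in favor of operator[], which default-constructs a missing map entry. A standalone sketch of the pattern, with plain standard-library types standing in for StringMap and ShardId:

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
        std::map<std::string, std::vector<std::string>> tagToShardIds;

        // Pre-seed the requested tags so zones with no shards still appear
        // in the result (mirrors the loop over `tags` in the diff).
        for (std::string tag : {"zoneA", "zoneB"})
            tagToShardIds[tag] = {};

        // operator[] creates the vector on first use, so no find() is needed.
        tagToShardIds["zoneA"].push_back("shard0");
        tagToShardIds["zoneC"].push_back("shard1");  // entry created on the fly

        for (const auto& entry : tagToShardIds)
            std::cout << entry.first << ": " << entry.second.size() << " shard(s)\n";
        return 0;
    }

One behavioral nuance visible in the diff: with operator[], a tag carried by a shard but never requested also gains a map entry, whereas the old find() version silently skipped it.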
@@ -214,10 +205,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
const StringMap<std::vector<ShardId>>& tagToShards,
const std::vector<ShardId>& allShardIds) {
invariant(!allShardIds.empty());
-
- uassert(ErrorCodes::InvalidOptions,
- str::stream() << "cannot find zone split points because no zone docs were found",
- !tags.empty());
+ invariant(!tags.empty());
ChunkVersion version(1, 0, OID::gen());
const auto& keyPattern = shardKeyPattern.getKeyPattern();
@@ -240,7 +228,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
uassert(50973,
str::stream()
<< "cannot shard collection "
- << tag.getNS().ns()
+ << nss.ns()
<< " because it is associated with zone: "
<< tag.getTag()
<< " which is not associated with a shard. please add this zone to a shard.",
@@ -255,6 +243,7 @@ InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
&chunks);
lastChunkMax = tag.getMaxKey();
}
+
if (lastChunkMax.woCompare(keyPattern.globalMax()) < 0) {
// existing zones do not span to $maxKey so create a chunk for that
const ShardId shardId = allShardIds[indx++ % allShardIds.size()];
@@ -340,13 +329,12 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::createFirstChunks(
}
}
- const auto tagToShards = getTagToShardIds(opCtx, tags);
- const Timestamp& validAfter = LogicalClock::get(opCtx)->getClusterTime().asTimestamp();
-
- uassert(ErrorCodes::InternalError,
+ uassert(ErrorCodes::InvalidOptions,
str::stream() << "cannot generate initial chunks based on both split points and tags",
tags.empty() || finalSplitPoints.empty());
+ const auto validAfter = LogicalClock::get(opCtx)->getClusterTime().asTimestamp();
+
auto initialChunks = tags.empty()
? InitialSplitPolicy::generateShardCollectionInitialChunks(nss,
shardKeyPattern,
@@ -356,7 +344,7 @@ InitialSplitPolicy::ShardCollectionConfig InitialSplitPolicy::createFirstChunks(
shardIds,
numContiguousChunksPerShard)
: InitialSplitPolicy::generateShardCollectionInitialZonedChunks(
- nss, shardKeyPattern, validAfter, tags, tagToShards, shardIds);
+ nss, shardKeyPattern, validAfter, tags, getTagToShardIds(opCtx, tags), shardIds);
return initialChunks;
}
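
Two smaller fixes ride along in createFirstChunks above: the mixed-inputs check is reclassified from InternalError to InvalidOptions (a caller mistake, not a broken invariant), and the now single-use tagToShards lookup is inlined into the call. A minimal standalone sketch of the guard, with a plain exception standing in for uassert (names hypothetical):

    #include <stdexcept>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for the diff's `tags` and `finalSplitPoints`.
    void checkInitialChunkInputs(const std::vector<std::string>& tags,
                                 const std::vector<std::string>& splitPoints) {
        // Mirrors: uassert(ErrorCodes::InvalidOptions, ...,
        //                  tags.empty() || finalSplitPoints.empty());
        if (!tags.empty() && !splitPoints.empty())
            throw std::invalid_argument(
                "cannot generate initial chunks based on both split points and tags");
    }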
diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp
index 85cb8643ec8..7ff8e44fef2 100644
--- a/src/mongo/db/s/config/initial_split_policy_test.cpp
+++ b/src/mongo/db/s/config/initial_split_policy_test.cpp
@@ -420,16 +420,6 @@ TEST_F(GenerateShardCollectionInitialZonedChunksTest, NumRemainingChunksGreaterT
checkGeneratedInitialZoneChunks(tags, 2, expectedChunkRanges, expectedShardIds);
}
-TEST_F(GenerateShardCollectionInitialZonedChunksTest, EmptyTagsShouldFail) {
- const std::vector<ChunkRange> expectedChunkRanges;
- const std::vector<TagsType> tags;
- const std::vector<ShardId> expectedShardIds;
- ASSERT_THROWS_CODE(
- checkGeneratedInitialZoneChunks(tags, 1, expectedChunkRanges, expectedShardIds),
- AssertionException,
- ErrorCodes::InvalidOptions);
-}
-
TEST_F(GenerateShardCollectionInitialZonedChunksTest, ZoneNotAssociatedWithAnyShardShouldFail) {
const auto zone1 = zoneName("0");
const auto zone2 = zoneName("1");
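
The deleted EmptyTagsShouldFail test matches the first hunk of initial_split_policy.cpp: the empty-tags check became an invariant, i.e. an internal precondition that aborts the process, rather than a user-facing error a test can catch with ASSERT_THROWS_CODE. A sketch of the contrast using the tree's own macros (not compilable standalone):

    // Old: a testable, user-facing error with a catchable code.
    uassert(ErrorCodes::InvalidOptions,
            "cannot find zone split points because no zone docs were found",
            !tags.empty());

    // New: a caller contract; reaching this with empty tags is a programmer error.
    invariant(!tags.empty());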
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index d7bfc2c4209..56e9d516385 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -545,43 +545,6 @@ void shardCollection(OperationContext* opCtx,
ShardingCatalogClient::kMajorityWriteConcern);
}
-std::vector<TagsType> getExistingTags(OperationContext* opCtx, const NamespaceString& nss) {
- auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- auto tagStatus =
- configServer->exhaustiveFindOnConfig(opCtx,
- kConfigReadSelector,
- repl::ReadConcernLevel::kMajorityReadConcern,
- TagsType::ConfigNS,
- BSON(TagsType::ns(nss.ns())),
- BSONObj(),
- 0);
- uassertStatusOK(tagStatus);
-
- const auto& tagDocList = tagStatus.getValue().docs;
- std::vector<TagsType> tags;
- for (const auto& tagDoc : tagDocList) {
- auto tagParseStatus = TagsType::fromBSON(tagDoc);
- uassertStatusOK(tagParseStatus);
- const auto& parsedTag = tagParseStatus.getValue();
- uassert(ErrorCodes::InvalidOptions,
- str::stream() << "the min and max of the existing zone " << parsedTag.getMinKey()
- << " -->> "
- << parsedTag.getMaxKey()
- << " have non-matching number of keys",
- parsedTag.getMinKey().nFields() == parsedTag.getMaxKey().nFields());
-
- const auto& rangeMin = parsedTag.getMinKey();
- const auto& rangeMax = parsedTag.getMaxKey();
- uassert(ErrorCodes::InvalidOptions,
- str::stream() << "zone " << rangeMin << " -->> " << rangeMax
- << " has min greater than max",
- rangeMin.woCompare(rangeMax) < 0);
-
- tags.push_back(parsedTag);
- }
- return tags;
-}
-
/**
* Internal sharding command run on primary shard server to shard a collection.
*/
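
The removed getExistingTags helper issued its own unordered config.tags query; per this ticket, its replacement in the hunk below, ShardingCatalogClient::getTagsForCollection, performs an equivalent majority read but returns the documents ordered by the zone range's min key. Abbreviated sketch of the new call site (tree code, not standalone):

    // Replacement: the catalog client reads config.tags sorted by min key.
    auto tags = uassertStatusOK(
        Grid::get(opCtx)->catalogClient()->getTagsForCollection(opCtx, nss));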
@@ -623,6 +586,7 @@ public:
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
+ auto const grid = Grid::get(opCtx);
auto const shardingState = ShardingState::get(opCtx);
uassertStatusOK(shardingState->canAcceptShardedCommands());
@@ -650,7 +614,8 @@ public:
opCtx, nss, proposedKey, shardKeyPattern, request);
// Read zone info
- auto tags = getExistingTags(opCtx, nss);
+ const auto catalogClient = grid->catalogClient();
+ auto tags = uassertStatusOK(catalogClient->getTagsForCollection(opCtx, nss));
if (!tags.empty()) {
validateShardKeyAgainstExistingZones(opCtx, proposedKey, shardKeyPattern, tags);
@@ -663,7 +628,7 @@ public:
uuid = UUID::gen();
}
- auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ const auto shardRegistry = grid->shardRegistry();
shardRegistry->reload(opCtx);
DBDirectClient localClient(opCtx);
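
The remaining hunks cache the Grid decoration once per command invocation and pull both services from it, rather than calling Grid::get(opCtx) at each use site. Compressed sketch of the resulting shape (abbreviated tree code, not standalone):

    auto const grid = Grid::get(opCtx);                // fetch the decoration once
    const auto catalogClient = grid->catalogClient();  // sorted zone/tag reads
    const auto shardRegistry = grid->shardRegistry();  // shard topology
    shardRegistry->reload(opCtx);                      // refresh before placing chunks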