author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2019-01-28 20:55:04 -0500
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2019-01-29 22:15:56 -0500
commit    1c61dfa3307c2009dd29c893b8d2b08af6bcf7d6 (patch)
tree      0c1158d649100c015c8e30d142a4b04a62213e90 /src/mongo/db/s/shardsvr_shard_collection.cpp
parent    09abfff1c4ad2f98a9b83093b7e8b6454bc7c393 (diff)
download  mongo-1c61dfa3307c2009dd29c893b8d2b08af6bcf7d6.tar.gz
SERVER-39234 Ensure `shardCollection` initial split works the same between config server and shard primary
Diffstat (limited to 'src/mongo/db/s/shardsvr_shard_collection.cpp')
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection.cpp  |  33
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index 45c942eb7bb..c10ed60aed3 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -423,15 +423,13 @@ void shardCollection(OperationContext* opCtx,
bool unique,
const std::vector<BSONObj>& splitPoints,
const std::vector<TagsType>& tags,
- const bool fromMapReduce,
+ bool fromMapReduce,
const ShardId& dbPrimaryShardId,
- const int numContiguousChunksPerShard,
- const bool isEmpty) {
+ int numContiguousChunksPerShard,
+ bool isEmpty) {
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
const auto primaryShard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
- const bool distributeChunks =
- fromMapReduce || fieldsAndOrder.isHashedPattern() || !tags.empty();
// Fail if there are partially written chunks from a previous failed shardCollection.
checkForExistingChunks(opCtx, nss);
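
A side note on this hunk: dropping the top-level const from the by-value parameters changes nothing for callers, because top-level cv-qualifiers on value parameters are not part of a function's type. A minimal standalone sketch (hypothetical free function, not MongoDB code) showing that both spellings declare one and the same function:

#include <iostream>

void printChunkCount(int numChunks);         // declaration without const
void printChunkCount(const int numChunks) {  // same function; the const only
    std::cout << numChunks << "\n";          // constrains the definition body
}

int main() {
    printChunkCount(3);  // prints 3
    return 0;
}
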
@@ -441,9 +439,10 @@ void shardCollection(OperationContext* opCtx,
BSONObjBuilder collectionDetail;
collectionDetail.append("shardKey", fieldsAndOrder.toBSON());
collectionDetail.append("collection", nss.ns());
- if (uuid) {
+ if (uuid)
uuid->appendToBuilder(&collectionDetail, "uuid");
- }
+ collectionDetail.append("empty", isEmpty);
+ collectionDetail.append("fromMapReduce", fromMapReduce);
collectionDetail.append("primary", primaryShard->toString());
collectionDetail.append("numChunks", static_cast<int>(splitPoints.size() + 1));
uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
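
For context, the builder in this hunk assembles the detail document written to the sharding changelog; this commit additionally records whether the collection was empty and whether the request originated from mapReduce. A hedged, self-contained sketch of roughly what that detail document looks like (all field values invented for illustration, not taken from a real cluster):

#include <iostream>
#include <string>

int main() {
    // Illustrative shape only; the real code builds this via BSONObjBuilder.
    const std::string detail = R"({
        "shardKey": { "x": "hashed" },
        "collection": "test.coll",
        "uuid": "01234567-89ab-cdef-0123-456789abcdef",
        "empty": true,
        "fromMapReduce": false,
        "primary": "shard0000",
        "numChunks": 4
    })";
    std::cout << detail << "\n";
    return 0;
}
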
@@ -467,7 +466,6 @@ void shardCollection(OperationContext* opCtx,
dbPrimaryShardId,
splitPoints,
tags,
- distributeChunks,
isEmpty,
numContiguousChunksPerShard);
@@ -681,7 +679,7 @@ public:
std::vector<BSONObj> finalSplitPoints;
if (request.getInitialSplitPoints()) {
- finalSplitPoints = std::move(*request.getInitialSplitPoints());
+ finalSplitPoints = *request.getInitialSplitPoints();
} else if (tags.empty()) {
InitialSplitPolicy::calculateHashedSplitPointsForEmptyCollection(
shardKeyPattern,
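
The switch from std::move to a plain copy in this hunk is not explained in the commit; one plausible reading (an assumption, not stated in the source) is that it avoids leaving the request holding a moved-from vector, since request.getInitialSplitPoints() is examined again further down when fromMapReduce is computed. The underlying C++ behavior is standard and easy to demonstrate:

#include <cassert>
#include <optional>
#include <utility>
#include <vector>

int main() {
    std::optional<std::vector<int>> splitPoints = std::vector<int>{10, 20, 30};

    std::vector<int> taken = std::move(*splitPoints);  // moves the contained vector out

    assert(splitPoints.has_value());  // the optional stays engaged, so
                                      // bool(splitPoints) is still true afterwards...
    // ...but *splitPoints is now a moved-from vector: valid but unspecified
    // (in practice usually empty), which is easy to misuse later on.
    assert(taken.size() == 3);
    return 0;
}
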
@@ -702,12 +700,21 @@ public:
LOG(0) << "CMD: shardcollection: " << cmdObj;
audit::logShardCollection(
- Client::getCurrent(), nss.ns(), proposedKey, request.getUnique());
+ opCtx->getClient(), nss.ns(), proposedKey, request.getUnique());
- // The initial chunks are distributed evenly across shards if the initial split
- // points were specified in the request by mapReduce or if we are using a hashed
- // shard key. Otherwise, all the initial chunks are placed on the primary shard.
+ // Map/reduce with output to an empty collection assumes it has full control of the
+ // output collection and it would be an unsupported operation if the collection is
+ // being concurrently written
const bool fromMapReduce = bool(request.getInitialSplitPoints());
+ if (fromMapReduce) {
+ uassert(ErrorCodes::ConflictingOperationInProgress,
+ str::stream()
+ << "Map reduce with sharded output to a new collection found "
+ << nss.ns()
+ << " to be non-empty which is not supported.",
+ isEmpty);
+ }
+
const int numContiguousChunksPerShard = initialSplitPoints.empty()
? 1
: (finalSplitPoints.size() + 1) / (initialSplitPoints.size() + 1);
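
The numContiguousChunksPerShard arithmetic is easier to follow with concrete numbers: n split points always delimit n + 1 chunks, so 3 initial split points (4 coarse ranges, roughly one per shard) refined into 15 final split points (16 chunks) give each shard 16 / 4 = 4 contiguous chunks. A small self-contained sketch of that calculation (vector sizes invented for the example):

#include <iostream>
#include <vector>

int main() {
    std::vector<int> initialSplitPoints(3);  // 3 split points -> 4 coarse ranges
    std::vector<int> finalSplitPoints(15);   // 15 split points -> 16 chunks overall

    const int numContiguousChunksPerShard = initialSplitPoints.empty()
        ? 1
        : static_cast<int>((finalSplitPoints.size() + 1) / (initialSplitPoints.size() + 1));

    std::cout << numContiguousChunksPerShard << "\n";  // prints 4
    return 0;
}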