summary refs log tree commit diff
path: root/src/mongo/s/commands
diff options
context:
space:
mode:
author: Charlie Swanson <charlie.swanson@mongodb.com> 2018-09-28 17:13:22 -0400
committer: Charlie Swanson <charlie.swanson@mongodb.com> 2018-11-05 09:08:39 -0500
commit: 7dbcd710077bc4141e71730be9e12558880375e6 (patch)
tree: 46fde15b3f093a5876f74fefdbafe21b26845d9d /src/mongo/s/commands
parent: bb8a75e904bb2accb192772126bc319cd16646a4 (diff)
download: mongo-7dbcd710077bc4141e71730be9e12558880375e6.tar.gz
SERVER-36966 Validate ns in cluster mapReduce
Also ensures that the temp collections are always up to date, and extends the passthrough coverage of the mapReduce command outputting to a sharded collection.
Diffstat (limited to 'src/mongo/s/commands')
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp  41
1 file changed, 21 insertions, 20 deletions
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index af3f0acc46d..c2343f1bdae 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -227,11 +227,13 @@ public:
}
outputCollNss = NamespaceString(outDB, finalColShort);
- uassert(ErrorCodes::InvalidNamespace,
- "Invalid output namespace",
- outputCollNss.isValid());
}
+ } else if (outElmt.type() == String) {
+ outputCollNss = NamespaceString(outDB, outElmt.String());
}
+ uassert(ErrorCodes::InvalidNamespace,
+ "Invalid output namespace",
+ inlineOutput || outputCollNss.isValid());
auto const catalogCache = Grid::get(opCtx)->catalogCache();
@@ -332,22 +334,25 @@ public:
auto splitPts = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
+ // TODO: take distributed lock to prevent split / migration?
+ try {
+ Strategy::commandOp(
+ opCtx, dbname, shardedCommand, nss.ns(), q, collation, &mrCommandResults);
+ } catch (DBException& e) {
+ e.addContext(str::stream() << "could not run map command on all shards for ns "
+ << nss.ns()
+ << " and query "
+ << q);
+ throw;
+ }
+
+ // Now that the output collections of the first phase ("tmp.mrs.<>") have been created, make
+ // a best effort to drop them if any part of the second phase fails.
+ ON_BLOCK_EXIT([&]() { cleanUp(servers, dbname, shardResultCollection); });
+
{
bool ok = true;
- // TODO: take distributed lock to prevent split / migration?
-
- try {
- Strategy::commandOp(
- opCtx, dbname, shardedCommand, nss.ns(), q, collation, &mrCommandResults);
- } catch (DBException& e) {
- e.addContext(str::stream() << "could not run map command on all shards for ns "
- << nss.ns()
- << " and query "
- << q);
- throw;
- }
-
for (const auto& mrResult : mrCommandResults) {
// Need to gather list of all servers even if an error happened
const auto server = [&]() {
@@ -394,8 +399,6 @@ public:
}
if (!ok) {
- cleanUp(servers, dbname, shardResultCollection);
-
// Add "code" to the top-level response, if the failure of the sharded command
// can be accounted to a single error.
int code = getUniqueCodeFromCommandResults(mrCommandResults);
@@ -613,8 +616,6 @@ public:
outputRoutingInfo.cm());
}
- cleanUp(servers, dbname, shardResultCollection);
-
if (!ok) {
errmsg = str::stream() << "MR post processing failed: " << singleResult.toString();
return false;