author     Charlie Swanson <charlie.swanson@mongodb.com>   2018-09-28 17:13:22 -0400
committer  Charlie Swanson <charlie.swanson@mongodb.com>   2018-11-19 09:47:43 -0500
commit     794e75a24d975f80317cf92c004cf5cb9ea5b03d (patch)
tree       8c52ccf651f9bfb65268e6d5c0f7df4abd0127ae /src/mongo/s
parent     48d8edbb70092231c0f7c6c4c59af5c6e38a9781 (diff)
download   mongo-794e75a24d975f80317cf92c004cf5cb9ea5b03d.tar.gz
SERVER-36966 Validate ns in cluster mapReduce
Also ensures that the temp collections are always cleaned up, and extends the
passthrough coverage of the mapReduce command outputting to a sharded collection.

(cherry picked from commit 7dbcd710077bc4141e71730be9e12558880375e6)
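In outline, the fix hoists the namespace uassert out of the object-output branch so that both spellings of the output target, {out: {replace: "c", ...}} and {out: "c"}, are validated, with inline output exempt (first hunk below). As a rough illustration of the kind of check involved, here is a hypothetical, simplified stand-in; it is not the server's actual NamespaceString::isValid() logic, which enforces stricter rules (length limits, reserved names, and so on):

    #include <iostream>
    #include <string>

    // Hypothetical, simplified stand-in for the validity check performed by
    // NamespaceString::isValid(); for illustration only.
    bool isPlausibleNamespace(const std::string& db, const std::string& coll) {
        if (db.empty() || coll.empty())
            return false;
        // Characters that are never legal in a database name.
        for (char c : db) {
            if (c == '.' || c == ' ' || c == '$' || c == '/' || c == '\\' || c == '\0')
                return false;
        }
        return true;
    }

    int main() {
        // With the patch, both forms of `out` reach a check like this;
        // inline output skips it.
        std::cout << isPlausibleNamespace("test", "results") << '\n';   // 1: accepted
        std::cout << isPlausibleNamespace("te$st", "results") << '\n';  // 0: rejected
    }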
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp | 41
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 22f71c0f18b..a829839a03b 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -227,11 +227,13 @@ public:
                 }
 
                 outputCollNss = NamespaceString(outDB, finalColShort);
-                uassert(ErrorCodes::InvalidNamespace,
-                        "Invalid output namespace",
-                        outputCollNss.isValid());
             }
+        } else if (outElmt.type() == String) {
+            outputCollNss = NamespaceString(outDB, outElmt.String());
         }
 
+        uassert(ErrorCodes::InvalidNamespace,
+                "Invalid output namespace",
+                inlineOutput || outputCollNss.isValid());
+
         auto const catalogCache = Grid::get(opCtx)->catalogCache();
@@ -332,22 +334,25 @@ public:
         auto splitPts = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
 
+        // TODO: take distributed lock to prevent split / migration?
+        try {
+            Strategy::commandOp(
+                opCtx, dbname, shardedCommand, nss.ns(), q, collation, &mrCommandResults);
+        } catch (DBException& e) {
+            e.addContext(str::stream() << "could not run map command on all shards for ns "
+                                       << nss.ns()
+                                       << " and query "
+                                       << q);
+            throw;
+        }
+
+        // Now that the output collections of the first phase ("tmp.mrs.<>") have been
+        // created, make a best effort to drop them if any part of the second phase fails.
+        ON_BLOCK_EXIT([&]() { cleanUp(servers, dbname, shardResultCollection); });
+
         {
             bool ok = true;
 
-            // TODO: take distributed lock to prevent split / migration?
-
-            try {
-                Strategy::commandOp(
-                    opCtx, dbname, shardedCommand, nss.ns(), q, collation, &mrCommandResults);
-            } catch (DBException& e) {
-                e.addContext(str::stream() << "could not run map command on all shards for ns "
-                                           << nss.ns()
-                                           << " and query "
-                                           << q);
-                throw;
-            }
-
             for (const auto& mrResult : mrCommandResults) {
                 // Need to gather list of all servers even if an error happened
                 const auto server = [&]() {
@@ -394,8 +399,6 @@ public:
             }
 
             if (!ok) {
-                cleanUp(servers, dbname, shardResultCollection);
-
                 // Add "code" to the top-level response, if the failure of the sharded command
                 // can be accounted to a single error.
                 int code = getUniqueCodeFromCommandResults(mrCommandResults);
@@ -640,8 +643,6 @@ public:
             }
         }
 
-        cleanUp(servers, dbname, shardResultCollection);
-
         if (!ok) {
             errmsg = str::stream() << "MR post processing failed: " << singleResult.toString();
             return false;
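The later hunks replace the duplicated per-branch cleanUp(...) calls with a single ON_BLOCK_EXIT guard installed right after the first phase, so the temp collections are dropped however the second phase exits. Below is a minimal self-contained sketch of that scope-guard pattern, using a generic guard class rather than the server's actual ON_BLOCK_EXIT macro:

    #include <functional>
    #include <iostream>
    #include <stdexcept>
    #include <utility>

    // Generic stand-in for ON_BLOCK_EXIT: runs the callable when the guard
    // leaves scope, whether by normal control flow or by a thrown exception.
    class ScopeGuard {
    public:
        explicit ScopeGuard(std::function<void()> f) : _f(std::move(f)) {}
        ~ScopeGuard() {
            _f();  // Best-effort cleanup; a production guard would also
                   // protect against this callable throwing.
        }
        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

    private:
        std::function<void()> _f;
    };

    int main() {
        try {
            // Stands in for dropping the "tmp.mrs.<>" collections created
            // by the first map-reduce phase.
            ScopeGuard cleanup([] { std::cout << "dropping temp collections\n"; });

            // Stands in for the second phase failing part-way through.
            throw std::runtime_error("post-processing failed");
        } catch (const std::exception& e) {
            // The guard already ran during unwinding, so cleanup happens on
            // both the success and failure paths.
            std::cout << "caught: " << e.what() << '\n';
        }
    }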