author     Bernard Gorman <bernard.gorman@gmail.com>  2017-07-31 00:33:31 +0100
committer  Bernard Gorman <bernard.gorman@gmail.com>  2017-08-02 17:09:37 +0100
commit     cccb34c66a14bb335c60fe57ed8376ada8b0326c (patch)
tree       75f53e5a425130f42929e0b76b73ed49bbb99b18
parent     443a8f1af11374d8d8e3a35492415f8464987d54 (diff)
download   mongo-cccb34c66a14bb335c60fe57ed8376ada8b0326c.tar.gz
SERVER-30084 Validate that at least one shard is present when running collectionless aggregations on mongos
-rw-r--r--  jstests/aggregation/bugs/server30084.js      18
-rw-r--r--  src/mongo/s/commands/cluster_aggregate.cpp   31
2 files changed, 48 insertions(+), 1 deletion(-)
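
For context, a minimal mongo shell sketch of the client-visible behaviour this patch guarantees, mirroring the new regression test below; the cluster setup and variable names are illustrative:

    // Start a cluster with a config server but zero shards (as in the new jstest).
    const st = new ShardingTest({shards: 0, config: 1});
    const adminDB = st.s.getDB("admin");

    // A collectionless $currentOp aggregation routed through mongos. With this patch it
    // succeeds and returns an empty result set instead of hitting the SERVER-30084 failure.
    const res = adminDB.runCommand({aggregate: 1, pipeline: [{$currentOp: {}}], cursor: {}});
    assert.commandWorked(res);
    assert.eq(res.cursor.firstBatch.length, 0);

    st.stop();
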
diff --git a/jstests/aggregation/bugs/server30084.js b/jstests/aggregation/bugs/server30084.js
new file mode 100644
index 00000000000..926b032e229
--- /dev/null
+++ b/jstests/aggregation/bugs/server30084.js
@@ -0,0 +1,18 @@
+/**
+ * Test that running a $currentOp aggregation on a cluster with no shards returns an empty result
+ * set, and does not cause the mongoS floating point failure described in SERVER-30084.
+ */
+(function() {
+ const st = new ShardingTest({shards: 0, config: 1});
+
+ const adminDB = st.s.getDB("admin");
+
+ assert.commandWorked(
+ adminDB.runCommand({aggregate: 1, pipeline: [{$currentOp: {}}], cursor: {}}));
+ assert.commandWorked(adminDB.currentOp());
+
+ assert.eq(adminDB.aggregate([{$currentOp: {}}]).itcount(), 0);
+ assert.eq(adminDB.currentOp().inprog.length, 0);
+
+ st.stop();
+})();
\ No newline at end of file
diff --git a/src/mongo/s/commands/cluster_aggregate.cpp b/src/mongo/s/commands/cluster_aggregate.cpp
index e8859622eca..ddc1a5df4e7 100644
--- a/src/mongo/s/commands/cluster_aggregate.cpp
+++ b/src/mongo/s/commands/cluster_aggregate.cpp
@@ -123,6 +123,27 @@ Status appendCursorResponseToCommandResult(const ShardId& shardId,
return getStatusFromCommandResult(result->asTempObj());
}
+StatusWith<CachedCollectionRoutingInfo> getExecutionNsRoutingInfo(OperationContext* opCtx,
+ const NamespaceString& execNss,
+ CatalogCache* catalogCache) {
+ // This call to getCollectionRoutingInfo will return !OK if the database does not exist.
+ auto swRoutingInfo = catalogCache->getCollectionRoutingInfo(opCtx, execNss);
+
+ // Collectionless aggregations, however, may be run on 'admin' (which should always exist) but
+ // are subsequently targeted towards the shards. If getCollectionRoutingInfo is OK, we perform a
+ // further check that at least one shard exists if the aggregation is collectionless.
+ if (swRoutingInfo.isOK() && execNss.isCollectionlessAggregateNS()) {
+ std::vector<ShardId> shardIds;
+ Grid::get(opCtx)->shardRegistry()->getAllShardIds(&shardIds);
+
+ if (shardIds.size() == 0) {
+ return {ErrorCodes::NamespaceNotFound, "No shards are present in the cluster"};
+ }
+ }
+
+ return swRoutingInfo;
+}
+
std::set<ShardId> getTargetedShards(OperationContext* opCtx,
const NamespaceString& nss,
const CachedCollectionRoutingInfo& routingInfo,
@@ -296,7 +317,8 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
const auto catalogCache = Grid::get(opCtx)->catalogCache();
auto executionNsRoutingInfoStatus =
- catalogCache->getCollectionRoutingInfo(opCtx, namespaces.executionNss);
+ getExecutionNsRoutingInfo(opCtx, namespaces.executionNss, catalogCache);
+
if (!executionNsRoutingInfoStatus.isOK()) {
appendEmptyResultSet(
*result, executionNsRoutingInfoStatus.getStatus(), namespaces.requestedNss.ns());
@@ -389,6 +411,11 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
shardQuery,
request.getCollation());
+ uassert(ErrorCodes::ShardNotFound,
+ "No targets were found for this aggregation. All shards were removed from the "
+ "cluster mid-operation",
+ shardIds.size() > 0);
+
// Don't need to split pipeline if we are only targeting a single shard, unless there is a
// stage that needs to be run on the primary shard and the single target shard is not the
// primary.
@@ -459,6 +486,8 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
// Retrieve the shard cursors and check whether or not we dispatched to a single shard.
auto cursors = uassertStatusOK(std::move(swCursors));
+ invariant(cursors.size() > 0);
+
// If we dispatched to a single shard, store the remote cursor and return immediately.
if (!pipelineForTargetedShards->isSplitForSharded()) {
invariant(cursors.size() == 1);