author     Anton Korshunov <anton.korshunov@mongodb.com>   2019-02-19 10:54:29 +0000
committer  Anton Korshunov <anton.korshunov@mongodb.com>   2019-03-07 20:55:59 +0000
commit     f2b20e43b4abd5dace54432676c539efb187b020 (patch)
tree       b09c0b15e816f3a46331c6f689528c7849eac067 /src/mongo/s
parent     9a7b62f9de5450fb06c9f0f280c12078b087ab33 (diff)
download   mongo-f2b20e43b4abd5dace54432676c539efb187b020.tar.gz
SERVER-39650 Ensure internal options cannot be specified in a raw aggregate command sent to mongos
(cherry picked from commit 3b4c635f2a3a65a8804232c80a48cdefa3c26b65)
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/commands/cluster_aggregate.cpp       |  8
-rw-r--r--  src/mongo/s/commands/cluster_aggregate_test.cpp  | 53
2 files changed, 56 insertions(+), 5 deletions(-)
diff --git a/src/mongo/s/commands/cluster_aggregate.cpp b/src/mongo/s/commands/cluster_aggregate.cpp
index 38e01a6ab73..d58ce76220a 100644
--- a/src/mongo/s/commands/cluster_aggregate.cpp
+++ b/src/mongo/s/commands/cluster_aggregate.cpp
@@ -930,6 +930,14 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx,
const AggregationRequest& request,
BSONObj cmdObj,
BSONObjBuilder* result) {
+ uassert(51089,
+ str::stream() << "Internal parameter(s) [" << AggregationRequest::kNeedsMergeName
+ << ", "
+ << AggregationRequest::kFromMongosName
+ << ", "
+ << AggregationRequest::kMergeByPBRTName
+ << "] cannot be set to 'true' when sent to mongos",
+ !request.needsMerge() && !request.isFromMongos() && !request.mergeByPBRT());
auto executionNsRoutingInfoStatus = getExecutionNsRoutingInfo(opCtx, namespaces.executionNss);
boost::optional<CachedCollectionRoutingInfo> routingInfo;
LiteParsedPipeline litePipe(request);
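
The guard added above rejects the command before mongos does any routing or dispatch work. As a rough standalone illustration of the same pattern (the RawAggregateRequest, InternalFlagError, and rejectInternalOptions names below are hypothetical stand-ins, not part of this patch or of the mongo codebase):

#include <iostream>
#include <stdexcept>
#include <string>

namespace {

// Stand-in for mongo's AssertionException: an exception that carries a numeric code.
struct InternalFlagError : std::runtime_error {
    InternalFlagError(int code, const std::string& msg)
        : std::runtime_error(msg), code(code) {}
    int code;
};

// Simplified stand-in for the three internal flags parsed off an AggregationRequest.
struct RawAggregateRequest {
    bool needsMerge = false;
    bool fromMongos = false;
    bool mergeByPBRT = false;
};

// Mirrors the uassert(51089, ...) above: any internal flag set to true on a raw
// command arriving at mongos fails the request before routing begins.
void rejectInternalOptions(const RawAggregateRequest& request) {
    if (request.needsMerge || request.fromMongos || request.mergeByPBRT) {
        throw InternalFlagError(
            51089,
            "Internal parameter(s) [needsMerge, fromMongos, mergeByPBRT] "
            "cannot be set to 'true' when sent to mongos");
    }
}

}  // namespace

int main() {
    RawAggregateRequest external;  // all flags default to false: accepted
    rejectInternalOptions(external);
    std::cout << "external request accepted\n";

    RawAggregateRequest spoofed;
    spoofed.fromMongos = true;     // a raw command claiming to come from mongos
    try {
        rejectInternalOptions(spoofed);
    } catch (const InternalFlagError& ex) {
        std::cout << "rejected with code " << ex.code << ": " << ex.what() << "\n";
    }
    return 0;
}
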
diff --git a/src/mongo/s/commands/cluster_aggregate_test.cpp b/src/mongo/s/commands/cluster_aggregate_test.cpp
index db4d2981090..59ffa830242 100644
--- a/src/mongo/s/commands/cluster_aggregate_test.cpp
+++ b/src/mongo/s/commands/cluster_aggregate_test.cpp
@@ -59,12 +59,12 @@ class ClusterAggregateTest : public CatalogCacheTestFixture {
protected:
const BSONObj kAggregateCmdTargeted{
fromjson("{aggregate: 'coll', pipeline: [{$match: {_id: 0}}], explain: false, "
- "allowDiskUse: false, fromMongos: true, "
- "cursor: {batchSize: 10}, maxTimeMS: 100, readConcern: {level: 'snapshot'}}")};
+ "allowDiskUse: false, cursor: {batchSize: 10}, maxTimeMS: 100, readConcern: "
+ "{level: 'snapshot'}}")};
- const BSONObj kAggregateCmdScatterGather{fromjson(
- "{aggregate: 'coll', pipeline: [], explain: false, allowDiskUse: false, fromMongos: true, "
- "cursor: {batchSize: 10}, readConcern: {level: 'snapshot'}}")};
+ const BSONObj kAggregateCmdScatterGather{
+ fromjson("{aggregate: 'coll', pipeline: [], explain: false, allowDiskUse: false, cursor: "
+ "{batchSize: 10}, readConcern: {level: 'snapshot'}}")};
void setUp() {
CatalogCacheTestFixture::setUp();
@@ -188,6 +188,27 @@ protected:
future.timed_get(kFutureTimeout);
}
+
+ /**
+ * This method should only be used to test early exits from ClusterAggregate::runAggregate,
+ * before a request is sent to the shards. Otherwise the call would block, because no
+ * expect* hooks are provided in this method.
+ */
+ Status testRunAggregateEarlyExit(const BSONObj& inputBson) {
+ BSONObjBuilder result;
+ NamespaceString nss{"a.collection"};
+ auto client = getServiceContext()->makeClient("ClusterCmdClient");
+ auto opCtx = client->makeOperationContext();
+ auto request = AggregationRequest::parseFromBSON(nss, inputBson);
+ if (request.getStatus() != Status::OK()) {
+ return request.getStatus();
+ }
+ return ClusterAggregate::runAggregate(opCtx.get(),
+ ClusterAggregate::Namespaces{nss, nss},
+ request.getValue(),
+ inputBson,
+ &result);
+ }
};
TEST_F(ClusterAggregateTest, NoErrors) {
@@ -242,5 +263,27 @@ TEST_F(ClusterAggregateTest, MaxRetriesSnapshotErrors) {
runAggCommandMaxErrors(kAggregateCmdScatterGather, ErrorCodes::SnapshotTooOld, false);
}
+TEST_F(ClusterAggregateTest, ShouldFailWhenFromMongosIsTrue) {
+ const BSONObj inputBson = fromjson("{pipeline: [], cursor: {}, fromMongos: true}");
+ ASSERT_THROWS_CODE(testRunAggregateEarlyExit(inputBson).ignore(), AssertionException, 51089);
+}
+
+TEST_F(ClusterAggregateTest, ShouldFailWhenNeedsMergeIsTrueAndFromMongosIsFalse) {
+ const BSONObj inputBson =
+ fromjson("{pipeline: [], cursor: {}, needsMerge: true, fromMongos: false}");
+ ASSERT_THROWS_CODE(testRunAggregateEarlyExit(inputBson).ignore(), AssertionException, 51089);
+}
+
+TEST_F(ClusterAggregateTest, ShouldFailWhenNeedsMergeIsTrueAndFromMongosIsTrue) {
+ const BSONObj inputBson =
+ fromjson("{pipeline: [], cursor: {}, needsMerge: true, fromMongos: true}");
+ ASSERT_THROWS_CODE(testRunAggregateEarlyExit(inputBson).ignore(), AssertionException, 51089);
+}
+
+TEST_F(ClusterAggregateTest, ShouldFailWhenMergeByPBRTIsTrue) {
+ const BSONObj inputBson = fromjson("{pipeline: [], cursor: {}, mergeByPBRT: true}");
+ ASSERT_THROWS_CODE(testRunAggregateEarlyExit(inputBson).ignore(), AssertionException, 51089);
+}
+
} // namespace
} // namespace mongo
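
The new tests all assert on the same error code via ASSERT_THROWS_CODE. Outside the mongo unittest framework, that assertion boils down to catching an exception that carries a numeric code and comparing it; a minimal sketch, assuming hypothetical CodedError, throwsCode, and runAggregateEarlyExit stand-ins rather than the framework's real API:

#include <functional>
#include <iostream>
#include <stdexcept>

namespace {

// Exception type carrying a numeric error code, standing in for AssertionException.
struct CodedError : std::runtime_error {
    CodedError(int code, const char* msg) : std::runtime_error(msg), code(code) {}
    int code;
};

// Returns true if `fn` throws a CodedError with exactly `expectedCode`, mirroring
// what ASSERT_THROWS_CODE(..., AssertionException, 51089) checks in the tests above.
bool throwsCode(const std::function<void()>& fn, int expectedCode) {
    try {
        fn();
    } catch (const CodedError& ex) {
        return ex.code == expectedCode;
    } catch (...) {
        return false;  // wrong exception type
    }
    return false;      // no exception at all
}

// Toy early-exit path: rejects the request when an internal flag is set.
void runAggregateEarlyExit(bool fromMongos) {
    if (fromMongos) {
        throw CodedError(51089, "internal parameter set on a raw mongos command");
    }
}

}  // namespace

int main() {
    // Same shape as ShouldFailWhenFromMongosIsTrue: expect the 51089 rejection.
    const bool ok = throwsCode([] { runAggregateEarlyExit(/*fromMongos=*/true); }, 51089);
    std::cout << (ok ? "assertion passed" : "assertion failed") << "\n";
    return 0;
}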