author     Arun Banala <arun.banala@mongodb.com>  2022-05-09 12:19:59 +0100
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-05-09 14:53:39 +0000
commit     486b3428d8b545e40bda7e0ad0b03007f255a011 (patch)
tree       8773dd56be4090644e8ee7c9735e6f80b9ab0d8c
parent     5a4e56115f8ffa057c8e7fda5fb0980e85fdb28b (diff)

SERVER-66307 [v6.0] Set $_generateV2ResumeTokens parameter only on mongos

-rw-r--r--  jstests/noPassthrough/sharded_index_consistency_metrics.js  263
-rw-r--r--  src/mongo/db/pipeline/sharded_agg_helpers.cpp                  5
2 files changed, 153 insertions, 115 deletions
diff --git a/jstests/noPassthrough/sharded_index_consistency_metrics.js b/jstests/noPassthrough/sharded_index_consistency_metrics.js
index b3192b1b9a2..c937c31957e 100644
--- a/jstests/noPassthrough/sharded_index_consistency_metrics.js
+++ b/jstests/noPassthrough/sharded_index_consistency_metrics.js
@@ -51,119 +51,156 @@ function checkServerStatus(configPrimaryConn,
}
const intervalMS = 500;
-const st = new ShardingTest({
- shards: 2,
- config: 3,
- configOptions: {setParameter: {"shardedIndexConsistencyCheckIntervalMS": intervalMS}}
-});
-const dbName = "testDb";
-const ns1 = dbName + ".testColl1";
-const ns2 = dbName + ".testColl2";
-const ns3 = dbName + ".testColl3";
-const ns4 = dbName + ".testColl4";
-const expiration = 1000000;
-const filterExpr = {
- x: {$gt: 50}
-};
-
-assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, st.shard0.shardName);
-assert.commandWorked(st.s.adminCommand({shardCollection: ns1, key: {_id: "hashed"}}));
-assert.commandWorked(st.s.adminCommand({shardCollection: ns2, key: {_id: "hashed"}}));
-assert.commandWorked(st.s.adminCommand({shardCollection: ns3, key: {_id: "hashed"}}));
-assert.commandWorked(st.s.adminCommand({shardCollection: ns4, key: {_id: "hashed"}}));
-
-// Disable the check on one config secondary to verify this means metrics won't be shown in
-// serverStatus.
-assert.commandWorked(st.config2.getDB("admin").runCommand(
- {setParameter: 1, enableShardedIndexConsistencyCheck: false}));
-
-let configPrimaryConn = st.config0;
-let configSecondaryConn = st.config1;
-const connsWithoutIndexConsistencyMetrics = [st.config2, st.shard0, st.shard1, st.s];
-
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 0);
-
-// Create an inconsistent index for ns1.
-assert.commandWorked(st.shard0.getCollection(ns1).createIndex({x: 1}));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 1);
-
-// Create another inconsistent index for ns1.
-assert.commandWorked(st.shard1.getCollection(ns1).createIndexes([{y: 1}]));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 1);
-
-// Create an inconsistent index for ns2.
-assert.commandWorked(st.shard0.getCollection(ns2).createIndex({x: 1}));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 2);
-
-// Resolve the index inconsistency for ns2.
-assert.commandWorked(st.shard1.getCollection(ns2).createIndex({x: 1}));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 1);
-
-// Create indexes with different keys for the same name and verify this is considered
-// inconsistent.
-assert.commandWorked(st.shard0.getCollection(ns2).createIndex({y: 1}, {name: "diffKey"}));
-assert.commandWorked(st.shard1.getCollection(ns2).createIndex({z: 1}, {name: "diffKey"}));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 2);
-
-// Create indexes for n3 with the same options but in different orders on each shard, and verify
-// that it is not considered as inconsistent.
-assert.commandWorked(st.shard0.getCollection(ns3).createIndex({x: 1}, {
- name: "indexWithOptionsOrderedDifferently",
- partialFilterExpression: filterExpr,
- expireAfterSeconds: expiration
-}));
-assert.commandWorked(st.shard1.getCollection(ns3).createIndex({x: 1}, {
- name: "indexWithOptionsOrderedDifferently",
- expireAfterSeconds: expiration,
- partialFilterExpression: filterExpr
-}));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 2);
-
-// Create indexes for n3 with the same key but different options on each shard, and verify that
-// it is considered as inconsistent.
-assert.commandWorked(st.shard0.getCollection(ns3).createIndex(
- {y: 1}, {name: "indexWithDifferentOptions", expireAfterSeconds: expiration}));
-assert.commandWorked(
- st.shard1.getCollection(ns3).createIndex({y: 1}, {name: "indexWithDifferentOptions"}));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 3);
-
-// Create indexes where one is missing a property and verify this is considered inconsistent.
-assert.commandWorked(st.shard0.getCollection(ns4).createIndex({y: 1}, {expireAfterSeconds: 100}));
-assert.commandWorked(st.shard1.getCollection(ns4).createIndex({y: 1}));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 4);
-
-// Resolve the inconsistency on ns4.
-assert.commandWorked(st.shard1.getCollection(ns4).dropIndex({y: 1}));
-assert.commandWorked(st.shard1.getCollection(ns4).createIndex({y: 1}, {expireAfterSeconds: 100}));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 3);
-
-// Verify fields other than expireAfterSeconds and key are not ignored.
-assert.commandWorked(st.shard0.getCollection(ns4).createIndex(
- {z: 1}, {expireAfterSeconds: 5, partialFilterExpression: {z: {$gt: 50}}}));
-assert.commandWorked(st.shard1.getCollection(ns4).createIndex(
- {z: 1}, {expireAfterSeconds: 5, partialFilterExpression: {z: {$lt: 100}}}));
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 4);
-
-//
-// Verify the counter is only tracked by primaries and cleared on stepdown.
-//
-
-// Force the secondary that tracks inconsistent indexes to step up to primary and update the
-// appropriate test variables.
-st.configRS.stepUp(configSecondaryConn);
-st.configRS.waitForState(configSecondaryConn, ReplSetTest.State.PRIMARY);
-st.configRS.waitForState(configPrimaryConn, ReplSetTest.State.SECONDARY);
-st.configRS.awaitNodesAgreeOnPrimary();
-
-configSecondaryConn = configPrimaryConn;
-configPrimaryConn = st.configRS.getPrimary();
-
-// The new primary should start reporting the correct count and the old primary should start
-// reporting 0.
-checkServerStatus(configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 4);
-
-st.stop();
+const latest = "latest";
+const lastLTS = "last-lts";
+
+// We use mixed binary versions, with only the config server on the latest version, to exercise
+// the scenario that triggers SERVER-66270.
+[{
+ mongos: lastLTS,
+ configSvr: latest,
+ shardSvr: lastLTS
+}, // Mixed versions after the config server binary is upgraded.
+ {mongos: latest, configSvr: latest, shardSvr: latest}]
+ .forEach((version) => {
+ const st = new ShardingTest({
+ shards: 2,
+ config: 3,
+ other: {
+ mongosOptions: {binVersion: version.mongos},
+ configOptions: {
+ binVersion: version.configSvr,
+ setParameter: {"shardedIndexConsistencyCheckIntervalMS": intervalMS}
+ },
+ rsOptions: {
+ binVersion: version.shardSvr,
+ },
+ rs: {nodes: 2},
+ }
+ });
+
+ const dbName = "testDb";
+ const ns1 = dbName + ".testColl1";
+ const ns2 = dbName + ".testColl2";
+ const ns3 = dbName + ".testColl3";
+ const ns4 = dbName + ".testColl4";
+ const expiration = 1000000;
+ const filterExpr = {x: {$gt: 50}};
+
+ assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, st.shard0.shardName);
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns1, key: {_id: "hashed"}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns2, key: {_id: "hashed"}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns3, key: {_id: "hashed"}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: ns4, key: {_id: "hashed"}}));
+
+ // Disable the check on one config secondary to verify this means metrics won't be shown in
+ // serverStatus.
+ assert.commandWorked(st.config2.getDB("admin").runCommand(
+ {setParameter: 1, enableShardedIndexConsistencyCheck: false}));
+
+ let configPrimaryConn = st.config0;
+ let configSecondaryConn = st.config1;
+ const connsWithoutIndexConsistencyMetrics = [st.config2, st.shard0, st.shard1, st.s];
+
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 0);
+
+ // Create an inconsistent index for ns1.
+ assert.commandWorked(st.shard0.getCollection(ns1).createIndex({x: 1}));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 1);
+
+ // Create another inconsistent index for ns1.
+ assert.commandWorked(st.shard1.getCollection(ns1).createIndexes([{y: 1}]));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 1);
+
+ // Create an inconsistent index for ns2.
+ assert.commandWorked(st.shard0.getCollection(ns2).createIndex({x: 1}));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 2);
+
+ // Resolve the index inconsistency for ns2.
+ assert.commandWorked(st.shard1.getCollection(ns2).createIndex({x: 1}));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 1);
+
+ // Create indexes with different keys for the same name and verify this is considered
+ // inconsistent.
+ assert.commandWorked(st.shard0.getCollection(ns2).createIndex({y: 1}, {name: "diffKey"}));
+ assert.commandWorked(st.shard1.getCollection(ns2).createIndex({z: 1}, {name: "diffKey"}));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 2);
+
+        // Create indexes for ns3 with the same options but in different orders on each shard, and
+        // verify that this is not considered inconsistent.
+ assert.commandWorked(st.shard0.getCollection(ns3).createIndex({x: 1}, {
+ name: "indexWithOptionsOrderedDifferently",
+ partialFilterExpression: filterExpr,
+ expireAfterSeconds: expiration
+ }));
+ assert.commandWorked(st.shard1.getCollection(ns3).createIndex({x: 1}, {
+ name: "indexWithOptionsOrderedDifferently",
+ expireAfterSeconds: expiration,
+ partialFilterExpression: filterExpr
+ }));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 2);
+
+        // Create indexes for ns3 with the same key but different options on each shard, and verify
+        // that this is considered inconsistent.
+ assert.commandWorked(st.shard0.getCollection(ns3).createIndex(
+ {y: 1}, {name: "indexWithDifferentOptions", expireAfterSeconds: expiration}));
+ assert.commandWorked(
+ st.shard1.getCollection(ns3).createIndex({y: 1}, {name: "indexWithDifferentOptions"}));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 3);
+
+ // Create indexes where one is missing a property and verify this is considered
+ // inconsistent.
+ assert.commandWorked(
+ st.shard0.getCollection(ns4).createIndex({y: 1}, {expireAfterSeconds: 100}));
+ assert.commandWorked(st.shard1.getCollection(ns4).createIndex({y: 1}));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 4);
+
+ // Resolve the inconsistency on ns4.
+ assert.commandWorked(st.shard1.getCollection(ns4).dropIndex({y: 1}));
+ assert.commandWorked(
+ st.shard1.getCollection(ns4).createIndex({y: 1}, {expireAfterSeconds: 100}));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 3);
+
+ // Verify fields other than expireAfterSeconds and key are not ignored.
+ assert.commandWorked(st.shard0.getCollection(ns4).createIndex(
+ {z: 1}, {expireAfterSeconds: 5, partialFilterExpression: {z: {$gt: 50}}}));
+ assert.commandWorked(st.shard1.getCollection(ns4).createIndex(
+ {z: 1}, {expireAfterSeconds: 5, partialFilterExpression: {z: {$lt: 100}}}));
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 4);
+
+ //
+ // Verify the counter is only tracked by primaries and cleared on stepdown.
+ //
+
+ // Force the secondary that tracks inconsistent indexes to step up to primary and update the
+ // appropriate test variables.
+ st.configRS.stepUp(configSecondaryConn);
+ st.configRS.waitForState(configSecondaryConn, ReplSetTest.State.PRIMARY);
+ st.configRS.waitForState(configPrimaryConn, ReplSetTest.State.SECONDARY);
+ st.configRS.awaitNodesAgreeOnPrimary();
+
+ configSecondaryConn = configPrimaryConn;
+ configPrimaryConn = st.configRS.getPrimary();
+
+ // The new primary should start reporting the correct count and the old primary should start
+ // reporting 0.
+ checkServerStatus(
+ configPrimaryConn, configSecondaryConn, connsWithoutIndexConsistencyMetrics, 4);
+
+ st.stop();
+ });
// Verify that the serverStatus output for standalones and non-sharded replica set servers does
// not contain the index consistency metrics.
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index f63f6c61bc6..cf110d53f43 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -57,6 +57,7 @@
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/collection_uuid_mismatch.h"
+#include "mongo/s/is_mongos.h"
#include "mongo/s/query/cluster_query_knobs_gen.h"
#include "mongo/s/query/document_source_merge_cursors.h"
#include "mongo/s/query/establish_cursors.h"
@@ -115,7 +116,7 @@ RemoteCursor openChangeStreamNewShardMonitor(const boost::intrusive_ptr<Expressi
aggReq.setNeedsMerge(true);
// TODO SERVER-65370: from 6.1 onwards, we will default to v2 and this block should be removed.
- if (expCtx->inMongos) {
+ if (isMongos()) {
aggReq.setGenerateV2ResumeTokens(expCtx->changeStreamTokenVersion == 2);
}
@@ -158,7 +159,7 @@ BSONObj genericTransformForShards(MutableDocument&& cmdForShards,
// that a 6.0 mongoS on a mixed 6.0/7.0 cluster will see only v1 tokens in the stream.
// TODO SERVER-65370: from 6.1 onwards, we will default to v2 and this block should be removed.
const auto& v2FieldName = AggregateCommandRequest::kGenerateV2ResumeTokensFieldName;
- if (auto cmdObj = cmdForShards.peek(); expCtx->inMongos && cmdObj[v2FieldName].missing()) {
+ if (auto cmdObj = cmdForShards.peek(); isMongos() && cmdObj[v2FieldName].missing()) {
cmdForShards[v2FieldName] = Value(false);
}
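
Taken together, the two hunks above key the $_generateV2ResumeTokens behaviour off the process role (isMongos()) rather than the per-pipeline expression context (expCtx->inMongos), so a shard that happens to merge a change stream never attaches the parameter. Below is a minimal standalone sketch of the combined guard; AggRequest, setResumeTokenVersion, and the hard-coded isMongos() stub are simplified stand-ins invented for illustration, not the real server types.

#include <iostream>
#include <optional>

// Stand-in for mongo::isMongos(); the real helper reports whether this
// process is running as a mongos router.
static bool isMongos() {
    return true;  // assume a router for the purposes of this sketch
}

// Simplified stand-in for AggregateCommandRequest: only the optional
// $_generateV2ResumeTokens field matters here.
struct AggRequest {
    std::optional<bool> generateV2ResumeTokens;
};

// Combined guard from both hunks: only a mongos sets the field, and a
// request that has not chosen a token version is pinned to v1 (false) so
// shards never upgrade the token version behind the router's back.
static void setResumeTokenVersion(AggRequest& req, int tokenVersion) {
    if (isMongos() && !req.generateV2ResumeTokens.has_value()) {
        req.generateV2ResumeTokens = (tokenVersion == 2);
    }
}

int main() {
    AggRequest req;
    setResumeTokenVersion(req, /*tokenVersion=*/1);
    std::cout << std::boolalpha << "generateV2ResumeTokens = "
              << req.generateV2ResumeTokens.value_or(false) << '\n';  // prints false
    return 0;
}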