author     Marcos José Grillo Ramirez <marcos.grillo@mongodb.com>   2022-04-05 17:11:59 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>         2022-04-05 20:28:45 +0000
commit     91a607d3a2c748ead682c1a44d37263254de26f8 (patch)
tree       1245c2efd4eb7ebcc46f8128c292c8df8744ec3d /jstests
parent     c96f8dacc4c71b4774c932a07be4fac71b6db628 (diff)
download   mongo-91a607d3a2c748ead682c1a44d37263254de26f8.tar.gz
SERVER-63870 Integrate replica set setClusterParameter into POS with replay protection
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/auth/lib/commands_lib.js                              8
-rw-r--r--  jstests/core/views/views_all_commands.js                      1
-rw-r--r--  jstests/replsets/db_reads_while_recovering_all_commands.js    1
-rw-r--r--  jstests/sharding/read_write_concern_defaults_application.js   1
-rw-r--r--  jstests/sharding/set_cluster_parameter.js                     78
5 files changed, 77 insertions(+), 12 deletions(-)
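
This commit routes the replica-set setClusterParameter path through the primary-only service (POS) with replay protection, and the jstests below are extended to cover the command and the new _shardsvrSetClusterParameter internal command. As a minimal sketch of the command shapes these tests exercise (assuming a shell connected to a mongos or replica-set primary with featureFlagClusterWideConfig enabled; the parameter names are the test-only ones used throughout this diff):

// Set the test-only cluster parameter to a new value.
assert.commandWorked(
    db.adminCommand({setClusterParameter: {testIntClusterParameter: {intData: 42}}}));

// Read it back; the reply carries the clusterParameterTime that the replay
// protection added in this change keeps consistent across the cluster.
const reply = assert.commandWorked(
    db.adminCommand({getClusterParameter: "testIntClusterParameter"}));
printjson(reply);
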
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index dbc73db0e47..5f9d5856505 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -4178,7 +4178,11 @@ var authCommandsLib = {
{
testname: "getClusterParameter",
command: {getClusterParameter: "testIntClusterParameter"},
- skipTest: (conn) => !TestData.setParameters.featureFlagClusterWideConfig,
+ skipTest: (conn) => {
+ const hello = assert.commandWorked(conn.getDB("admin").runCommand({hello: 1}));
+ const isStandalone = hello.msg !== "isdbgrid" && !hello.hasOwnProperty('setName');
+ return !TestData.setParameters.featureFlagClusterWideConfig || isStandalone;
+ },
testcases: [
{
runOnDb: adminDbName,
@@ -5666,7 +5670,7 @@ var authCommandsLib = {
},
{
testname: "setClusterParameter",
- command: {setClusterParameter: {testIntClusterParameterParam: {intData: 17}}},
+ command: {setClusterParameter: {testIntClusterParameter: {intData: 17}}},
skipTest: (conn) => {
const hello = assert.commandWorked(conn.getDB("admin").runCommand({hello: 1}));
const isStandalone = hello.msg !== "isdbgrid" && !hello.hasOwnProperty('setName');
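
The skipTest callbacks above share one topology probe: the hello command reports msg === "isdbgrid" only on mongos, and a setName field only on replica-set members, so a node with neither is a standalone and the cluster-parameter tests are skipped there. Restated as a small helper (the name isStandaloneNode is illustrative, not a helper that exists in commands_lib.js):

// Returns true when conn points at a standalone mongod, i.e. neither a mongos
// (hello.msg === "isdbgrid") nor a replica-set member (no setName in the reply).
function isStandaloneNode(conn) {
    const hello = assert.commandWorked(conn.getDB("admin").runCommand({hello: 1}));
    return hello.msg !== "isdbgrid" && !hello.hasOwnProperty("setName");
}
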
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 551cb62518c..c6451f6934d 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -169,6 +169,7 @@ let viewsCommandTests = {
_shardsvrReshardCollection: {skip: isAnInternalCommand},
_shardsvrReshardingOperationTime: {skip: isAnInternalCommand},
_shardsvrSetAllowMigrations: {skip: isAnInternalCommand},
+ _shardsvrSetClusterParameter: {skip: isAnInternalCommand},
_shardsvrSetUserWriteBlockMode: {skip: isAnInternalCommand},
_shardsvrShardCollection:
{skip: isAnInternalCommand}, // TODO SERVER-58843: Remove once 6.0 becomes last LTS
diff --git a/jstests/replsets/db_reads_while_recovering_all_commands.js b/jstests/replsets/db_reads_while_recovering_all_commands.js
index d9bc04fe52c..b089f91df8a 100644
--- a/jstests/replsets/db_reads_while_recovering_all_commands.js
+++ b/jstests/replsets/db_reads_while_recovering_all_commands.js
@@ -97,6 +97,7 @@ const allCommands = {
_shardsvrReshardingOperationTime: {skip: isPrimaryOnly},
_shardsvrRefineCollectionShardKey: {skip: isPrimaryOnly},
_shardsvrSetAllowMigrations: {skip: isPrimaryOnly},
+ _shardsvrSetClusterParameter: {skip: isAnInternalCommand},
_shardsvrSetUserWriteBlockMode: {skip: isPrimaryOnly},
_shardsvrCollMod: {skip: isPrimaryOnly},
_shardsvrCollModParticipant: {skip: isAnInternalCommand},
diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js
index d7df5133590..31d5beb1541 100644
--- a/jstests/sharding/read_write_concern_defaults_application.js
+++ b/jstests/sharding/read_write_concern_defaults_application.js
@@ -168,6 +168,7 @@ let testCases = {
_shardsvrReshardCollection: {skip: "internal command"},
_shardsvrReshardingOperationTime: {skip: "internal command"},
_shardsvrSetAllowMigrations: {skip: "internal command"},
+ _shardsvrSetClusterParameter: {skip: "internal command"},
_shardsvrSetUserWriteBlockMode: {skip: "internal command"},
_shardsvrCollMod: {skip: "internal command"},
_shardsvrCollModParticipant: {skip: "internal command"},
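
The three command-coverage suites above (views_all_commands.js, db_reads_while_recovering_all_commands.js, read_write_concern_defaults_application.js) each enumerate every command the server reports and fail if one has no entry, which is why the new _shardsvrSetClusterParameter internal command must at least be registered with a skip reason in each map. A hedged sketch of that kind of completeness check (testCases mirrors the map name used in read_write_concern_defaults_application.js; the exact assertion wording differs per suite):

// Every command reported by the server must have an entry in the test map,
// even if that entry is only {skip: "internal command"}.
const commands = assert.commandWorked(db.adminCommand({listCommands: 1})).commands;
Object.keys(commands).forEach(function(name) {
    assert(testCases.hasOwnProperty(name),
           "coverage test has no entry for command: " + name);
});
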
diff --git a/jstests/sharding/set_cluster_parameter.js b/jstests/sharding/set_cluster_parameter.js
index ad550c6b685..70a2cab98e2 100644
--- a/jstests/sharding/set_cluster_parameter.js
+++ b/jstests/sharding/set_cluster_parameter.js
@@ -13,15 +13,31 @@
load('jstests/libs/fail_point_util.js');
-const st = new ShardingTest({shards: 1});
+const clusterParameter1Value = {
+ intData: 42
+};
+const clusterParameter1Name = 'testIntClusterParameter';
+const clusterParameter1 = {
+ [clusterParameter1Name]: clusterParameter1Value
+};
+
+const clusterParameter2Value = {
+ strData: 'on'
+};
+const clusterParameter2Name = 'testStrClusterParameter';
+const clusterParameter2 = {
+ [clusterParameter2Name]: clusterParameter2Value
+};
+
+const st = new ShardingTest({shards: 1, rs: {nodes: 3}});
let fp =
configureFailPoint(st.configRS.getPrimary(), 'hangBeforeRunningConfigsvrCoordinatorInstance');
-let setClusterParameterSuccessThread = new Thread((mongosConnString) => {
+let setClusterParameterSuccessThread = new Thread((mongosConnString, clusterParameter) => {
let mongos = new Mongo(mongosConnString);
- assert.commandWorked(mongos.adminCommand({setClusterParameter: {param: true}}));
-}, st.s.host);
+ assert.commandWorked(mongos.adminCommand({setClusterParameter: clusterParameter}));
+}, st.s.host, clusterParameter1);
setClusterParameterSuccessThread.start();
fp.wait();
@@ -29,12 +45,13 @@ fp.wait();
jsTestLog(
'Check that 2 requests for the same cluster parameter and same value generates only one coordinator.');
-let setClusterParameterJoinSuccessThread = new Thread((mongosConnString) => {
+let setClusterParameterJoinSuccessThread = new Thread((mongosConnString, clusterParameter) => {
let mongos = new Mongo(mongosConnString);
- assert.commandWorked(mongos.adminCommand({setClusterParameter: {param: true}}));
-}, st.s.host);
+ assert.commandWorked(mongos.adminCommand({setClusterParameter: clusterParameter}));
+}, st.s.host, clusterParameter1);
setClusterParameterJoinSuccessThread.start();
+fp.wait();
let currOp =
st.configRS.getPrimary()
@@ -44,17 +61,58 @@ let currOp =
.toArray();
assert.eq(1, currOp.length);
assert(currOp[0].hasOwnProperty('command'));
-assert(currOp[0].command.hasOwnProperty('param'));
-assert.eq(true, currOp[0].command.param);
+assert.docEq(currOp[0].command, clusterParameter1);
jsTestLog('Check that a second request will fail with ConflictingOperationInProgress.');
-assert.commandFailedWithCode(st.s.adminCommand({setClusterParameter: {otherParam: true}}),
+assert.commandFailedWithCode(st.s.adminCommand({setClusterParameter: clusterParameter2}),
ErrorCodes.ConflictingOperationInProgress);
fp.off();
setClusterParameterSuccessThread.join();
setClusterParameterJoinSuccessThread.join();
+jsTestLog('Check forward progress until completion in the presence of a config server stepdown.');
+
+fp = configureFailPoint(st.configRS.getPrimary(), 'hangBeforeRunningConfigsvrCoordinatorInstance');
+
+let setClusterParameterThread = new Thread((mongosConnString, clusterParameter) => {
+ let mongos = new Mongo(mongosConnString);
+ assert.commandWorked(mongos.adminCommand({setClusterParameter: clusterParameter}));
+}, st.s.host, clusterParameter2);
+
+setClusterParameterThread.start();
+fp.wait();
+
+let newPrimary = st.configRS.getSecondary();
+
+st.configRS.stepUp(newPrimary);
+
+// After the stepdown the command should be retried and finish successfully.
+setClusterParameterThread.join();
+
+const clusterParametersConfigColl =
+ st.configRS.getPrimary().getCollection('config.clusterParameters');
+
+const shardParametersConfigColl = st.rs0.getPrimary().getCollection('config.clusterParameters');
+
+assert.eq(1, clusterParametersConfigColl.countDocuments({_id: clusterParameter2Name}));
+const configClusterParameter = clusterParametersConfigColl.findOne(
+ {_id: clusterParameter2Name}, {_id: 0, clusterParameterTime: 0});
+const shardClusterParameter = shardParametersConfigColl.findOne({_id: clusterParameter2Name},
+ {_id: 0, clusterParameterTime: 0});
+assert.docEq(configClusterParameter, clusterParameter2Value);
+assert.docEq(shardClusterParameter, clusterParameter2Value);
+
+fp.off();
+
+// Check the full cluster has the same clusterParameterTime as the config server.
+const configParameterTime =
+ clusterParametersConfigColl.findOne({_id: clusterParameter2Name}, {clusterParameterTime: 1})
+ .clusterParameterTime;
+assert.eq(configParameterTime,
+ shardParametersConfigColl.findOne({_id: clusterParameter2Name}, {clusterParameterTime: 1})
+ .clusterParameterTime);
+
st.stop();
})();
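
A possible follow-on check, not part of this test: the persisted value could also be verified through the user-facing getClusterParameter command on mongos rather than by reading config.clusterParameters directly. Sketch only, assuming the reply lists each parameter as a document carrying the parameter name in _id:

// Hedged sketch: confirm through mongos that the final value of
// testStrClusterParameter matches what the stepdown-retried command set.
const reply = assert.commandWorked(
    st.s.adminCommand({getClusterParameter: 'testStrClusterParameter'}));
const param = reply.clusterParameters.find(p => p._id === 'testStrClusterParameter');
assert.eq('on', param.strData);
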