summary refs log tree commit diff
path: root/jstests/noPassthrough/set_step_params.js
diff options
context:
space:
mode:
Diffstat (limited to 'jstests/noPassthrough/set_step_params.js')
-rw-r--r-- jstests/noPassthrough/set_step_params.js | 462
1 file changed, 231 insertions(+), 231 deletions(-)
diff --git a/jstests/noPassthrough/set_step_params.js b/jstests/noPassthrough/set_step_params.js
index 08a4e36422b..d3fbe5deb02 100644
--- a/jstests/noPassthrough/set_step_params.js
+++ b/jstests/noPassthrough/set_step_params.js
@@ -5,270 +5,270 @@ load("jstests/libs/parallelTester.js");
*/
(function() {
- "use strict";
-
- const kDbName = 'test';
-
- const minConns = 4;
- var stepParams = {
- ShardingTaskExecutorPoolMinSize: minConns,
- ShardingTaskExecutorPoolMaxSize: 10,
- ShardingTaskExecutorPoolMaxConnecting: 5,
- ShardingTaskExecutorPoolHostTimeoutMS: 300000,
- ShardingTaskExecutorPoolRefreshRequirementMS: 60000,
- ShardingTaskExecutorPoolRefreshTimeoutMS: 20000,
- ShardingTaskExecutorPoolReplicaSetMatching: "disabled",
- };
-
- const st = new ShardingTest({
- config: {nodes: 1},
- shards: 1,
- rs0: {nodes: 1},
- mongos: [{setParameter: stepParams}],
+"use strict";
+
+const kDbName = 'test';
+
+const minConns = 4;
+var stepParams = {
+ ShardingTaskExecutorPoolMinSize: minConns,
+ ShardingTaskExecutorPoolMaxSize: 10,
+ ShardingTaskExecutorPoolMaxConnecting: 5,
+ ShardingTaskExecutorPoolHostTimeoutMS: 300000,
+ ShardingTaskExecutorPoolRefreshRequirementMS: 60000,
+ ShardingTaskExecutorPoolRefreshTimeoutMS: 20000,
+ ShardingTaskExecutorPoolReplicaSetMatching: "disabled",
+};
+
+const st = new ShardingTest({
+ config: {nodes: 1},
+ shards: 1,
+ rs0: {nodes: 1},
+ mongos: [{setParameter: stepParams}],
+});
+const mongos = st.s0;
+const rst = st.rs0;
+const primary = rst.getPrimary();
+
+const cfg = primary.getDB('local').system.replset.findOne();
+const allHosts = cfg.members.map(x => x.host);
+const mongosDB = mongos.getDB(kDbName);
+const primaryOnly = [primary.name];
+
+function configureReplSetFailpoint(name, modeValue) {
+ st.rs0.nodes.forEach(function(node) {
+ assert.commandWorked(node.getDB("admin").runCommand({
+ configureFailPoint: name,
+ mode: modeValue,
+ data: {shouldCheckForInterrupt: true},
+ }));
});
- const mongos = st.s0;
- const rst = st.rs0;
- const primary = rst.getPrimary();
-
- const cfg = primary.getDB('local').system.replset.findOne();
- const allHosts = cfg.members.map(x => x.host);
- const mongosDB = mongos.getDB(kDbName);
- const primaryOnly = [primary.name];
-
- function configureReplSetFailpoint(name, modeValue) {
- st.rs0.nodes.forEach(function(node) {
- assert.commandWorked(node.getDB("admin").runCommand({
- configureFailPoint: name,
- mode: modeValue,
- data: {shouldCheckForInterrupt: true},
- }));
- });
+}
+
+var threads = [];
+function launchFinds({times, readPref, shouldFail}) {
+ jsTestLog("Starting " + times + " connections");
+ for (var i = 0; i < times; i++) {
+ var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
+ var client = new Mongo(connStr);
+ const ret = client.getDB(dbName).runCommand(
+ {find: "test", limit: 1, "$readPreference": {mode: readPref}});
+
+ if (shouldFail) {
+ assert.commandFailed(ret);
+ } else {
+ assert.commandWorked(ret);
+ }
+ }, st.s.host, readPref, kDbName, shouldFail);
+ thread.start();
+ threads.push(thread);
}
-
- var threads = [];
- function launchFinds({times, readPref, shouldFail}) {
- jsTestLog("Starting " + times + " connections");
- for (var i = 0; i < times; i++) {
- var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
- var client = new Mongo(connStr);
- const ret = client.getDB(dbName).runCommand(
- {find: "test", limit: 1, "$readPreference": {mode: readPref}});
-
- if (shouldFail) {
- assert.commandFailed(ret);
- } else {
- assert.commandWorked(ret);
- }
- }, st.s.host, readPref, kDbName, shouldFail);
- thread.start();
- threads.push(thread);
+}
+
+var currentCheckNum = 0;
+function hasConnPoolStats(args) {
+ const checkNum = currentCheckNum++;
+ jsTestLog("Check #" + checkNum + ": " + tojson(args));
+ var {ready, pending, active, hosts, isAbsent} = args;
+
+ ready = ready ? ready : 0;
+ pending = pending ? pending : 0;
+ active = active ? active : 0;
+ hosts = hosts ? hosts : allHosts;
+
+ function checkStats(res, host) {
+ var stats = res.hosts[host];
+ if (!stats) {
+ jsTestLog("Connection stats for " + host + " are absent");
+ return isAbsent;
}
+
+ jsTestLog("Connection stats for " + host + ": " + tojson(stats));
+ return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
}
- var currentCheckNum = 0;
- function hasConnPoolStats(args) {
- const checkNum = currentCheckNum++;
- jsTestLog("Check #" + checkNum + ": " + tojson(args));
- var {ready, pending, active, hosts, isAbsent} = args;
-
- ready = ready ? ready : 0;
- pending = pending ? pending : 0;
- active = active ? active : 0;
- hosts = hosts ? hosts : allHosts;
-
- function checkStats(res, host) {
- var stats = res.hosts[host];
- if (!stats) {
- jsTestLog("Connection stats for " + host + " are absent");
- return isAbsent;
- }
+ function checkAllStats() {
+ var res = mongos.adminCommand({connPoolStats: 1});
+ return hosts.map(host => checkStats(res, host)).every(x => x);
+ }
- jsTestLog("Connection stats for " + host + ": " + tojson(stats));
- return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
- }
+ assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
- function checkAllStats() {
- var res = mongos.adminCommand({connPoolStats: 1});
- return hosts.map(host => checkStats(res, host)).every(x => x);
- }
+ jsTestLog("Check #" + checkNum + " successful");
+}
- assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
+function updateSetParameters(params) {
+ var cmd = Object.assign({"setParameter": 1}, params);
+ assert.commandWorked(mongos.adminCommand(cmd));
+}
- jsTestLog("Check #" + checkNum + " successful");
- }
+function dropConnections() {
+ assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
+}
- function updateSetParameters(params) {
- var cmd = Object.assign({"setParameter": 1}, params);
- assert.commandWorked(mongos.adminCommand(cmd));
- }
+function resetPools() {
+ dropConnections();
+ mongos.adminCommand({multicast: {ping: 0}});
+ hasConnPoolStats({ready: 4});
+}
- function dropConnections() {
- assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
- }
+function runSubTest(name, fun) {
+ jsTestLog("Running test for " + name);
- function resetPools() {
- dropConnections();
- mongos.adminCommand({multicast: {ping: 0}});
- hasConnPoolStats({ready: 4});
- }
+ resetPools();
- function runSubTest(name, fun) {
- jsTestLog("Running test for " + name);
+ fun();
- resetPools();
+ updateSetParameters(stepParams);
+}
- fun();
+assert.writeOK(mongosDB.test.insert({x: 1}));
+assert.writeOK(mongosDB.test.insert({x: 2}));
+assert.writeOK(mongosDB.test.insert({x: 3}));
+st.rs0.awaitReplication();
- updateSetParameters(stepParams);
- }
+runSubTest("MinSize", function() {
+ dropConnections();
- assert.writeOK(mongosDB.test.insert({x: 1}));
- assert.writeOK(mongosDB.test.insert({x: 2}));
- assert.writeOK(mongosDB.test.insert({x: 3}));
- st.rs0.awaitReplication();
+ // Launch an initial find to trigger to min
+ launchFinds({times: 1, readPref: "primary"});
+ hasConnPoolStats({ready: minConns});
- runSubTest("MinSize", function() {
- dropConnections();
+ // Increase by one
+ updateSetParameters({ShardingTaskExecutorPoolMinSize: 5});
+ hasConnPoolStats({ready: 5});
- // Launch an initial find to trigger to min
- launchFinds({times: 1, readPref: "primary"});
- hasConnPoolStats({ready: minConns});
+ // Increase to MaxSize
+ updateSetParameters({ShardingTaskExecutorPoolMinSize: 10});
+ hasConnPoolStats({ready: 10});
- // Increase by one
- updateSetParameters({ShardingTaskExecutorPoolMinSize: 5});
- hasConnPoolStats({ready: 5});
+ // Decrease to zero
+ updateSetParameters({ShardingTaskExecutorPoolMinSize: 0});
+});
- // Increase to MaxSize
- updateSetParameters({ShardingTaskExecutorPoolMinSize: 10});
- hasConnPoolStats({ready: 10});
+runSubTest("MaxSize", function() {
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ dropConnections();
- // Decrease to zero
- updateSetParameters({ShardingTaskExecutorPoolMinSize: 0});
- });
+ // Launch 10 blocked finds
+ launchFinds({times: 10, readPref: "primary"});
+ hasConnPoolStats({active: 10, hosts: primaryOnly});
- runSubTest("MaxSize", function() {
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- dropConnections();
+ // Increase by 5 and Launch another 4 blocked finds
+ updateSetParameters({ShardingTaskExecutorPoolMaxSize: 15});
+ launchFinds({times: 4, readPref: "primary"});
+ hasConnPoolStats({active: 14, hosts: primaryOnly});
- // Launch 10 blocked finds
- launchFinds({times: 10, readPref: "primary"});
- hasConnPoolStats({active: 10, hosts: primaryOnly});
+ // Launch yet another 2, these should add only 1 connection
+ launchFinds({times: 2, readPref: "primary"});
+ hasConnPoolStats({active: 15, hosts: primaryOnly});
- // Increase by 5 and Launch another 4 blocked finds
- updateSetParameters({ShardingTaskExecutorPoolMaxSize: 15});
- launchFinds({times: 4, readPref: "primary"});
- hasConnPoolStats({active: 14, hosts: primaryOnly});
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ hasConnPoolStats({ready: 15, pending: 0, hosts: primaryOnly});
+});
- // Launch yet another 2, these should add only 1 connection
- launchFinds({times: 2, readPref: "primary"});
- hasConnPoolStats({active: 15, hosts: primaryOnly});
+// Test maxConnecting
+runSubTest("MaxConnecting", function() {
+ const maxPending1 = 2;
+ const maxPending2 = 4;
+ const conns = 6;
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
- hasConnPoolStats({ready: 15, pending: 0, hosts: primaryOnly});
+ updateSetParameters({
+ ShardingTaskExecutorPoolMaxSize: 100,
+ ShardingTaskExecutorPoolMaxConnecting: maxPending1,
});
- // Test maxConnecting
- runSubTest("MaxConnecting", function() {
- const maxPending1 = 2;
- const maxPending2 = 4;
- const conns = 6;
-
- updateSetParameters({
- ShardingTaskExecutorPoolMaxSize: 100,
- ShardingTaskExecutorPoolMaxConnecting: maxPending1,
- });
-
- configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- dropConnections();
-
- // Go to the limit of maxConnecting, so we're stuck here
- launchFinds({times: maxPending1, readPref: "primary"});
- hasConnPoolStats({pending: maxPending1});
-
- // More won't run right now
- launchFinds({times: conns - maxPending1, readPref: "primary"});
- hasConnPoolStats({pending: maxPending1});
-
- // If we increase our limit, it should fill in some of the connections
- updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending2});
- hasConnPoolStats({pending: maxPending2});
-
- // Dropping the limit doesn't cause us to drop pending
- updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending1});
- hasConnPoolStats({pending: maxPending2});
-
- // Release our pending and walk away
- configureReplSetFailpoint("waitInIsMaster", "off");
- hasConnPoolStats({active: conns});
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ dropConnections();
+
+ // Go to the limit of maxConnecting, so we're stuck here
+ launchFinds({times: maxPending1, readPref: "primary"});
+ hasConnPoolStats({pending: maxPending1});
+
+ // More won't run right now
+ launchFinds({times: conns - maxPending1, readPref: "primary"});
+ hasConnPoolStats({pending: maxPending1});
+
+ // If we increase our limit, it should fill in some of the connections
+ updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending2});
+ hasConnPoolStats({pending: maxPending2});
+
+ // Dropping the limit doesn't cause us to drop pending
+ updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending1});
+ hasConnPoolStats({pending: maxPending2});
+
+ // Release our pending and walk away
+ configureReplSetFailpoint("waitInIsMaster", "off");
+ hasConnPoolStats({active: conns});
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+});
+
+runSubTest("Timeouts", function() {
+ const conns = minConns;
+ const pendingTimeoutMS = 5000;
+ const toRefreshTimeoutMS = 1000;
+ const idleTimeoutMS1 = 20000;
+ const idleTimeoutMS2 = 15500;
+
+ // Updating separately since the validation depends on existing params
+ updateSetParameters({
+ ShardingTaskExecutorPoolRefreshTimeoutMS: pendingTimeoutMS,
+ });
+ updateSetParameters({
+ ShardingTaskExecutorPoolRefreshRequirementMS: toRefreshTimeoutMS,
});
+ updateSetParameters({
+ ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS1,
+ });
+
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ dropConnections();
+
+ // Make ready connections
+ launchFinds({times: conns, readPref: "primary"});
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ hasConnPoolStats({ready: conns});
+
+ // Block refreshes and wait for the toRefresh timeout
+ configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
+ sleep(toRefreshTimeoutMS);
+
+ // Confirm that we're in pending for all of our conns
+ hasConnPoolStats({pending: conns});
- runSubTest("Timeouts", function() {
- const conns = minConns;
- const pendingTimeoutMS = 5000;
- const toRefreshTimeoutMS = 1000;
- const idleTimeoutMS1 = 20000;
- const idleTimeoutMS2 = 15500;
-
- // Updating separately since the validation depends on existing params
- updateSetParameters({
- ShardingTaskExecutorPoolRefreshTimeoutMS: pendingTimeoutMS,
- });
- updateSetParameters({
- ShardingTaskExecutorPoolRefreshRequirementMS: toRefreshTimeoutMS,
- });
- updateSetParameters({
- ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS1,
- });
-
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- dropConnections();
-
- // Make ready connections
- launchFinds({times: conns, readPref: "primary"});
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
- hasConnPoolStats({ready: conns});
-
- // Block refreshes and wait for the toRefresh timeout
- configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
- sleep(toRefreshTimeoutMS);
-
- // Confirm that we're in pending for all of our conns
- hasConnPoolStats({pending: conns});
-
- // Set our min conns to 0 to make sure we don't refresh after pending timeout
- updateSetParameters({
- ShardingTaskExecutorPoolMinSize: 0,
- });
-
- // Wait for our pending timeout
- sleep(pendingTimeoutMS);
- hasConnPoolStats({});
-
- configureReplSetFailpoint("waitInIsMaster", "off");
-
- // Reset the min conns to make sure normal refresh doesn't extend the timeout
- updateSetParameters({
- ShardingTaskExecutorPoolMinSize: minConns,
- });
-
- // Wait for our host timeout and confirm the pool drops
- sleep(idleTimeoutMS1);
- hasConnPoolStats({isAbsent: true});
-
- // Reset the pool
- resetPools();
-
- // Sleep for a shorter timeout and then update so we're already expired
- sleep(idleTimeoutMS2);
- updateSetParameters({ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS2});
- hasConnPoolStats({isAbsent: true});
+ // Set our min conns to 0 to make sure we don't refresh after pending timeout
+ updateSetParameters({
+ ShardingTaskExecutorPoolMinSize: 0,
});
- threads.forEach(function(thread) {
- thread.join();
+ // Wait for our pending timeout
+ sleep(pendingTimeoutMS);
+ hasConnPoolStats({});
+
+ configureReplSetFailpoint("waitInIsMaster", "off");
+
+ // Reset the min conns to make sure normal refresh doesn't extend the timeout
+ updateSetParameters({
+ ShardingTaskExecutorPoolMinSize: minConns,
});
- st.stop();
+ // Wait for our host timeout and confirm the pool drops
+ sleep(idleTimeoutMS1);
+ hasConnPoolStats({isAbsent: true});
+
+ // Reset the pool
+ resetPools();
+
+ // Sleep for a shorter timeout and then update so we're already expired
+ sleep(idleTimeoutMS2);
+ updateSetParameters({ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS2});
+ hasConnPoolStats({isAbsent: true});
+});
+
+threads.forEach(function(thread) {
+ thread.join();
+});
+
+st.stop();
})();