summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKshitij Gupta <kshitij.gupta@mongodb.com>2021-02-22 22:45:05 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-10-08 16:05:24 +0000
commited680e1843fb4fd2e1b1249dc6c0d897ac66ca9a (patch)
tree0ee798eb09fd2a2a927bf4b1e05265a5fd74f0d2
parenteda5c7c568a611394135e3c8c0b06ff23ee4818d (diff)
downloadmongo-ed680e1843fb4fd2e1b1249dc6c0d897ac66ca9a.tar.gz
SERVER-34597 Wait for sharding initialization in ShardingTest
(cherry picked from commit 1f84ef5e4ca8b5be22d038ea2a6cc3e5e6863194)
-rw-r--r--buildscripts/resmokelib/testing/fixtures/shardedcluster.py29
-rw-r--r--src/mongo/shell/shardingtest.js66
2 files changed, 95 insertions, 0 deletions
diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
index 89d57ef2e1a..2a4f62c857b 100644
--- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
+++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -22,6 +22,7 @@ class ShardedClusterFixture(interface.Fixture): # pylint: disable=too-many-inst
_CONFIGSVR_REPLSET_NAME = "config-rs"
_SHARD_REPLSET_NAME_PREFIX = "shard-rs"
+ AWAIT_SHARDING_INITIALIZATION_TIMEOUT_SECS = 60
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self, logger, job_num, mongos_executable=None, mongos_options=None, mongod_options=None,
@@ -160,6 +161,9 @@ class ShardedClusterFixture(interface.Fixture): # pylint: disable=too-many-inst
self.logger.info("Enabling sharding for '%s' database...", db_name)
client.admin.command({"enablesharding": db_name})
+ # Wait for mongods to be ready.
+ self._await_mongod_sharding_initialization()
+
# Ensure that the sessions collection gets auto-sharded by the config server
if self.configsvr is not None:
primary = self.configsvr.get_primary().mongo_client()
@@ -170,6 +174,31 @@ class ShardedClusterFixture(interface.Fixture): # pylint: disable=too-many-inst
shard.get_primary().mongo_client())
primary.admin.command({"refreshLogicalSessionCacheNow": 1})
def _await_mongod_sharding_initialization(self):
    """Block until sharding is initialized on every shard mongod.

    Only applies when sharding is enabled and shards are replica sets.
    Polls each shard node with the getShardVersion command (any namespace
    works; the choice does not affect the output) until it returns ok or
    AWAIT_SHARDING_INITIALIZATION_TIMEOUT_SECS elapses.

    Raises:
        errors.ServerFailure: if any node fails to report success before
            the deadline.
    """
    if self.enable_sharding and self.num_rs_nodes_per_shard is not None:
        deadline = time.time(
        ) + ShardedClusterFixture.AWAIT_SHARDING_INITIALIZATION_TIMEOUT_SECS
        timeout_occurred = lambda: deadline - time.time() <= 0.0

        mongod_clients = [(mongod.mongo_client(), mongod.port) for shard in self.shards
                          for mongod in shard.nodes]

        for client, port in mongod_clients:
            self._auth_to_db(client)

            while True:
                # The choice of namespace (local.fooCollection) does not affect the output.
                get_shard_version_result = client.admin.command(
                    "getShardVersion", "local.fooCollection", check=False)
                if get_shard_version_result["ok"]:
                    break

                if timeout_occurred():
                    # Report the timeout that was actually applied; the original
                    # message quoted the unrelated MongoDFixture ready timeout.
                    raise errors.ServerFailure(
                        "mongod on port: {} failed waiting for getShardVersion success after {} seconds"
                        .format(
                            port,
                            ShardedClusterFixture.AWAIT_SHARDING_INITIALIZATION_TIMEOUT_SECS))
                time.sleep(0.1)
def _auth_to_db(self, client):
"""Authenticate client for the 'authenticationDatabase'."""
if self.auth_options is not None:
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index bae78340eba..8040ab2234b 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -717,6 +717,70 @@ var ShardingTest = function(params) {
};
/**
 * Wait for sharding to be initialized on every non-arbiter node of every
 * shard replica set, by polling the getShardVersion command until it
 * returns ok or the time budget is exhausted.
 *
 * @param {number} [timeoutMs=60000] - total budget in milliseconds, shared
 *     across all nodes (each node gets whatever remains).
 */
this.waitForShardingInitialized = function(timeoutMs = 60 * 1000) {
    const getShardVersion = (client, timeout) => {
        assert.soon(() => {
            // The choice of namespace (local.fooCollection) does not affect the output.
            var res = client.adminCommand({getShardVersion: "local.fooCollection"});
            return res.ok == 1;
        }, "timeout waiting for sharding to be initialized on mongod", timeout, 0.1);
    };

    var start = new Date();

    for (var i = 0; i < this._rs.length; ++i) {
        var replSet = this._rs[i];
        if (!replSet)
            continue;
        // Declare locals explicitly; the original leaked these as implicit globals.
        var nodes = replSet.test.nodes;
        var keyFileUsed = replSet.test.keyFile;

        for (var j = 0; j < nodes.length; ++j) {
            // Remaining budget shrinks as earlier nodes consume wall-clock time.
            var diff = (new Date()).getTime() - start.getTime();
            var currNode = nodes[j];
            // Skip arbiters.
            if (currNode.adminCommand({isMaster: 1}).arbiterOnly) {
                continue;
            }

            // Fix: the original read `conn.fullOptions`, but no `conn` is in
            // scope in this function — the node being checked is `currNode`.
            const x509AuthRequired =
                (currNode.fullOptions && currNode.fullOptions.clusterAuthMode &&
                 currNode.fullOptions.clusterAuthMode === "x509" &&
                 currNode.fullOptions.sslMode === "requireSSL");

            if (keyFileUsed) {
                authutil.asCluster(currNode, keyFileUsed, () => {
                    getShardVersion(currNode, timeoutMs - diff);
                });
            } else if (x509AuthRequired) {
                // x509 cluster auth: run the wait in a separate mongo shell
                // that authenticates with the node's client certificate.
                const exitCode = _runMongoProgram(
                    ...["mongo",
                        currNode.host,
                        "--tls",
                        "--tlsAllowInvalidHostnames",
                        "--tlsCertificateKeyFile",
                        currNode.fullOptions.tlsCertificateKeyFile
                            ? currNode.fullOptions.tlsCertificateKeyFile
                            : currNode.fullOptions.sslPEMKeyFile,
                        "--tlsCAFile",
                        currNode.fullOptions.tlsCAFile ? currNode.fullOptions.tlsCAFile
                                                       : currNode.fullOptions.sslCAFile,
                        "--authenticationDatabase=$external",
                        "--authenticationMechanism=MONGODB-X509",
                        "--eval",
                        `(${getShardVersion.toString()})(db.getMongo(), ` +
                            (timeoutMs - diff).toString() + `)`,
                    ]);
                assert.eq(0, exitCode, "parallel shell for x509 auth failed");
            } else {
                getShardVersion(currNode, timeoutMs - diff);
            }
        }
    }
};
+
+ /**
* Kills the mongos with index n.
*/
this.stopMongos = function(n, opts) {
@@ -1760,6 +1824,8 @@ var ShardingTest = function(params) {
}
}
}
+
+ self.waitForShardingInitialized();
}
};