author      Kevin Pulo <kevin.pulo@mongodb.com>                2020-09-30 13:31:04 +1000
committer   Evergreen Agent <no-reply@evergreen.mongodb.com>   2020-09-30 07:45:07 +0000
commit      5a1c89e207987c2611c540eea3d39ac3c74bc65e (patch)
tree        0a04cae80c3e64439605414841869fb53dfa9abf
parent      0a9727b9d817c4cb97e4b2eeb93f6487fb6aa94f (diff)
download    mongo-5a1c89e207987c2611c540eea3d39ac3c74bc65e.tar.gz
SERVER-51230 jstests which use shardingState's configServer response field should wait for RSM updates
-rw-r--r--  jstests/sharding/shard_aware_init.js              |  9
-rw-r--r--  jstests/sharding/shard_aware_init_secondaries.js  |  6
-rw-r--r--  jstests/sharding/shard_aware_on_add_shard.js      |  2
-rw-r--r--  jstests/sharding/shard_aware_primary_failover.js  |  3
-rw-r--r--  jstests/sharding/shard_identity_rollback.js       |  3
-rw-r--r--  src/mongo/s/client/shard_registry.cpp             | 11
6 files changed, 26 insertions(+), 8 deletions(-)
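
All five jstest changes below apply the same pattern: the configServer field returned by the shardingState command is filled in from the ShardRegistry, which only reflects the config server connection string after the ReplicaSetMonitor (RSM) has delivered an update, so asserting on it immediately can race. A minimal sketch of the polling pattern, assuming a connection named conn and an expected connection string configConnStr (stand-ins for the variables each test already has):

    // Fields set synchronously during sharding initialization can still be
    // checked right away.
    var res = assert.commandWorked(conn.adminCommand({shardingState: 1}));
    assert(res.enabled);

    // configServer is refreshed asynchronously from RSM updates, so poll for
    // the expected value instead of asserting on a single response.
    assert.soon(
        () => configConnStr == conn.adminCommand({shardingState: 1}).configServer,
        'timed out waiting for shardingState.configServer to become ' + configConnStr);
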
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index cd764c840ce..5b36acbffd5 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -71,9 +71,10 @@ var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
res = mongodConn.getDB('admin').runCommand({shardingState: 1});
assert(res.enabled);
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+ assert.soon(() => shardIdentityDoc.configsvrConnectionString ==
+ mongodConn.adminCommand({shardingState: 1}).configServer);
return mongodConn;
};
@@ -93,9 +94,10 @@ var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
var res = mongodConn.getDB('admin').runCommand({shardingState: 1});
assert(res.enabled);
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+ assert.soon(() => shardIdentityDoc.configsvrConnectionString ==
+ mongodConn.adminCommand({shardingState: 1}).configServer);
// Should not be allowed to remove the shardIdentity document
assert.writeErrorWithCode(
mongodConn.getDB('admin').system.version.remove({_id: 'shardIdentity'}), 40070);
@@ -118,9 +120,10 @@ var runTest = function(mongodConn, configConnStr, awaitVersionUpdate) {
res = mongodConn.getDB('admin').runCommand({shardingState: 1});
assert(res.enabled);
- assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+ assert.soon(() => shardIdentityDoc.configsvrConnectionString ==
+ mongodConn.adminCommand({shardingState: 1}).configServer);
//
// Test shardIdentity doc without configsvrConnectionString, resulting into parse error
diff --git a/jstests/sharding/shard_aware_init_secondaries.js b/jstests/sharding/shard_aware_init_secondaries.js
index f852c6e58a1..c48aecb52fd 100644
--- a/jstests/sharding/shard_aware_init_secondaries.js
+++ b/jstests/sharding/shard_aware_init_secondaries.js
@@ -46,9 +46,10 @@ secConn.setSecondaryOk();
var res = secConn.getDB('admin').runCommand({shardingState: 1});
assert(res.enabled, tojson(res));
-assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+assert.soon(() => shardIdentityDoc.configsvrConnectionString ==
+ secConn.adminCommand({shardingState: 1}).configServer);
replTest.restart(replTest.getNodeId(secConn));
replTest.waitForPrimary();
@@ -60,9 +61,10 @@ secConn.setSecondaryOk();
res = secConn.getDB('admin').runCommand({shardingState: 1});
assert(res.enabled, tojson(res));
-assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+assert.soon(() => shardIdentityDoc.configsvrConnectionString ==
+ secConn.adminCommand({shardingState: 1}).configServer);
replTest.stopSet();
diff --git a/jstests/sharding/shard_aware_on_add_shard.js b/jstests/sharding/shard_aware_on_add_shard.js
index 75e2d7c1026..e524f2cfe64 100644
--- a/jstests/sharding/shard_aware_on_add_shard.js
+++ b/jstests/sharding/shard_aware_on_add_shard.js
@@ -17,10 +17,10 @@ var checkShardingStateInitialized = function(conn, configConnStr, shardName, clu
var res = conn.getDB('admin').runCommand({shardingState: 1});
assert.commandWorked(res);
assert(res.enabled);
- assert.eq(configConnStr, res.configServer);
assert.eq(shardName, res.shardName);
assert(clusterId.equals(res.clusterId),
'cluster id: ' + tojson(clusterId) + ' != ' + tojson(res.clusterId));
+ assert.soon(() => configConnStr == conn.adminCommand({shardingState: 1}).configServer);
};
var checkShardMarkedAsShardAware = function(mongosConn, shardName) {
diff --git a/jstests/sharding/shard_aware_primary_failover.js b/jstests/sharding/shard_aware_primary_failover.js
index 9929b555bf8..77f73d4c106 100644
--- a/jstests/sharding/shard_aware_primary_failover.js
+++ b/jstests/sharding/shard_aware_primary_failover.js
@@ -48,9 +48,10 @@ primaryConn = replTest.getPrimary();
var res = primaryConn.getDB('admin').runCommand({shardingState: 1});
assert(res.enabled);
-assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+assert.soon(() => shardIdentityDoc.configsvrConnectionString ==
+ primaryConn.adminCommand({shardingState: 1}).configServer);
replTest.stopSet();
diff --git a/jstests/sharding/shard_identity_rollback.js b/jstests/sharding/shard_identity_rollback.js
index 25dbc2e19e4..39738521d6e 100644
--- a/jstests/sharding/shard_identity_rollback.js
+++ b/jstests/sharding/shard_identity_rollback.js
@@ -46,9 +46,10 @@ assert.commandWorked(priConn.getDB('admin').system.version.update(
// Ensure sharding state on the primary was initialized
var res = priConn.getDB('admin').runCommand({shardingState: 1});
assert(res.enabled, tojson(res));
-assert.eq(shardIdentityDoc.configsvrConnectionString, res.configServer);
assert.eq(shardIdentityDoc.shardName, res.shardName);
assert.eq(shardIdentityDoc.clusterId, res.clusterId);
+assert.soon(() => shardIdentityDoc.configsvrConnectionString ==
+ priConn.adminCommand({shardingState: 1}).configServer);
// Ensure sharding state on the secondaries was *not* initialized
secondaries.forEach(function(secondary) {
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index c74911bcc48..1387772ecd2 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -107,6 +107,10 @@ void ShardRegistry::init(ServiceContext* service) {
_cache =
std::make_unique<Cache>(_cacheMutex, _service, _threadPool, lookupFn, 1 /* cacheSize */);
+ LOGV2_DEBUG(5123000,
+ 1,
+ "Initializing ShardRegistry",
+ "configServers"_attr = _initConfigServerCS.toString());
{
stdx::lock_guard<Latch> lk(_mutex);
_configShardData = ShardRegistryData::createWithConfigShardOnly(
@@ -381,6 +385,13 @@ void ShardRegistry::updateReplSetHosts(const ConnectionString& givenConnString,
_latestConnStrings.find(givenConnString.getSetName()) != _latestConnStrings.end())
? _latestConnStrings[givenConnString.getSetName()].makeUnionWith(givenConnString)
: givenConnString;
+ LOGV2_DEBUG(5123001,
+ 1,
+ "Updating ShardRegistry connection string",
+ "updateType"_attr =
+ updateType == ConnectionStringUpdateType::kPossible ? "possible" : "confirmed",
+ "givenConnString"_attr = givenConnString.toString(),
+ "newConnString"_attr = newConnString.toString());
if (auto shard = _configShardData.findByRSName(newConnString.getSetName())) {
auto newData = ShardRegistryData::createFromExisting(
_configShardData, newConnString, _shardFactory.get());
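
The two LOGV2_DEBUG calls added to shard_registry.cpp log at debug level 1, so they only appear once the relevant log component's verbosity is raised. A hypothetical way to surface them from a jstest, assuming the messages fall under the "sharding" log component and that the ShardingTest startup options shown are acceptable on this branch (both assumptions, not confirmed by this diff):

    // Start a small cluster with sharding debug logging at level 1 on the mongos
    // (assumption: shard_registry.cpp logs under the "sharding" component).
    var st = new ShardingTest({
        shards: 1,
        other: {
            mongosOptions:
                {setParameter: {logComponentVerbosity: tojson({sharding: {verbosity: 1}})}}
        }
    });

    // "Initializing ShardRegistry" (id 5123000) is logged when the registry is built;
    // "Updating ShardRegistry connection string" (id 5123001) is logged whenever the
    // RSM reports a possible or confirmed set of replica set hosts.
    checkLog.containsJson(st.s, 5123000);

    st.stop();
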