summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Maria van Keulen <maria@mongodb.com>  2018-01-03 16:49:38 -0500
committer Maria van Keulen <maria@mongodb.com>  2018-01-04 18:28:55 -0500
commit   489d177dbd0f0420a8ca04d39fd78d0a2c539420 (patch)
tree     4e23f554a69e1c233cfd12340a05a13972f57c17
parent   2e346aaf5452b86a2106af645ddf65a51a9590fa (diff)
download mongo-489d177dbd0f0420a8ca04d39fd78d0a2c539420.tar.gz
SERVER-32255 Ensure UUIDs exist for local collections on ShardServers (tags: r3.6.2-rc0, r3.6.2)
(cherry picked from commit 2158ea8d6c84c9a68b50f76c9dd23b8a193b65f8) Conflicts: buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml jstests/multiVersion/do_upgrade_downgrade.js src/mongo/db/commands/validate.cpp
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml   2
-rw-r--r--  jstests/libs/check_uuids.js                                                         27
-rw-r--r--  jstests/multiVersion/set_schema_version.js                                          23
-rw-r--r--  jstests/sharding/nonreplicated_uuids_on_shardservers.js                             22
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp                                                31
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp                   18
6 files changed, 78 insertions, 45 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index 7d327494d56..a608c15b7f2 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -107,6 +107,8 @@ selector:
- jstests/sharding/pending_chunk.js
# Calls a shard command that does not exist in v3.4
- jstests/sharding/dump_coll_metadata.js
+ # This test should not be run with a mixed cluster environment.
+ - jstests/sharding/nonreplicated_uuids_on_shardservers.js
executor:
config:
diff --git a/jstests/libs/check_uuids.js b/jstests/libs/check_uuids.js
new file mode 100644
index 00000000000..1eb5f748834
--- /dev/null
+++ b/jstests/libs/check_uuids.js
@@ -0,0 +1,27 @@
+// Contains helpers for checking UUIDs on collections.
+
+/**
+ * Verifies that all collections on all databases on the server with admin database adminDB have
+ * UUIDs if isDowngrade is false and don't have UUIDs if isDowngrade is true.
+ */
+function checkCollectionUUIDs(adminDB, isDowngrade) {
+ let databaseList = adminDB.runCommand({"listDatabases": 1}).databases;
+
+ databaseList.forEach(function(database) {
+ let currentDatabase = adminDB.getSiblingDB(database.name);
+ let collectionInfos = currentDatabase.getCollectionInfos();
+ for (let i = 0; i < collectionInfos.length; i++) {
+ // Always skip system.indexes due to SERVER-30500.
+ if (collectionInfos[i].name == "system.indexes") {
+ continue;
+ }
+ if (isDowngrade) {
+ assert(!collectionInfos[i].info.uuid,
+ "Unexpected uuid for collection: " + tojson(collectionInfos[i]));
+ } else {
+ assert(collectionInfos[i].info.uuid,
+ "Expect uuid for collection: " + tojson(collectionInfos[i]));
+ }
+ }
+ });
+}
diff --git a/jstests/multiVersion/set_schema_version.js b/jstests/multiVersion/set_schema_version.js
index ab198fb264e..553d1fcc443 100644
--- a/jstests/multiVersion/set_schema_version.js
+++ b/jstests/multiVersion/set_schema_version.js
@@ -5,32 +5,11 @@
load("jstests/replsets/rslib.js");
load("jstests/libs/feature_compatibility_version.js");
load("jstests/libs/get_index_helpers.js");
+ load("jstests/libs/check_uuids.js");
const latest = "latest";
const downgrade = "3.4";
- let checkCollectionUUIDs = function(adminDB, isDowngrade) {
- let databaseList = adminDB.runCommand({"listDatabases": 1}).databases;
-
- databaseList.forEach(function(database) {
- let currentDatabase = adminDB.getSiblingDB(database.name);
- let collectionInfos = currentDatabase.getCollectionInfos();
- for (let i = 0; i < collectionInfos.length; i++) {
- // Always skip system.indexes due to SERVER-30500.
- if (collectionInfos[i].name == "system.indexes") {
- continue;
- }
- if (isDowngrade) {
- assert(!collectionInfos[i].info.uuid,
- "Unexpected uuid for collection: " + tojson(collectionInfos[i]));
- } else {
- assert(collectionInfos[i].info.uuid,
- "Expect uuid for collection: " + tojson(collectionInfos[i]));
- }
- }
- });
- };
-
let setFCV = function(adminDB, version) {
assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: version}));
checkFCV(adminDB, version);
diff --git a/jstests/sharding/nonreplicated_uuids_on_shardservers.js b/jstests/sharding/nonreplicated_uuids_on_shardservers.js
new file mode 100644
index 00000000000..f91c5d88cc7
--- /dev/null
+++ b/jstests/sharding/nonreplicated_uuids_on_shardservers.js
@@ -0,0 +1,22 @@
+// SERVER-32255 This test ensures a node started with --shardsvr and added to a replica set receives
+// UUIDs upon re-initiation.
+(function() {
+ "use strict";
+ load("jstests/libs/check_uuids.js");
+ let st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
+ let mongos = st.s;
+ let rs = st.rs0;
+
+ // Create `test.coll`.
+ mongos.getDB("test").coll.insert({_id: 1, x: 1});
+
+ // Add a node with --shardsvr to the replica set.
+ let newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}});
+ rs.reInitiate();
+ rs.awaitSecondaryNodes();
+
+ let secondaryAdminDB = newNode.getDB("admin");
+
+ // Ensure the new node has UUIDs for all its collections.
+ checkCollectionUUIDs(secondaryAdminDB, /*isDowngrade*/ false);
+})();
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 4b1c882b1eb..1e13256dcaf 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -1096,25 +1096,22 @@ void InitialSyncer::_rollbackCheckerCheckForRollbackCallback(
// Set UUIDs for all non-replicated collections on secondaries. See comment in
// ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage() for the explanation of
- // why we do this and why it is not necessary for sharded clusters.
- if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) {
- const NamespaceString nss("admin", "system.version");
- auto opCtx = makeOpCtx();
- auto statusWithUUID = _storage->getCollectionUUID(opCtx.get(), nss);
- if (!statusWithUUID.isOK()) {
- // If the admin database does not exist, we intentionally fail initial sync. As part of
- // SERVER-29448, we disallow dropping the admin database, so failing here is fine.
- onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock,
- statusWithUUID.getStatus());
+ // why we do this.
+ const NamespaceString nss("admin", "system.version");
+ auto opCtx = makeOpCtx();
+ auto statusWithUUID = _storage->getCollectionUUID(opCtx.get(), nss);
+ if (!statusWithUUID.isOK()) {
+ // If the admin database does not exist, we intentionally fail initial sync. As part of
+ // SERVER-29448, we disallow dropping the admin database, so failing here is fine.
+ onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, statusWithUUID.getStatus());
+ return;
+ }
+ if (statusWithUUID.getValue()) {
+ auto schemaStatus = _storage->upgradeUUIDSchemaVersionNonReplicated(opCtx.get());
+ if (!schemaStatus.isOK()) {
+ onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, schemaStatus);
return;
}
- if (statusWithUUID.getValue()) {
- auto schemaStatus = _storage->upgradeUUIDSchemaVersionNonReplicated(opCtx.get());
- if (!schemaStatus.isOK()) {
- onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, schemaStatus);
- return;
- }
- }
}
// Success!
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index fb675304cfe..f2a2ff633ad 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -404,12 +404,18 @@ Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(Operati
// Set UUIDs for all non-replicated collections. This is necessary for independent replica
// sets and config server replica sets started with no data files because collections in
// local are created prior to the featureCompatibilityVersion being set to 3.6, so the
- // collections are not created with UUIDs. This is not an issue for shard servers because
- // the config server sends a setFeatureCompatibilityVersion command with the
- // featureCompatibilityVersion equal to the cluster's featureCompatibilityVersion during
- // addShard, which will add UUIDs to all collections that do not already have them. Here,
- // we add UUIDs to the non-replicated collections on the primary. We add them on the
- // secondaries during InitialSync.
+ // collections are not created with UUIDs. We exclude ShardServers when adding UUIDs to
+ // non-replicated collections on the primary because ShardServers are started up by default
+ // with featureCompatibilityVersion 3.4, so we don't want to assign UUIDs to them until the
+ // cluster's featureCompatibilityVersion is explicitly set to 3.6 by the config server. The
+ // below UUID addition for non-replicated collections only occurs on the primary; UUIDs are
+ // added to non-replicated collections on secondaries during InitialSync. When the config
+ // server sets the featureCompatibilityVersion to 3.6, the shard primary will add UUIDs to
+ // all the collections that need them. One special case here is if a shard is already in
+ // featureCompatibilityVersion 3.6 and a new node is started up with --shardsvr and added to
+ // that shard, the new node will still start up with featureCompatibilityVersion 3.4 and
+ // need to have UUIDs added to each collection. These UUIDs are added during InitialSync,
+ // because the new node is a secondary.
if (serverGlobalParams.clusterRole != ClusterRole::ShardServer &&
FeatureCompatibilityVersion::isCleanStartUp()) {
auto schemaStatus = updateUUIDSchemaVersionNonReplicated(opCtx, true);