summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_auth.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_auth_audit.yml2
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml1
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml1
-rw-r--r--jstests/noPassthrough/auth_reject_mismatching_logical_times.js3
-rw-r--r--jstests/sharding/mongod_returns_no_cluster_time_without_keys.js82
-rw-r--r--src/mongo/db/service_entry_point_mongod.cpp90
7 files changed, 148 insertions, 33 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth.yml b/buildscripts/resmokeconfig/suites/sharding_auth.yml
index 320eb022a80..f7698454d30 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth.yml
@@ -33,6 +33,8 @@ selector:
- jstests/sharding/movechunk_interrupt_at_primary_stepdown.js # SERVER-21713
- jstests/sharding/movechunk_parallel.js # SERVER-21713
- jstests/sharding/migration_critical_section_concurrency.js # SERVER-21713
+ # Runs with auth enabled.
+ - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
executor:
config:
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_audit.yml b/buildscripts/resmokeconfig/suites/sharding_auth_audit.yml
index 80991abb93e..a1a469b1b98 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_audit.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_audit.yml
@@ -33,6 +33,8 @@ selector:
- jstests/sharding/migration_server_status.js # SERVER-21713
- jstests/sharding/migration_move_chunk_after_receive.js # SERVER-21713
- jstests/sharding/migration_critical_section_concurrency.js # SERVER-21713
+ # Runs with auth enabled.
+ - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
executor:
config:
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
index 2b9bdd434a5..5f8e868fe89 100644
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
@@ -13,6 +13,7 @@ selector:
- jstests/sharding/mrShardedOutputAuth.js
- jstests/sharding/aggregation_currentop.js
- jstests/sharding/advance_cluster_time_action_type.js
+ - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
# Count/write/aggregate/group commands against the config shard do not support retries yet
- jstests/sharding/addshard1.js
- jstests/sharding/addshard2.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index 092e385b6a9..df75a6adfee 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -52,6 +52,7 @@ selector:
- jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
- jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
- jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
+ - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
- jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
- jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
- jstests/sharding/move_chunk_insert_with_write_retryability.js
diff --git a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
index c19d7b36887..e57650b6fba 100644
--- a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
+++ b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
@@ -46,6 +46,9 @@
rst.initiate();
assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
+ // TODO: SERVER-34964
+ sleep(30000);
+
const testDB = st.s.getDB("test");
// Unsharded collections reject mismatching cluster times and accept valid ones.
diff --git a/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
new file mode 100644
index 00000000000..3c9cf3e6af7
--- /dev/null
+++ b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
@@ -0,0 +1,82 @@
+/**
+ * Tests that mongod does not gossip cluster time metadata and operation time
+ * until at least one key is created on the config server.
+ * @tags: [requires_persistence]
+ */
+
+(function() {
+ "use strict";
+
+ load("jstests/multiVersion/libs/multi_rs.js");
+
+ // TODO SERVER-32672: remove this flag.
+ TestData.skipGossipingClusterTime = true;
+ const keyFile = 'jstests/libs/key1';
+ const adminUser = {db: "admin", username: "foo", password: "bar"};
+ const rUser = {db: "test", username: "r", password: "bar"};
+
+ function assertContainsValidLogicalTime(res) {
+ assert.hasFields(res, ["$clusterTime"]);
+ assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
+ // clusterTime must be greater than the uninitialized value.
+ assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
+ // The signature must have been signed by a key with a valid generation.
+ assert(res.$clusterTime.signature.keyId > NumberLong(0));
+
+ assert.hasFields(res, ["operationTime"]);
+ assert(Object.prototype.toString.call(res.operationTime) === "[object Timestamp]",
+ "operationTime must be a timestamp");
+ }
+
+ let st = new ShardingTest({shards: {rs0: {nodes: 2}}, other: {keyFile: keyFile}});
+
+ jsTestLog("Started ShardingTest");
+
+ var adminDB = st.s.getDB("admin");
+ adminDB.createUser({user: adminUser.username, pwd: adminUser.password, roles: ["__system"]});
+
+ adminDB.auth(adminUser.username, adminUser.password);
+ assert(st.s.getDB("admin").system.keys.count() >= 2);
+
+ let priRSConn = st.rs0.getPrimary().getDB("admin");
+ priRSConn.createUser({user: rUser.username, pwd: rUser.password, roles: ["root"]});
+
+ // TODO: SERVER-34964
+ sleep(30000);
+
+ priRSConn.auth(rUser.username, rUser.password);
+ const resWithKeys = priRSConn.runCommand({isMaster: 1});
+ assertContainsValidLogicalTime(resWithKeys);
+
+ // Enable the failpoint, remove all keys, and restart the config servers with the failpoint
+ // still enabled to guarantee there are no keys.
+ for (let i = 0; i < st.configRS.nodes.length; i++) {
+ assert.commandWorked(st.configRS.nodes[i].adminCommand(
+ {"configureFailPoint": "disableKeyGeneration", "mode": "alwaysOn"}));
+ }
+
+ var priCSConn = st.configRS.getPrimary();
+ authutil.asCluster(priCSConn, keyFile, function() {
+ priCSConn.getDB("admin").system.keys.remove({purpose: "HMAC"});
+ });
+
+ assert(adminDB.system.keys.count() == 0, "expected there to be no keys on the config server");
+
+ st.configRS.stopSet(null /* signal */, true /* forRestart */);
+ st.configRS.startSet(
+ {restart: true, setParameter: {"failpoint.disableKeyGeneration": "{'mode':'alwaysOn'}"}});
+
+ // bounce rs0 to clean the key cache
+ st.rs0.stopSet(null /* signal */, true /* forRestart */);
+ st.rs0.startSet({restart: true});
+
+ priRSConn.auth(rUser.username, rUser.password);
+ const resNoKeys = priRSConn.runCommand({isMaster: 1});
+ assert.commandWorked(resNoKeys);
+
+ assert.eq(resNoKeys.hasOwnProperty("$clusterTime"), false);
+ assert.eq(resNoKeys.hasOwnProperty("operationTime"), false);
+
+ st.stop();
+})();
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index 52ed67098da..9c9f784d9b5 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -252,6 +252,10 @@ private:
const bool _maintenanceModeSet;
};
+bool hasClusterTime(BSONObj metadata) {
+ return metadata.hasField(rpc::LogicalTimeMetadata::fieldName());
+}
+
// Called from the error contexts where request may not be available.
// It only attaches clusterTime and operationTime.
void appendReplyMetadataOnError(OperationContext* opCtx, BSONObjBuilder* metadataBob) {
@@ -271,8 +275,10 @@ void appendReplyMetadataOnError(OperationContext* opCtx, BSONObjBuilder* metadat
} else if (auto validator = LogicalTimeValidator::get(opCtx)) {
auto currentTime =
validator->trySignLogicalTime(LogicalClock::get(opCtx)->getClusterTime());
- rpc::LogicalTimeMetadata logicalTimeMetadata(currentTime);
- logicalTimeMetadata.writeToMetadata(metadataBob);
+ if (currentTime.getKeyId() != 0) {
+ rpc::LogicalTimeMetadata logicalTimeMetadata(currentTime);
+ logicalTimeMetadata.writeToMetadata(metadataBob);
+ }
}
}
}
@@ -310,8 +316,11 @@ void appendReplyMetadata(OperationContext* opCtx,
} else if (auto validator = LogicalTimeValidator::get(opCtx)) {
auto currentTime =
validator->trySignLogicalTime(LogicalClock::get(opCtx)->getClusterTime());
- rpc::LogicalTimeMetadata logicalTimeMetadata(currentTime);
- logicalTimeMetadata.writeToMetadata(metadataBob);
+ // Do not add $clusterTime if the signature and keyId are dummy.
+ if (currentTime.getKeyId() != 0) {
+ rpc::LogicalTimeMetadata logicalTimeMetadata(currentTime);
+ logicalTimeMetadata.writeToMetadata(metadataBob);
+ }
}
}
}
@@ -530,22 +539,23 @@ bool runCommandImpl(OperationContext* opCtx,
Command::appendCommandStatus(inPlaceReplyBob, result);
- auto operationTime = computeOperationTime(
- opCtx, startOperationTime, repl::ReadConcernArgs::get(opCtx).getLevel());
+ BSONObjBuilder metadataBob;
+ appendReplyMetadata(opCtx, request, &metadataBob);
+ auto metadata = metadataBob.done();
- // An uninitialized operation time means the cluster time is not propagated, so the operation
- // time should not be attached to the response.
- if (operationTime != LogicalTime::kUninitialized &&
+ if (hasClusterTime(metadata) &&
(serverGlobalParams.featureCompatibility.getVersion() ==
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36)) {
- operationTime.appendAsOperationTime(&inPlaceReplyBob);
+ auto operationTime = computeOperationTime(
+ opCtx, startOperationTime, repl::ReadConcernArgs::get(opCtx).getLevel());
+
+ if (operationTime != LogicalTime::kUninitialized) {
+ operationTime.appendAsOperationTime(&inPlaceReplyBob);
+ }
}
inPlaceReplyBob.doneFast();
-
- BSONObjBuilder metadataBob;
- appendReplyMetadata(opCtx, request, &metadataBob);
- replyBuilder->setMetadata(metadataBob.done());
+ replyBuilder->setMetadata(metadata);
return result;
}
@@ -775,35 +785,37 @@ void execCommandDatabase(OperationContext* opCtx,
BSONObjBuilder metadataBob;
appendReplyMetadata(opCtx, request, &metadataBob);
-
- // Note: the read concern may not have been successfully or yet placed on the opCtx, so
- // parsing it separately here.
- const std::string db = request.getDatabase().toString();
- auto readConcernArgsStatus = _extractReadConcern(
- request.body, command->supportsNonLocalReadConcern(db, request.body));
- auto operationTime = readConcernArgsStatus.isOK()
- ? computeOperationTime(
- opCtx, startOperationTime, readConcernArgsStatus.getValue().getLevel())
- : LogicalClock::get(opCtx)->getClusterTime();
+ auto metadata = metadataBob.obj();
// An uninitialized operation time means the cluster time is not propagated, so the
// operation time should not be attached to the error response.
- if (operationTime != LogicalTime::kUninitialized &&
+ if (hasClusterTime(metadata) &&
(serverGlobalParams.featureCompatibility.getVersion() ==
ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo36)) {
+
+ // Note: the read concern may not have been successfully or yet placed on the opCtx, so
+ // parsing it separately here.
+ const std::string db = request.getDatabase().toString();
+ auto readConcernArgsStatus = _extractReadConcern(
+ request.body, command->supportsNonLocalReadConcern(db, request.body));
+ auto operationTime = readConcernArgsStatus.isOK()
+ ? computeOperationTime(
+ opCtx, startOperationTime, readConcernArgsStatus.getValue().getLevel())
+ : LogicalClock::get(opCtx)->getClusterTime();
+
LOG(1) << "assertion while executing command '" << request.getCommandName() << "' "
<< "on database '" << request.getDatabase() << "' "
<< "with arguments '" << command->getRedactedCopyForLogging(request.body)
<< "' and operationTime '" << operationTime.toString() << "': " << e.toString();
- _generateErrorResponse(opCtx, replyBuilder, e, metadataBob.obj(), operationTime);
+ _generateErrorResponse(opCtx, replyBuilder, e, metadata, operationTime);
} else {
LOG(1) << "assertion while executing command '" << request.getCommandName() << "' "
<< "on database '" << request.getDatabase() << "' "
<< "with arguments '" << command->getRedactedCopyForLogging(request.body)
<< "': " << e.toString();
- _generateErrorResponse(opCtx, replyBuilder, e, metadataBob.obj());
+ _generateErrorResponse(opCtx, replyBuilder, e, metadata);
}
}
}
@@ -836,14 +848,20 @@ DbResponse runCommands(OperationContext* opCtx, const Message& message) {
if (ErrorCodes::isConnectionFatalMessageParseError(ex.code()))
throw;
- auto operationTime = LogicalClock::get(opCtx)->getClusterTime();
BSONObjBuilder metadataBob;
appendReplyMetadataOnError(opCtx, &metadataBob);
+ auto metadata = metadataBob.obj();
+
// Otherwise, reply with the parse error. This is useful for cases where parsing fails
// due to user-supplied input, such as the document too deep error. Since we failed
// during parsing, we can't log anything about the command.
LOG(1) << "assertion while parsing command: " << ex.toString();
- _generateErrorResponse(opCtx, replyBuilder.get(), ex, metadataBob.obj(), operationTime);
+ if (hasClusterTime(metadata)) {
+ auto operationTime = LogicalClock::get(opCtx)->getClusterTime();
+ _generateErrorResponse(opCtx, replyBuilder.get(), ex, metadata, operationTime);
+ } else {
+ _generateErrorResponse(opCtx, replyBuilder.get(), ex, metadata);
+ }
return; // From lambda. Don't try executing if parsing failed.
}
@@ -878,11 +896,17 @@ DbResponse runCommands(OperationContext* opCtx, const Message& message) {
} catch (const DBException& ex) {
BSONObjBuilder metadataBob;
appendReplyMetadataOnError(opCtx, &metadataBob);
- auto operationTime = LogicalClock::get(opCtx)->getClusterTime();
- LOG(1) << "assertion while executing command '" << request.getCommandName() << "' "
- << "on database '" << request.getDatabase() << "': " << ex.toString();
+ auto metadata = metadataBob.obj();
- _generateErrorResponse(opCtx, replyBuilder.get(), ex, metadataBob.obj(), operationTime);
+ if (hasClusterTime(metadata)) {
+ auto operationTime = LogicalClock::get(opCtx)->getClusterTime();
+ LOG(1) << "assertion while executing command '" << request.getCommandName() << "' "
+ << "on database '" << request.getDatabase() << "': " << ex.toString();
+
+ _generateErrorResponse(opCtx, replyBuilder.get(), ex, metadata, operationTime);
+ } else {
+ _generateErrorResponse(opCtx, replyBuilder.get(), ex, metadata);
+ }
}
}();