-rw-r--r--  jstests/core/list_databases.js  2
-rw-r--r--  jstests/core/views/views_all_commands.js  2
-rw-r--r--  jstests/multiVersion/causal_consistency_downgrade_cluster.js  6
-rw-r--r--  jstests/multiVersion/causal_consistency_upgrade_cluster.js  8
-rw-r--r--  jstests/multiVersion/libs/causal_consistency_helpers.js  18
-rw-r--r--  jstests/noPassthrough/auth_reject_mismatching_logical_times.js  20
-rw-r--r--  jstests/noPassthrough/causal_consistency_feature_compatibility.js  12
-rw-r--r--  jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js  10
-rw-r--r--  jstests/replsets/operation_time_read_and_write_concern.js  10
-rw-r--r--  jstests/sharding/advance_logical_time_with_valid_signature.js  16
-rw-r--r--  jstests/sharding/causal_consistency_shell_support.js  8
-rw-r--r--  jstests/sharding/key_rotation.js  24
-rw-r--r--  jstests/sharding/logical_time_api.js  12
-rw-r--r--  jstests/sharding/logical_time_metadata.js  14
-rw-r--r--  jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js  24
-rw-r--r--  src/mongo/db/commands.cpp  2
-rw-r--r--  src/mongo/db/commands.h  2
-rw-r--r--  src/mongo/db/keys_collection_cache_reader_and_updater.h  2
-rw-r--r--  src/mongo/db/logical_clock.h  2
-rw-r--r--  src/mongo/db/logical_clock_test.cpp  12
-rw-r--r--  src/mongo/db/logical_time_metadata_hook.cpp  2
-rw-r--r--  src/mongo/db/logical_time_validator.cpp  6
-rw-r--r--  src/mongo/db/logical_time_validator.h  6
-rw-r--r--  src/mongo/db/repl/read_concern_args.h  2
-rw-r--r--  src/mongo/db/service_entry_point_mongod.cpp  6
-rw-r--r--  src/mongo/db/time_proof_service.cpp  2
-rw-r--r--  src/mongo/db/time_proof_service.h  2
-rw-r--r--  src/mongo/db/time_proof_service_test.cpp  4
-rw-r--r--  src/mongo/idl/idl_test.cpp  4
-rw-r--r--  src/mongo/rpc/metadata.cpp  4
-rw-r--r--  src/mongo/rpc/metadata/logical_time_metadata.cpp  2
-rw-r--r--  src/mongo/rpc/metadata/logical_time_metadata.h  2
-rw-r--r--  src/mongo/rpc/metadata/logical_time_metadata_test.cpp  14
-rw-r--r--  src/mongo/rpc/reply_builder_test.cpp  6
-rw-r--r--  src/mongo/s/commands/strategy.cpp  2
-rw-r--r--  src/mongo/shell/mongo.js  6
-rw-r--r--  src/mongo/shell/shardingtest.js  12
37 files changed, 144 insertions, 144 deletions
diff --git a/jstests/core/list_databases.js b/jstests/core/list_databases.js
index 80df62f2d1d..8ea7fbfe255 100644
--- a/jstests/core/list_databases.js
+++ b/jstests/core/list_databases.js
@@ -18,7 +18,7 @@
function verifyNameOnly(listDatabasesOut) {
for (let field in listDatabasesOut) {
- assert(['databases', 'nameOnly', 'ok', 'operationTime', '$logicalTime'].some((f) => f ==
+ assert(['databases', 'nameOnly', 'ok', 'operationTime', '$clusterTime'].some((f) => f ==
field),
'unexpected field ' + field);
}
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 0d46a43c2a4..ed8d3eb8747 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -318,7 +318,7 @@
ok: 1
};
delete res.operationTime;
- delete res.$logicalTime;
+ delete res.$clusterTime;
assert.eq(expectedRes, res, "unexpected result for: " + tojson(killCursorsCmd));
}
},
diff --git a/jstests/multiVersion/causal_consistency_downgrade_cluster.js b/jstests/multiVersion/causal_consistency_downgrade_cluster.js
index f173a30cf7a..dc623b4af6b 100644
--- a/jstests/multiVersion/causal_consistency_downgrade_cluster.js
+++ b/jstests/multiVersion/causal_consistency_downgrade_cluster.js
@@ -35,7 +35,7 @@
st.s.getDB("test").runCommand({insert: "foo", documents: [{_id: 1, x: 1}]});
- // Both logical and operation times are returned, and logical times are signed by mongos. Mongos
+ // Both logical and operation times are returned, and cluster times are signed by mongos. Mongos
// doesn't wait for keys at startup, so retry.
assert.soonNoExcept(function() {
assertContainsLogicalAndOperationTime(st.s.getDB("test").runCommand({isMaster: 1}),
@@ -51,7 +51,7 @@
// Change featureCompatibilityVersion to 3.4.
assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: "3.4"}));
- // Mongos still signs logical times, because they are held in memory.
+ // Mongos still signs cluster times, because they are held in memory.
assertContainsLogicalAndOperationTime(st.s.getDB("test").runCommand({isMaster: 1}),
{initialized: true, signed: true});
@@ -73,7 +73,7 @@
st.upgradeCluster("last-stable", {upgradeConfigs: false, upgradeShards: false});
st.restartMongoses();
- // Mongos should no longer return operation or logical times.
+ // Mongos should no longer return operation or cluster times.
assertDoesNotContainLogicalOrOperationTime(st.s.getDB("test").runCommand({isMaster: 1}));
// Downgrade shards next.
diff --git a/jstests/multiVersion/causal_consistency_upgrade_cluster.js b/jstests/multiVersion/causal_consistency_upgrade_cluster.js
index 916684ea29a..ef9d5193d64 100644
--- a/jstests/multiVersion/causal_consistency_upgrade_cluster.js
+++ b/jstests/multiVersion/causal_consistency_upgrade_cluster.js
@@ -1,6 +1,6 @@
/**
* Tests upgrading a cluster with two shards and two mongos servers from last stable to current
- * version, verifying the behavior of $logicalTime metadata and afterClusterTime commands throughout
+ * version, verifying the behavior of $clusterTime metadata and afterClusterTime commands throughout
* the process.
*/
(function() {
@@ -96,7 +96,7 @@
assertAfterClusterTimeReadFails(st.rs0.getPrimary().getDB("test"), "foo");
assertAfterClusterTimeReadFails(st.rs1.getPrimary().getDB("test"), "foo");
- // Neither mongos returns logical time or operation time, because there are no keys in the
+ // Neither mongos returns cluster time or operation time, because there are no keys in the
// config server, since feature compatibility version is still 3.4.
assertDoesNotContainLogicalOrOperationTime(st.s0.getDB("test").runCommand({isMaster: 1}));
assertDoesNotContainLogicalOrOperationTime(st.s1.getDB("test").runCommand({isMaster: 1}));
@@ -112,7 +112,7 @@
// Set feature compatibility version to 3.6 on one mongos.
assert.commandWorked(st.s0.getDB("admin").runCommand({setFeatureCompatibilityVersion: "3.6"}));
- // Now shards and config servers return dummy signed logical times and operation times.
+ // Now shards and config servers return dummy signed cluster times and operation times.
assertContainsLogicalAndOperationTime(
st.rs0.getPrimary().getDB("test").runCommand({isMaster: 1}),
{initialized: true, signed: false});
@@ -124,7 +124,7 @@
{initialized: true, signed: false});
// Once the config primary creates keys, both mongos servers discover them and start returning
- // signed logical times.
+ // signed cluster times.
assert.soonNoExcept(function() {
assertContainsLogicalAndOperationTime(st.s0.getDB("test").runCommand({isMaster: 1}),
{initialized: true, signed: true});
diff --git a/jstests/multiVersion/libs/causal_consistency_helpers.js b/jstests/multiVersion/libs/causal_consistency_helpers.js
index 2f3734be0ee..943e59d5d97 100644
--- a/jstests/multiVersion/libs/causal_consistency_helpers.js
+++ b/jstests/multiVersion/libs/causal_consistency_helpers.js
@@ -20,7 +20,7 @@ function assertDoesNotContainLogicalOrOperationTime(res) {
}
function assertDoesNotContainLogicalTime(res) {
- assert.eq(res.$logicalTime, undefined);
+ assert.eq(res.$clusterTime, undefined);
}
function assertDoesNotContainOperationTime(res) {
@@ -33,25 +33,25 @@ function assertContainsLogicalAndOperationTime(res, opts) {
}
function assertContainsLogicalTime(res, opts) {
- assert.hasFields(res, ["$logicalTime"]);
- assert.hasFields(res.$logicalTime, ["clusterTime", "signature"]);
- assert.hasFields(res.$logicalTime.signature, ["hash", "keyId"]);
+ assert.hasFields(res, ["$clusterTime"]);
+ assert.hasFields(res.$clusterTime, ["clusterTime", "signature"]);
+ assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
if (opts.signed !== undefined) {
- // Signed logical times have a keyId greater than 0.
+ // Signed cluster times have a keyId greater than 0.
if (opts.signed) {
- assert(res.$logicalTime.signature.keyId > NumberLong(0));
+ assert(res.$clusterTime.signature.keyId > NumberLong(0));
} else {
- assert.eq(res.$logicalTime.signature.keyId, NumberLong(0));
+ assert.eq(res.$clusterTime.signature.keyId, NumberLong(0));
}
}
if (opts.initialized !== undefined) {
// Initialized operation times are greater than a null timestamp.
if (opts.initialized) {
- assert.eq(bsonWoCompare(res.$logicalTime.clusterTime, Timestamp(0, 0)), 1);
+ assert.eq(bsonWoCompare(res.$clusterTime.clusterTime, Timestamp(0, 0)), 1);
} else {
- assert.eq(bsonWoCompare(res.$logicalTime.clusterTime, Timestamp(0, 0)), 0);
+ assert.eq(bsonWoCompare(res.$clusterTime.clusterTime, Timestamp(0, 0)), 0);
}
}
}
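
A minimal usage sketch of these helpers (assuming a running ShardingTest `st`, as in the other jstests in this patch; the database name is illustrative, not part of the change):

```javascript
// Minimal usage sketch (assumes a running ShardingTest `st`; "test" is illustrative).
let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));

// A signed, initialized response carries $clusterTime with keyId > 0 and a clusterTime
// greater than Timestamp(0, 0).
assertContainsLogicalAndOperationTime(res, {initialized: true, signed: true});

// The raw metadata has the shape these helpers check:
//   {clusterTime: <Timestamp>, signature: {hash: <BinData>, keyId: <NumberLong>}}
printjson(res.$clusterTime);
```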
diff --git a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
index 3de5f3f440c..56c2695bdd8 100644
--- a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
+++ b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
@@ -1,31 +1,31 @@
/**
- * Verifies mismatching logical time objects are rejected by a sharded cluster when auth is on. In
+ * Verifies mismatching cluster time objects are rejected by a sharded cluster when auth is on. In
* noPassthrough because auth is manually set.
*/
(function() {
"use strict";
- // Given a valid logical time object, returns one with the same signature, but a mismatching
+ // Given a valid cluster time object, returns one with the same signature, but a mismatching
// cluster time.
function mismatchingLogicalTime(lt) {
return Object.merge(lt, {clusterTime: Timestamp(lt.clusterTime.getTime() + 100, 0)});
}
function assertRejectsMismatchingLogicalTime(db) {
- let validTime = db.runCommand({isMaster: 1}).$logicalTime;
+ let validTime = db.runCommand({isMaster: 1}).$clusterTime;
let mismatchingTime = mismatchingLogicalTime(validTime);
assert.commandFailedWithCode(
- db.runCommand({isMaster: 1, $logicalTime: mismatchingTime}),
+ db.runCommand({isMaster: 1, $clusterTime: mismatchingTime}),
ErrorCodes.TimeProofMismatch,
- "expected command with mismatching logical time and signature to be rejected");
+ "expected command with mismatching cluster time and signature to be rejected");
}
function assertAcceptsValidLogicalTime(db) {
- let validTime = db.runCommand({isMaster: 1}).$logicalTime;
+ let validTime = db.runCommand({isMaster: 1}).$clusterTime;
assert.commandWorked(
- testDB.runCommand({isMaster: 1, $logicalTime: validTime}),
- "expected command with valid logical time and signature to be accepted");
+ testDB.runCommand({isMaster: 1, $clusterTime: validTime}),
+ "expected command with valid cluster time and signature to be accepted");
}
// Start the sharding test with auth on.
@@ -48,7 +48,7 @@
const testDB = st.s.getDB("test");
- // Unsharded collections reject mismatching logical times and accept valid ones.
+ // Unsharded collections reject mismatching cluster times and accept valid ones.
assertRejectsMismatchingLogicalTime(testDB);
assertAcceptsValidLogicalTime(testDB);
@@ -57,7 +57,7 @@
assert.commandWorked(
testDB.adminCommand({shardCollection: testDB.foo.getFullName(), key: {_id: 1}}));
- // Sharded collections reject mismatching logical times and accept valid ones.
+ // Sharded collections reject mismatching cluster times and accept valid ones.
assertRejectsMismatchingLogicalTime(testDB);
assertAcceptsValidLogicalTime(testDB);
diff --git a/jstests/noPassthrough/causal_consistency_feature_compatibility.js b/jstests/noPassthrough/causal_consistency_feature_compatibility.js
index fc5075a3d49..6ec8632975f 100644
--- a/jstests/noPassthrough/causal_consistency_feature_compatibility.js
+++ b/jstests/noPassthrough/causal_consistency_feature_compatibility.js
@@ -10,7 +10,7 @@
function logicalTimeCanBeProcessed(db) {
const increment = 5000;
- let initialTime = db.runCommand({isMaster: 1}).$logicalTime;
+ let initialTime = db.runCommand({isMaster: 1}).$clusterTime;
if (!initialTime) {
return false;
}
@@ -20,8 +20,8 @@
{clusterTime: Timestamp(initialTime.clusterTime.getTime() + increment, 0)});
let returnedTime = rst.getPrimary()
.getDB("test")
- .runCommand({isMaster: 1, $logicalTime: laterTime})
- .$logicalTime;
+ .runCommand({isMaster: 1, $clusterTime: laterTime})
+ .$clusterTime;
// Use a range to allow for unrelated activity advancing cluster time.
return (returnedTime.clusterTime.getTime() - initialTime.clusterTime.getTime()) >=
@@ -64,14 +64,14 @@
assert.commandWorked(rst.getPrimary().getDB("test").runCommand(
{find: "foo", readConcern: {level: "majority", afterClusterTime: Timestamp(1, 1)}}));
- // Verify logical time can be processed by shards and the config servers.
+ // Verify cluster time can be processed by shards and the config servers.
assert(logicalTimeCanBeProcessed(rst.getPrimary().getDB("test")));
assert(logicalTimeCanBeProcessed(st.configRS.getPrimary().getDB("test")));
// Set featureCompatibilityVersion to 3.4
assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: "3.4"}));
- // Verify logical time cannot be processed by shards and the config servers now.
+ // Verify cluster time cannot be processed by shards and the config servers now.
assert(!logicalTimeCanBeProcessed(rst.getPrimary().getDB("test")));
assert(!logicalTimeCanBeProcessed(st.configRS.getPrimary().getDB("test")));
@@ -89,7 +89,7 @@
// setFeatureCompatibilityVersion can only be run on the admin database on mongos.
assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: "3.6"}));
- // Verify logical time can be processed by shards and the config servers again.
+ // Verify cluster time can be processed by shards and the config servers again.
assert(logicalTimeCanBeProcessed(rst.getPrimary().getDB("test")));
assert(logicalTimeCanBeProcessed(st.configRS.getPrimary().getDB("test")));
diff --git a/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js b/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js
index 5406dfa501e..2a3c07186a6 100644
--- a/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js
+++ b/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js
@@ -1,6 +1,6 @@
/**
* Tests what values are accepted for the maxAcceptableLogicalClockDriftSecs startup parameter, and
- * that servers in a sharded clusters reject logical times more than
+ * that servers in a sharded cluster reject cluster times more than
* maxAcceptableLogicalClockDriftSecs ahead of their wall clocks.
*/
(function() {
@@ -43,15 +43,15 @@
});
let testDB = st.s.getDB("test");
- // Contact cluster to get initial logical time.
+ // Contact cluster to get initial cluster time.
let res = assert.commandWorked(testDB.runCommand({isMaster: 1}));
- let lt = res.$logicalTime;
+ let lt = res.$clusterTime;
- // Try to advance logical time by more than the max acceptable drift, which should fail the rate
+ // Try to advance cluster time by more than the max acceptable drift, which should fail the rate
// limiter.
let tooFarTime = Object.assign(
lt, {clusterTime: new Timestamp(lt.clusterTime.getTime() + (maxDriftValue * 2), 0)});
- assert.commandFailedWithCode(testDB.runCommand({isMaster: 1, $logicalTime: tooFarTime}),
+ assert.commandFailedWithCode(testDB.runCommand({isMaster: 1, $clusterTime: tooFarTime}),
ErrorCodes.ClusterTimeFailsRateLimiter,
"expected command to not pass the rate limiter");
diff --git a/jstests/replsets/operation_time_read_and_write_concern.js b/jstests/replsets/operation_time_read_and_write_concern.js
index e5a6faba1ce..ebe3b186bf9 100644
--- a/jstests/replsets/operation_time_read_and_write_concern.js
+++ b/jstests/replsets/operation_time_read_and_write_concern.js
@@ -24,7 +24,7 @@
var collectionName = "foo";
// readConcern level majority:
- // operationTime is the logical time of the last committed op in the oplog.
+ // operationTime is the cluster time of the last committed op in the oplog.
jsTestLog("Testing operationTime for readConcern level majority with afterClusterTime.");
var majorityDoc = {_id: 10, x: 1};
var localDoc = {_id: 15, x: 2};
@@ -55,7 +55,7 @@
assert.eq(majorityReadOperationTime,
majorityWriteOperationTime,
"the operationTime of the majority read, " + majorityReadOperationTime +
- ", should be the logical time of the last committed op in the oplog, " +
+ ", should be the cluster time of the last committed op in the oplog, " +
majorityWriteOperationTime);
// Validate that after replication, the local write data is now returned by the same query.
@@ -77,7 +77,7 @@
assert.eq(secondMajorityReadOperationTime,
localWriteOperationTime,
"the operationTime of the second majority read, " + secondMajorityReadOperationTime +
- ", should be the logical time of the replicated local write, " +
+ ", should be the cluster time of the replicated local write, " +
localWriteOperationTime);
// readConcern level linearizable is not currently supported.
@@ -92,7 +92,7 @@
"linearizable reads with afterClusterTime are not supported and should not be allowed");
// writeConcern level majority:
- // operationTime is the logical time of the write if it succeeds, or of the previous successful
+ // operationTime is the cluster time of the write if it succeeds, or of the previous successful
// write at the time the write was determined to have failed, or a no-op.
jsTestLog("Testing operationTime for writeConcern level majority.");
var successfulDoc = {_id: 1000, y: 1};
@@ -113,7 +113,7 @@
failedWriteOperationTime,
majorityWriteOperationTime,
"the operationTime of the failed majority write, " + failedWriteOperationTime +
- ", should be the logical time of the last successful write at the time it failed, " +
+ ", should be the cluster time of the last successful write at the time it failed, " +
majorityWriteOperationTime);
replTest.stopSet();
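
A short sketch of the causal read-after-write pattern these assertions describe (assuming a primary connection `testDB`; the collection and document are illustrative): the operationTime returned by a write is fed back as afterClusterTime on a majority read.

```javascript
// Sketch of read-after-write using operationTime (names are illustrative).
let writeRes = assert.commandWorked(testDB.runCommand(
    {insert: "foo", documents: [{_id: 10, x: 1}], writeConcern: {w: "majority"}}));

// A majority read with afterClusterTime >= the write's operationTime must observe the write.
let readRes = assert.commandWorked(testDB.runCommand({
    find: "foo",
    readConcern: {level: "majority", afterClusterTime: writeRes.operationTime}
}));
assert.eq(1, readRes.cursor.firstBatch.length);
```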
diff --git a/jstests/sharding/advance_logical_time_with_valid_signature.js b/jstests/sharding/advance_logical_time_with_valid_signature.js
index e2a379d981f..ef2f5e44e81 100644
--- a/jstests/sharding/advance_logical_time_with_valid_signature.js
+++ b/jstests/sharding/advance_logical_time_with_valid_signature.js
@@ -1,5 +1,5 @@
/**
- * Tests that the mongo shell can use a logical time with a valid signature to advance a server's
+ * Tests that the mongo shell can use a cluster time with a valid signature to advance a server's
* cluster time.
*/
(function() {
@@ -26,16 +26,16 @@
// Get logicalTime metadata from the connected mongos's response and send it in an isMaster
// command to the disconnected mongos. isMaster does not require mongos to contact any other
// servers, so the command should succeed.
- let lt = res.$logicalTime;
+ let lt = res.$clusterTime;
res = assert.commandWorked(
- disconnectedDB.runCommand({isMaster: 1, $logicalTime: lt}),
- "expected the disconnected mongos to accept logical time: " + tojson(lt));
+ disconnectedDB.runCommand({isMaster: 1, $clusterTime: lt}),
+ "expected the disconnected mongos to accept cluster time: " + tojson(lt));
- // Verify logical time response from the disconnected mongos matches what was passed.
+ // Verify cluster time response from the disconnected mongos matches what was passed.
assert.eq(lt,
- res.$logicalTime,
- "expected the disconnected mongos to send logical time: " + tojson(lt) +
- ", received: " + tojson(res.$logicalTime));
+ res.$clusterTime,
+ "expected the disconnected mongos to send cluster time: " + tojson(lt) +
+ ", received: " + tojson(res.$clusterTime));
st.stop();
})();
diff --git a/jstests/sharding/causal_consistency_shell_support.js b/jstests/sharding/causal_consistency_shell_support.js
index b3a84fc9d96..4a75bc8c2d7 100644
--- a/jstests/sharding/causal_consistency_shell_support.js
+++ b/jstests/sharding/causal_consistency_shell_support.js
@@ -53,7 +53,7 @@
function commandReturnsExpectedResult(cmdObj, db, resCallback) {
const mongo = db.getMongo();
- // Use the latest logical time returned as a new operationTime and run command.
+ // Use the latest cluster time returned as a new operationTime and run command.
const clusterTimeObj = mongo.getClusterTime();
mongo.setOperationTime(clusterTimeObj.clusterTime);
const res = assert.commandWorked(testDB.runCommand(cmdObj));
@@ -136,8 +136,8 @@
checkCausalConsistencySupportForCommandNames(supportedCommandNames, false);
checkCausalConsistencySupportForCommandNames(unsupportedCommandNames, false);
- // Verify logical times are tracked even before causal consistency is set (so the first
- // operation with causal consistency set can use valid logical times).
+ // Verify cluster times are tracked even before causal consistency is set (so the first
+ // operation with causal consistency set can use valid cluster times).
mongo._operationTime = null;
mongo._clusterTime = null;
@@ -157,7 +157,7 @@
runCommandAndCheckLogicalTimes(
{update: "foo", updates: [{q: {x: 2}, u: {$set: {x: 3}}}]}, testDB, true);
- // Test that each supported command works as expected and the shell's logical times are properly
+ // Test that each supported command works as expected and the shell's cluster times are properly
// forwarded to the server and updated based on the response.
mongo.setCausalConsistency(true);
diff --git a/jstests/sharding/key_rotation.js b/jstests/sharding/key_rotation.js
index a825a249d1a..ee642c7f059 100644
--- a/jstests/sharding/key_rotation.js
+++ b/jstests/sharding/key_rotation.js
@@ -1,7 +1,7 @@
/**
* Tests for causal consistency key rotation. In particular, tests:
* - that a sharded cluster with no keys inserts new keys after startup.
- * - responses from servers in a sharded cluster contain a logical time object with a signature.
+ * - responses from servers in a sharded cluster contain a cluster time object with a signature.
* - manual key rotation is possible by deleting existing keys and restarting the cluster.
*
* Manual key rotation requires restarting a shard, so a persistent storage engine is necessary.
@@ -24,13 +24,13 @@
"key document " + i + ": " + tojson(key) + ", did not have all of the expected fields");
});
- // Verify there is a $logicalTime with a signature in the response.
- jsTestLog("Verify a signature is included in the logical time in a response.");
+ // Verify there is a $clusterTime with a signature in the response.
+ jsTestLog("Verify a signature is included in the cluster time in a response.");
let res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
- assert.hasFields(res, ["$logicalTime"]);
- assert.hasFields(res.$logicalTime, ["signature"]);
- assert.hasFields(res.$logicalTime.signature, ["hash", "keyId"]);
+ assert.hasFields(res, ["$clusterTime"]);
+ assert.hasFields(res.$clusterTime, ["signature"]);
+ assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
// Verify manual key rotation.
jsTestLog("Verify manual key rotation.");
@@ -64,16 +64,16 @@
st.rs0.startSet({restart: true});
st.restartMongos(0);
- // The shard primary should return a dummy signed logical time, because there are no keys.
+ // The shard primary should return a dummy signed cluster time, because there are no keys.
res = assert.commandWorked(st.rs0.getPrimary().getDB("test").runCommand({isMaster: 1}));
- assert.hasFields(res, ["$logicalTime", "operationTime"]);
- assert.eq(res.$logicalTime.signature.keyId, NumberLong(0));
+ assert.hasFields(res, ["$clusterTime", "operationTime"]);
+ assert.eq(res.$clusterTime.signature.keyId, NumberLong(0));
- // Mongos shouldn't return a logical time at all.
+ // Mongos shouldn't return a cluster time at all.
res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
assert.throws(function() {
- assert.hasFields(res, ["$logicalTime", "operationTime"]);
- }, [], "expected the mongos not to return logical time or operation time");
+ assert.hasFields(res, ["$clusterTime", "operationTime"]);
+ }, [], "expected the mongos not to return cluster time or operation time");
// Resume key generation.
for (let i = 0; i < st.configRS.nodes.length; i++) {
diff --git a/jstests/sharding/logical_time_api.js b/jstests/sharding/logical_time_api.js
index 61fcb5b9d14..77df002f2bf 100644
--- a/jstests/sharding/logical_time_api.js
+++ b/jstests/sharding/logical_time_api.js
@@ -11,7 +11,7 @@
"use strict";
// Returns true if the given object contains a logicalTime BSON object in the following format:
- // $logicalTime: {
+ // $clusterTime: {
// clusterTime: <Timestamp>
// signature: {
// hash: <BinData>
@@ -23,7 +23,7 @@
return false;
}
- var logicalTime = obj.$logicalTime;
+ var logicalTime = obj.$clusterTime;
return logicalTime && isType(logicalTime, "BSON") &&
isType(logicalTime.clusterTime, "Timestamp") && isType(logicalTime.signature, "BSON") &&
isType(logicalTime.signature.hash, "BinData") &&
@@ -59,8 +59,8 @@
"Expected command body from a mongos talking to a sharded collection on a sharded " +
"replica set to contain logicalTime, received: " + tojson(res));
- // Verify mongos can accept requests with $logicalTime in the command body.
- assert.commandWorked(testDB.runCommand({isMaster: 1, $logicalTime: res.$logicalTime}));
+ // Verify mongos can accept requests with $clusterTime in the command body.
+ assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
// A mongod in a sharded replica set returns a logicalTime bson that matches the expected
// format.
@@ -70,8 +70,8 @@
"Expected command body from a mongod in a sharded replica set to contain " +
"logicalTime, received: " + tojson(res));
- // Verify mongod can accept requests with $logicalTime in the command body.
- res = assert.commandWorked(testDB.runCommand({isMaster: 1, $logicalTime: res.$logicalTime}));
+ // Verify mongod can accept requests with $clusterTime in the command body.
+ res = assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: res.$clusterTime}));
st.stop();
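
For reference, the format documented in the comment above looks roughly like this in a post-rename shell response (all values are hypothetical, made up for illustration):

```javascript
// Hypothetical response fragment matching the documented $clusterTime format.
const exampleResponse = {
    ok: 1,
    operationTime: Timestamp(1501000000, 1),
    $clusterTime: {
        clusterTime: Timestamp(1501000000, 1),
        signature: {
            hash: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            keyId: NumberLong("6443473867414822913")
        }
    }
};
printjson(exampleResponse.$clusterTime);
```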
diff --git a/jstests/sharding/logical_time_metadata.js b/jstests/sharding/logical_time_metadata.js
index adf0d397c82..63d3fc57cef 100644
--- a/jstests/sharding/logical_time_metadata.js
+++ b/jstests/sharding/logical_time_metadata.js
@@ -1,7 +1,7 @@
/**
- * Basic test that checks that mongos includes the logical time metatadata in it's response.
- * This does not test logical time propagation via the shell as there are many back channels
- * where the logical time metadata can propagated, making it inherently racy.
+ * Basic test that checks that mongos includes the cluster time metadata in its response.
+ * This does not test cluster time propagation via the shell as there are many back channels
+ * where the cluster time metadata can be propagated, making it inherently racy.
*/
(function() {
var st = new ShardingTest({shards: {rs0: {nodes: 3}}, mongosWaitsForKeys: true});
@@ -9,11 +9,11 @@
var db = st.s.getDB('test');
- // insert on one shard and extract the logical time
+ // insert on one shard and extract the cluster time
var res = assert.commandWorked(db.runCommand({insert: 'user', documents: [{x: 10}]}));
- assert.hasFields(res, ['$logicalTime']);
+ assert.hasFields(res, ['$clusterTime']);
- var logicalTimeMetadata = res.$logicalTime;
+ var logicalTimeMetadata = res.$clusterTime;
assert.hasFields(logicalTimeMetadata, ['clusterTime', 'signature']);
res = st.rs0.getPrimary().adminCommand({replSetGetStatus: 1});
@@ -24,7 +24,7 @@
'appliedTime: ' + tojson(appliedTime) + ' != clusterTime: ' +
tojson(logicalTimeMetadata.clusterTime));
- assert.commandWorked(db.runCommand({ping: 1, '$logicalTime': logicalTimeMetadata}));
+ assert.commandWorked(db.runCommand({ping: 1, '$clusterTime': logicalTimeMetadata}));
st.stop();
diff --git a/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js b/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
index 1ed8d9c6fb1..3549e9c0546 100644
--- a/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
+++ b/jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
@@ -1,5 +1,5 @@
/**
- * Tests that mongos does not gossip logical time metadata until at least one key is created on the
+ * Tests that mongos does not gossip cluster time metadata until at least one key is created on the
* config server, and that it does not block waiting for keys at startup.
*/
@@ -10,19 +10,19 @@
load("jstests/multiVersion/libs/multi_cluster.js"); // For restartMongoses.
function assertContainsValidLogicalTime(res) {
- assert.hasFields(res, ["$logicalTime"]);
- assert.hasFields(res.$logicalTime, ["signature", "clusterTime"]);
+ assert.hasFields(res, ["$clusterTime"]);
+ assert.hasFields(res.$clusterTime, ["signature", "clusterTime"]);
// clusterTime must be greater than the uninitialized value.
- assert.eq(bsonWoCompare(res.$logicalTime.clusterTime, Timestamp(0, 0)), 1);
- assert.hasFields(res.$logicalTime.signature, ["hash", "keyId"]);
+ assert.eq(bsonWoCompare(res.$clusterTime.clusterTime, Timestamp(0, 0)), 1);
+ assert.hasFields(res.$clusterTime.signature, ["hash", "keyId"]);
// The signature must have been signed by a key with a valid generation.
- assert(res.$logicalTime.signature.keyId > NumberLong(0));
+ assert(res.$clusterTime.signature.keyId > NumberLong(0));
}
let st = new ShardingTest({shards: {rs0: {nodes: 2}}});
// Verify there are keys in the config server eventually, since mongos doesn't block for keys at
- // startup, and that once there are, mongos sends $logicalTime with a signature in responses.
+ // startup, and that once there are, mongos sends $clusterTime with a signature in responses.
assert.soonNoExcept(function() {
assert(st.s.getDB("admin").system.keys.count() >= 2);
@@ -30,7 +30,7 @@
assertContainsValidLogicalTime(res);
return true;
- }, "expected keys to be created and for mongos to send signed logical times");
+ }, "expected keys to be created and for mongos to send signed cluster times");
// Enable the failpoint, remove all keys, and restart the config servers with the failpoint
// still enabled to guarantee there are no keys.
@@ -57,11 +57,11 @@
// Mongos should restart with no problems.
st.restartMongoses();
- // There should be no logical time metadata in mongos responses.
+ // There should be no cluster time metadata in mongos responses.
res = assert.commandWorked(st.s.getDB("test").runCommand({isMaster: 1}));
assert.throws(function() {
assertContainsValidLogicalTime(res);
- }, [], "expected mongos to not return logical time metadata");
+ }, [], "expected mongos to not return cluster time metadata");
// Disable the failpoint.
for (let i = 0; i < st.configRS.nodes.length; i++) {
@@ -69,11 +69,11 @@
{"configureFailPoint": "disableKeyGeneration", "mode": "off"}));
}
- // Eventually mongos will discover the new keys, and start signing logical times.
+ // Eventually mongos will discover the new keys, and start signing cluster times.
assert.soonNoExcept(function() {
assertContainsValidLogicalTime(st.s.getDB("test").runCommand({isMaster: 1}));
return true;
- }, "expected mongos to eventually start signing logical times", 60 * 1000); // 60 seconds.
+ }, "expected mongos to eventually start signing cluster times", 60 * 1000); // 60 seconds.
// There may be a delay between the creation of the first and second keys, but mongos will start
// signing after seeing the first key, so there is only guaranteed to be one at this point.
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index ce98a97f97f..b431751dc9f 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -379,7 +379,7 @@ void Command::filterCommandReplyForPassthrough(const BSONObj& cmdObj, BSONObjBui
const auto name = elem.fieldNameStringData();
if (name == "$configServerState" || //
name == "$gleStats" || //
- name == "$logicalTime" || //
+ name == "$clusterTime" || //
name == "$oplogQueryData" || //
name == "$replData" || //
name == "operationTime") {
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index 13db6740e6b..1efd437d3ef 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -510,7 +510,7 @@ public:
arg == "$queryOptions" || //
arg == "$readPreference" || //
arg == "$replData" || //
- arg == "$logicalTime" || //
+ arg == "$clusterTime" || //
arg == "maxTimeMS" || //
arg == "readConcern" || //
arg == "shardVersion" || //
diff --git a/src/mongo/db/keys_collection_cache_reader_and_updater.h b/src/mongo/db/keys_collection_cache_reader_and_updater.h
index 590d88f6cdb..4719f58c48b 100644
--- a/src/mongo/db/keys_collection_cache_reader_and_updater.h
+++ b/src/mongo/db/keys_collection_cache_reader_and_updater.h
@@ -39,7 +39,7 @@ class ShardingCatalogClient;
/**
* Keeps a local cache of the keys with the ability to refresh. The refresh method also makes sure
- * that there will be valid keys available to sign the current logical time and there will be
+ * that there will be valid keys available to sign the current cluster time and there will be
* another key ready after the current key expires.
*
* Assumptions and limitations:
diff --git a/src/mongo/db/logical_clock.h b/src/mongo/db/logical_clock.h
index a1be25c3265..2408e03439b 100644
--- a/src/mongo/db/logical_clock.h
+++ b/src/mongo/db/logical_clock.h
@@ -84,7 +84,7 @@ public:
private:
/**
- * Rate limiter for advancing logical time. Rejects newTime if its seconds value is more than
+ * Rate limiter for advancing cluster time. Rejects newTime if its seconds value is more than
* kMaxAcceptableLogicalClockDriftSecs seconds ahead of this node's wall clock.
*/
Status _passesRateLimiter_inlock(LogicalTime newTime);
diff --git a/src/mongo/db/logical_clock_test.cpp b/src/mongo/db/logical_clock_test.cpp
index 27c1f1a0907..bf3b78a3062 100644
--- a/src/mongo/db/logical_clock_test.cpp
+++ b/src/mongo/db/logical_clock_test.cpp
@@ -103,7 +103,7 @@ TEST_F(LogicalClockTest, advanceClusterTime) {
ASSERT_TRUE(t1 == getClock()->getClusterTime());
}
-// Verify rate limiter rejects logical times whose seconds values are too far ahead.
+// Verify rate limiter rejects cluster times whose seconds values are too far ahead.
TEST_F(LogicalClockTest, RateLimiterRejectsLogicalTimesTooFarAhead) {
setMockClockSourceTime(Date_t::fromMillisSinceEpoch(10 * 1000));
@@ -148,7 +148,7 @@ TEST_F(LogicalClockTest, WritesToOplogAdvanceClusterTime) {
// Tests the scenario where an admin incorrectly sets the wall clock more than
// maxAcceptableLogicalClockDriftSecs in the past at startup, and cluster time is initialized to
// the incorrect past time, then the admin resets the clock to the current time. In this case,
-// logical time can be advanced through metadata as long as the new time isn't
+// cluster time can be advanced through metadata as long as the new time isn't
// maxAcceptableLogicalClockDriftSecs ahead of the correct current wall clock time, since the rate
// limiter compares new times to the wall clock, not the cluster time.
TEST_F(LogicalClockTest, WallClockSetTooFarInPast) {
@@ -180,7 +180,7 @@ TEST_F(LogicalClockTest, WallClockSetTooFarInPast) {
// Tests the scenario where an admin incorrectly sets the wall clock more than
// maxAcceptableLogicalClockDriftSecs in the future and a write is accepted, advancing cluster
-// time, then the admin resets the clock to the current time. In this case, logical time cannot be
+// time, then the admin resets the clock to the current time. In this case, cluster time cannot be
// advanced through metadata unless the drift parameter is increased.
TEST_F(LogicalClockTest, WallClockSetTooFarInFuture) {
auto oneDay = Seconds(24 * 60 * 60);
@@ -206,7 +206,7 @@ TEST_F(LogicalClockTest, WallClockSetTooFarInFuture) {
// Verify that maxAcceptableLogicalClockDriftSecs parameter has to be increased to advance
// cluster time through metadata.
auto nextTime = getClock()->getClusterTime();
- nextTime.addTicks(1); // The next lowest logical time.
+ nextTime.addTicks(1); // The next lowest cluster time.
ASSERT_EQ(ErrorCodes::ClusterTimeFailsRateLimiter, getClock()->advanceClusterTime(nextTime));
@@ -308,11 +308,11 @@ TEST_F(LogicalClockTest, ReserveTicksBehaviorWhenWallClockNearMaxTime) {
ASSERT_TRUE(getClock()->getClusterTime() == buildLogicalTime(1, 1));
}
-// Verify the clock rejects logical times greater than the max allowed time.
+// Verify the clock rejects cluster times greater than the max allowed time.
TEST_F(LogicalClockTest, RejectsLogicalTimesGreaterThanMaxTime) {
unsigned maxVal = LogicalClock::kMaxSignedInt;
- // A logical time can be greater than the maximum value allowed because the signed integer
+ // A cluster time can be greater than the maximum value allowed because the signed integer
// maximum is used for legacy compatibility, but these fields are stored as unsigned integers.
auto beyondMaxTime = buildLogicalTime(maxVal + 1, maxVal + 1);
diff --git a/src/mongo/db/logical_time_metadata_hook.cpp b/src/mongo/db/logical_time_metadata_hook.cpp
index ad9301c681e..9d14f2570e8 100644
--- a/src/mongo/db/logical_time_metadata_hook.cpp
+++ b/src/mongo/db/logical_time_metadata_hook.cpp
@@ -64,7 +64,7 @@ Status LogicalTimeMetadataHook::readReplyMetadata(StringData replySource,
auto& signedTime = parseStatus.getValue().getSignedTime();
- // LogicalTimeMetadata is default constructed if no logical time metadata was sent, so a
+ // LogicalTimeMetadata is default constructed if no cluster time metadata was sent, so a
// default constructed SignedLogicalTime should be ignored.
if (signedTime.getTime() == LogicalTime::kUninitialized) {
return Status::OK();
diff --git a/src/mongo/db/logical_time_validator.cpp b/src/mongo/db/logical_time_validator.cpp
index 2248145ce3d..d17a728435c 100644
--- a/src/mongo/db/logical_time_validator.cpp
+++ b/src/mongo/db/logical_time_validator.cpp
@@ -85,7 +85,7 @@ SignedLogicalTime LogicalTimeValidator::_getProof(const KeysCollectionDocument&
auto key = keyDoc.getKey();
// Compare and calculate HMAC inside mutex to prevent multiple threads computing HMAC for the
- // same logical time.
+ // same cluster time.
stdx::lock_guard<stdx::mutex> lk(_mutex);
// Note: _lastSeenValidTime will initially not have a proof set.
if (newTime == _lastSeenValidTime.getTime() && _lastSeenValidTime.getProof()) {
@@ -149,8 +149,8 @@ Status LogicalTimeValidator::validate(OperationContext* opCtx, const SignedLogic
const auto& key = keyStatus.getValue().getKey();
const auto newProof = newTime.getProof();
- // Logical time is only sent if a server's clock can verify and sign logical times, so any
- // received logical times should have proofs.
+ // Cluster time is only sent if a server's clock can verify and sign cluster times, so any
+ // received cluster times should have proofs.
invariant(newProof);
auto res = _timeProofService.checkProof(newTime.getTime(), newProof.get(), key);
diff --git a/src/mongo/db/logical_time_validator.h b/src/mongo/db/logical_time_validator.h
index d46f1787ebf..44bb34922e5 100644
--- a/src/mongo/db/logical_time_validator.h
+++ b/src/mongo/db/logical_time_validator.h
@@ -42,8 +42,8 @@ class KeysCollectionDocument;
class KeysCollectionManager;
/**
- * This is responsible for signing logical times that can be used to sent to other servers and
- * verifying signatures of signed logical times.
+ * This is responsible for signing cluster times that can be sent to other servers and
+ * verifying signatures of signed cluster times.
*/
class LogicalTimeValidator {
public:
@@ -92,7 +92,7 @@ public:
static bool isAuthorizedToAdvanceClock(OperationContext* opCtx);
/**
- * Returns true if the server should gossip, sign, and validate logical times. False until there
+ * Returns true if the server should gossip, sign, and validate cluster times. False until there
* are keys in the config server.
*/
bool shouldGossipLogicalTime();
diff --git a/src/mongo/db/repl/read_concern_args.h b/src/mongo/db/repl/read_concern_args.h
index c6794b278fb..02c729b0181 100644
--- a/src/mongo/db/repl/read_concern_args.h
+++ b/src/mongo/db/repl/read_concern_args.h
@@ -114,7 +114,7 @@ private:
*/
boost::optional<OpTime> _opTime;
/**
- * Read data after cluster-wide logical time.
+ * Read data after cluster-wide cluster time.
*/
boost::optional<LogicalTime> _clusterTime;
boost::optional<ReadConcernLevel> _level;
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index fca93bfdfc5..470feb11bd1 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -260,7 +260,7 @@ void appendReplyMetadata(OperationContext* opCtx,
if (serverGlobalParams.featureCompatibility.version.load() ==
ServerGlobalParams::FeatureCompatibility::Version::k36) {
if (LogicalTimeValidator::isAuthorizedToAdvanceClock(opCtx)) {
- // No need to sign logical times for internal clients.
+ // No need to sign cluster times for internal clients.
SignedLogicalTime currentTime(LogicalClock::get(opCtx)->getClusterTime(),
TimeProofService::TimeProof(),
0);
@@ -328,7 +328,7 @@ void _waitForWriteConcernAndAddToCommandResponse(OperationContext* opCtx,
/**
* For replica set members it returns the last known op time from opCtx. Otherwise will return
- * uninitialized logical time.
+ * uninitialized cluster time.
*/
LogicalTime getClientOperationTime(OperationContext* opCtx) {
repl::ReplicationCoordinator* replCoord =
@@ -346,7 +346,7 @@ LogicalTime getClientOperationTime(OperationContext* opCtx) {
/**
* Returns the proper operationTime for a command. To construct the operationTime for replica set
* members, it uses the last optime in the oplog for writes, last committed optime for majority
- * reads, and the last applied optime for every other read. An uninitialized logical time is
+ * reads, and the last applied optime for every other read. An uninitialized cluster time is
* returned for non replica set members.
*/
LogicalTime computeOperationTime(OperationContext* opCtx,
diff --git a/src/mongo/db/time_proof_service.cpp b/src/mongo/db/time_proof_service.cpp
index 55caf06921b..19cf095acd4 100644
--- a/src/mongo/db/time_proof_service.cpp
+++ b/src/mongo/db/time_proof_service.cpp
@@ -75,7 +75,7 @@ TimeProofService::TimeProof TimeProofService::getProof(LogicalTime time, const K
Status TimeProofService::checkProof(LogicalTime time, const TimeProof& proof, const Key& key) {
auto myProof = getProof(time, key);
if (myProof != proof) {
- return Status(ErrorCodes::TimeProofMismatch, "Proof does not match the logical time");
+ return Status(ErrorCodes::TimeProofMismatch, "Proof does not match the cluster time");
}
return Status::OK();
}
diff --git a/src/mongo/db/time_proof_service.h b/src/mongo/db/time_proof_service.h
index 42186aab8b3..0a981632d09 100644
--- a/src/mongo/db/time_proof_service.h
+++ b/src/mongo/db/time_proof_service.h
@@ -36,7 +36,7 @@
namespace mongo {
/**
- * The TimeProofService holds the key used by mongod and mongos processes to verify logical times
+ * The TimeProofService holds the key used by mongod and mongos processes to verify cluster times
* and contains the logic to generate this key. As a performance optimization to avoid expensive
* signature generation the class also holds the cache.
*/
diff --git a/src/mongo/db/time_proof_service_test.cpp b/src/mongo/db/time_proof_service_test.cpp
index e033a1f3047..7f1ce4a6b7d 100644
--- a/src/mongo/db/time_proof_service_test.cpp
+++ b/src/mongo/db/time_proof_service_test.cpp
@@ -40,7 +40,7 @@ using TimeProof = TimeProofService::TimeProof;
const TimeProofService::Key key = {};
-// Verifies logical time with proof signed with the correct key.
+// Verifies cluster time with proof signed with the correct key.
TEST(TimeProofService, VerifyLogicalTimeWithValidProof) {
TimeProofService timeProofService;
@@ -50,7 +50,7 @@ TEST(TimeProofService, VerifyLogicalTimeWithValidProof) {
ASSERT_OK(timeProofService.checkProof(time, proof, key));
}
-// Fails for logical time with proof signed with an invalid key.
+// Fails for cluster time with proof signed with an invalid key.
TEST(TimeProofService, LogicalTimeWithMismatchingProofShouldFail) {
TimeProofService timeProofService;
diff --git a/src/mongo/idl/idl_test.cpp b/src/mongo/idl/idl_test.cpp
index a49a352bfbc..04f25bad12f 100644
--- a/src/mongo/idl/idl_test.cpp
+++ b/src/mongo/idl/idl_test.cpp
@@ -1936,7 +1936,7 @@ TEST(IDLDocSequence, TestWellKnownFieldsAreIgnored) {
"$queryOptions",
"$readPreference",
"$replData",
- "$logicalTime",
+ "$clusterTime",
"maxTimeMS",
"readConcern",
"shardVersion",
@@ -1977,7 +1977,7 @@ TEST(IDLDocSequence, TestWellKnownFieldsPassthrough) {
"$queryOptions",
"$readPreference",
"$replData",
- "$logicalTime",
+ "$clusterTime",
"maxTimeMS",
"readConcern",
"shardVersion",
diff --git a/src/mongo/rpc/metadata.cpp b/src/mongo/rpc/metadata.cpp
index e9324f2b666..2ccea81633a 100644
--- a/src/mongo/rpc/metadata.cpp
+++ b/src/mongo/rpc/metadata.cpp
@@ -95,10 +95,10 @@ void readRequestMetadata(OperationContext* opCtx, const BSONObj& metadataObj) {
uassertStatusOK(rpc::LogicalTimeMetadata::readFromMetadata(logicalTimeElem));
auto& signedTime = logicalTimeMetadata.getSignedTime();
- // LogicalTimeMetadata is default constructed if no logical time metadata was sent, so a
+ // LogicalTimeMetadata is default constructed if no cluster time metadata was sent, so a
// default constructed SignedLogicalTime should be ignored.
if (signedTime.getTime() != LogicalTime::kUninitialized) {
- // Logical times are only sent by sharding aware mongod servers, so this point is only
+ // Cluster times are only sent by sharding aware mongod servers, so this point is only
// reached in sharded clusters.
if (serverGlobalParams.featureCompatibility.version.load() !=
ServerGlobalParams::FeatureCompatibility::Version::k34) {
diff --git a/src/mongo/rpc/metadata/logical_time_metadata.cpp b/src/mongo/rpc/metadata/logical_time_metadata.cpp
index d7ad869c9e8..ecb91861627 100644
--- a/src/mongo/rpc/metadata/logical_time_metadata.cpp
+++ b/src/mongo/rpc/metadata/logical_time_metadata.cpp
@@ -105,7 +105,7 @@ void LogicalTimeMetadata::writeToMetadata(BSONObjBuilder* metadataBuilder) const
_clusterTime.getTime().asTimestamp().append(subObjBuilder.bb(), kClusterTimeFieldName);
BSONObjBuilder signatureObjBuilder(subObjBuilder.subobjStart(kSignatureFieldName));
- // Logical time metadata is only written when the LogicalTimeValidator is set, which
+ // Cluster time metadata is only written when the LogicalTimeValidator is set, which
// means the cluster time should always have a proof.
invariant(_clusterTime.getProof());
_clusterTime.getProof()->appendAsBinData(signatureObjBuilder, kSignatureHashFieldName);
diff --git a/src/mongo/rpc/metadata/logical_time_metadata.h b/src/mongo/rpc/metadata/logical_time_metadata.h
index f47d27fba8c..4370a56d514 100644
--- a/src/mongo/rpc/metadata/logical_time_metadata.h
+++ b/src/mongo/rpc/metadata/logical_time_metadata.h
@@ -65,7 +65,7 @@ public:
const SignedLogicalTime& getSignedTime() const;
static StringData fieldName() {
- return "$logicalTime";
+ return "$clusterTime";
}
private:
diff --git a/src/mongo/rpc/metadata/logical_time_metadata_test.cpp b/src/mongo/rpc/metadata/logical_time_metadata_test.cpp
index bcc50a392cc..b6cc15d1166 100644
--- a/src/mongo/rpc/metadata/logical_time_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/logical_time_metadata_test.cpp
@@ -75,7 +75,7 @@ TEST(LogicalTimeMetadataTest, MissingClusterTimeShouldFailToParse) {
long long keyId = 1;
BSONObjBuilder builder;
- BSONObjBuilder subObjBuilder(builder.subobjStart("$logicalTime"));
+ BSONObjBuilder subObjBuilder(builder.subobjStart("$clusterTime"));
BSONObjBuilder signatureObjBuilder(subObjBuilder.subobjStart("signature"));
signatureObjBuilder.append("hash", BSONBinData(proof.data(), proof.size(), BinDataGeneral));
signatureObjBuilder.append("keyId", keyId);
@@ -91,7 +91,7 @@ TEST(LogicalTimeMetadataTest, MissingSignatureShouldFailToParse) {
const auto ts = Timestamp(100, 200);
BSONObjBuilder builder;
- BSONObjBuilder subObjBuilder(builder.subobjStart("$logicalTime"));
+ BSONObjBuilder subObjBuilder(builder.subobjStart("$clusterTime"));
ts.append(subObjBuilder.bb(), "clusterTime");
subObjBuilder.doneFast();
@@ -106,7 +106,7 @@ TEST(LogicalTimeMetadataTest, MissingHashShouldFailToParse) {
long long keyId = 1;
BSONObjBuilder builder;
- BSONObjBuilder subObjBuilder(builder.subobjStart("$logicalTime"));
+ BSONObjBuilder subObjBuilder(builder.subobjStart("$clusterTime"));
ts.append(subObjBuilder.bb(), "clusterTime");
BSONObjBuilder signatureObjBuilder(subObjBuilder.subobjStart("signature"));
signatureObjBuilder.append("keyId", keyId);
@@ -125,7 +125,7 @@ TEST(LogicalTimeMetadataTest, MissingKeyIdShouldFailToParse) {
proof.fill(0);
BSONObjBuilder builder;
- BSONObjBuilder subObjBuilder(builder.subobjStart("$logicalTime"));
+ BSONObjBuilder subObjBuilder(builder.subobjStart("$clusterTime"));
ts.append(subObjBuilder.bb(), "clusterTime");
BSONObjBuilder signatureObjBuilder(subObjBuilder.subobjStart("signature"));
signatureObjBuilder.append("hash", BSONBinData(proof.data(), proof.size(), BinDataGeneral));
@@ -146,7 +146,7 @@ TEST(LogicalTimeMetadataTest, ProofWithWrongLengthShouldFailToParse) {
long long keyId = 1;
BSONObjBuilder builder;
- BSONObjBuilder subObjBuilder(builder.subobjStart("$logicalTime"));
+ BSONObjBuilder subObjBuilder(builder.subobjStart("$clusterTime"));
ts.append(subObjBuilder.bb(), "clusterTime");
BSONObjBuilder signatureObjBuilder(subObjBuilder.subobjStart("signature"));
signatureObjBuilder.append("hash", BSONBinData(proof.data(), proof.size(), BinDataGeneral));
@@ -171,7 +171,7 @@ TEST(LogicalTimeMetadataTest, UpconvertPass) {
BSONObjBuilder builder;
builder.append("aaa", 1);
builder.append("bbb", 1);
- BSONObjBuilder subObjBuilder(builder.subobjStart("$logicalTime"));
+ BSONObjBuilder subObjBuilder(builder.subobjStart("$clusterTime"));
ts.append(subObjBuilder.bb(), "clusterTime");
BSONObjBuilder signatureObjBuilder(subObjBuilder.subobjStart("signature"));
signatureObjBuilder.append("hash", BSONBinData(proof.data(), proof.size(), BinDataGeneral));
@@ -185,7 +185,7 @@ TEST(LogicalTimeMetadataTest, UpconvertPass) {
BSONObjBuilder commandBob;
auto converted = upconvertRequestMetadata(commandObj, 0);
ASSERT_BSONOBJ_EQ(BSON("aaa" << 1 << "bbb" << 1), std::get<0>(converted));
- ASSERT_BSONOBJ_EQ(BSON("$logicalTime" << logicalTimeMetadata), std::get<1>(converted));
+ ASSERT_BSONOBJ_EQ(BSON("$clusterTime" << logicalTimeMetadata), std::get<1>(converted));
}
} // namespace rpc
diff --git a/src/mongo/rpc/reply_builder_test.cpp b/src/mongo/rpc/reply_builder_test.cpp
index 2e44d035bce..83a449f686b 100644
--- a/src/mongo/rpc/reply_builder_test.cpp
+++ b/src/mongo/rpc/reply_builder_test.cpp
@@ -69,9 +69,9 @@ BSONObj buildMetadata() {
metadataGle.append("electionId", OID("5592bee00d21e3aa796e185e"));
}
- // For now we don't need a real $logicalTime and just ensure that it just round trips whatever
- // is there. If that ever changes, we will need to construct a real $logicalTime here.
- metadataTop.append("$logicalTime", BSON("bogus" << true));
+ // For now we don't need a real $clusterTime and just ensure that it round trips whatever
+ // is there. If that ever changes, we will need to construct a real $clusterTime here.
+ metadataTop.append("$clusterTime", BSON("bogus" << true));
return metadataTop.obj();
}
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 557ade044d8..2b320595c4d 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -128,7 +128,7 @@ Status processCommandMetadata(OperationContext* opCtx, const BSONObj& cmdObj) {
void appendRequiredFieldsToResponse(OperationContext* opCtx, BSONObjBuilder* responseBuilder) {
auto validator = LogicalTimeValidator::get(opCtx);
if (validator->shouldGossipLogicalTime()) {
- // Add $logicalTime.
+ // Add $clusterTime.
auto currentTime =
validator->signLogicalTime(opCtx, LogicalClock::get(opCtx)->getClusterTime());
rpc::LogicalTimeMetadata(currentTime).writeToMetadata(responseBuilder);
diff --git a/src/mongo/shell/mongo.js b/src/mongo/shell/mongo.js
index 9025d6bd963..5ed8ababdf6 100644
--- a/src/mongo/shell/mongo.js
+++ b/src/mongo/shell/mongo.js
@@ -160,7 +160,7 @@ Mongo.prototype._gossipLogicalTime = function(obj) {
obj = Object.assign({}, obj);
const clusterTime = this.getClusterTime();
if (clusterTime) {
- obj["$logicalTime"] = clusterTime;
+ obj["$clusterTime"] = clusterTime;
}
return obj;
};
@@ -173,8 +173,8 @@ Mongo.prototype._setLogicalTimeFromReply = function(res) {
if (res.hasOwnProperty("operationTime")) {
this.setOperationTime(res["operationTime"]);
}
- if (res.hasOwnProperty("$logicalTime")) {
- this.setClusterTime(res["$logicalTime"]);
+ if (res.hasOwnProperty("$clusterTime")) {
+ this.setClusterTime(res["$clusterTime"]);
}
};
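
A minimal sketch of the round trip these two shell helpers implement (assuming a connection to a server that returns $clusterTime, e.g. a mongos in a 3.6 cluster; the host is illustrative): the shell caches `$clusterTime` from each reply and gossips it back on subsequent commands.

```javascript
// Minimal sketch of the shell-side gossip round trip (host is illustrative).
const conn = new Mongo("localhost:27017");
let res = conn.getDB("test").runCommand({isMaster: 1});

// _setLogicalTimeFromReply caches res.$clusterTime (and operationTime) on the connection.
conn._setLogicalTimeFromReply(res);

// _gossipLogicalTime then attaches the cached document to an outgoing command body.
const cmdWithGossip = conn._gossipLogicalTime({ping: 1});
assert.eq(conn.getClusterTime(), cmdWithGossip.$clusterTime);
```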
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index 55c76c18bb8..ad286256004 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -18,7 +18,7 @@
* configuration object(s)(*). @see MongoRunner.runMongos
*
* mongosWaitsForKeys {boolean}: if true, wait for mongos to discover keys from the config
- * server and to start sending logical times.
+ * server and to start sending cluster times.
*
* rs {Object|Array.<Object>}: replica set configuration object. Can
* contain:
@@ -1487,19 +1487,19 @@ var ShardingTest = function(params) {
}
// Mongos does not block for keys from the config servers at startup, so it may not initially
- // return logical times. If mongosWaitsForKeys is set, block until all mongos servers have found
- // the keys and begun to send logical times. Retry every 500 milliseconds and timeout after 60
+ // return cluster times. If mongosWaitsForKeys is set, block until all mongos servers have found
+ // the keys and begun to send cluster times. Retry every 500 milliseconds and timeout after 60
// seconds.
if (params.mongosWaitsForKeys) {
assert.soon(function() {
for (let i = 0; i < numMongos; i++) {
const res = self._mongos[i].adminCommand({isMaster: 1});
- if (!res.hasOwnProperty("$logicalTime")) {
- print("Waiting for mongos #" + i + " to start sending logical times.");
+ if (!res.hasOwnProperty("$clusterTime")) {
+ print("Waiting for mongos #" + i + " to start sending cluster times.");
return false;
}
}
return true;
- }, "waiting for all mongos servers to return logical times", 60 * 1000, 500);
+ }, "waiting for all mongos servers to return cluster times", 60 * 1000, 500);
}
};