author     Allison Easton <allison.easton@mongodb.com>  2022-05-26 06:51:02 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-05-26 07:20:12 +0000
commit     a40552a3eedefd4b4ce7687411627afdc172ac82 (patch)
tree       f748be2fe684a9e29afee13d22bf4524e52e3629
parent     b198148add5ca8746d3fedf63ded1c9709ad58fd (diff)
download   mongo-a40552a3eedefd4b4ce7687411627afdc172ac82.tar.gz
SERVER-63819 Make range deletion's `numOrphanDocs` field non-optional once 6.0 branches out
-rw-r--r--  jstests/sharding/resubmit_rangedeletions_on_stepup.js  7
-rw-r--r--  jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js  1
-rw-r--r--  src/mongo/db/s/migration_coordinator.cpp  3
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp  3
-rw-r--r--  src/mongo/db/s/migration_util.cpp  8
-rw-r--r--  src/mongo/db/s/migration_util_test.cpp  1
-rw-r--r--  src/mongo/db/s/range_deletion_task.idl  2
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp  4
8 files changed, 10 insertions, 19 deletions
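
The hunks below all rest on the same premise: once 6.0 has branched, every document in config.rangeDeletions carries a numOrphanDocs field, so readers no longer need to handle its absence. As a rough sketch only (field names mirror the jstests below; the values are placeholders, not taken from the server), a task document built with the server's BSON macros now looks like:

    // Sketch: shape of a range deletion task after this change; values are placeholders.
    BSONObj exampleTask = BSON("_id" << UUID::gen()
                                     << "nss" << "test.foo"
                                     << "collectionUuid" << UUID::gen()
                                     << "donorShardId" << "unused"
                                     << "pending" << true
                                     << "numOrphanDocs" << 0  // always present now
                                     << "range" << BSON("min" << BSON("x" << 50) << "max" << BSON("x" << MAXKEY))
                                     << "whenToClean" << "now");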
diff --git a/jstests/sharding/resubmit_rangedeletions_on_stepup.js b/jstests/sharding/resubmit_rangedeletions_on_stepup.js
index 3cded7ef852..0a8c92ff10e 100644
--- a/jstests/sharding/resubmit_rangedeletions_on_stepup.js
+++ b/jstests/sharding/resubmit_rangedeletions_on_stepup.js
@@ -29,12 +29,13 @@ function setup() {
return st;
}
-function writeRangeDeletionTask(collectionUuid, shardConn, pending) {
+function writeRangeDeletionTask(collectionUuid, shardConn, pending, numOrphans) {
let deletionTask = {
_id: UUID(),
nss: ns,
collectionUuid: collectionUuid,
donorShardId: "unused",
+ numOrphanDocs: numOrphans,
range: {min: {x: 50}, max: {x: MaxKey}},
whenToClean: "now"
};
@@ -84,7 +85,7 @@ function writeRangeDeletionTask(collectionUuid, shardConn, pending) {
assert.eq(shard1Coll.find().itcount(), expectedNumDocsShard1);
const collectionUuid = getUUIDFromConfigCollections(st.s, ns);
- writeRangeDeletionTask(collectionUuid, st.shard0);
+ writeRangeDeletionTask(collectionUuid, st.shard0, false, orphanCount);
// Step down current primary.
let originalShard0Primary = st.rs0.getPrimary();
@@ -123,7 +124,7 @@ function writeRangeDeletionTask(collectionUuid, shardConn, pending) {
}
const collectionUuid = getUUIDFromConfigCollections(st.s, ns);
- writeRangeDeletionTask(collectionUuid, st.shard0, true);
+ writeRangeDeletionTask(collectionUuid, st.shard0, true, orphanCount);
const expectedNumDocsTotal = 0;
const expectedNumDocsShard0 = 0;
diff --git a/jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js b/jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js
index fe356704ed6..e96c7dfe168 100644
--- a/jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js
+++ b/jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js
@@ -127,6 +127,7 @@ let testColl = testDB.foo;
collectionUuid: UUID(),
donorShardId: "unused",
pending: true,
+ numOrphanDocs: 0,
range: {min: {x: 70}, max: {x: 90}},
whenToClean: "now"
};
diff --git a/src/mongo/db/s/migration_coordinator.cpp b/src/mongo/db/s/migration_coordinator.cpp
index 46c8a0d611a..8a6b1f4da2f 100644
--- a/src/mongo/db/s/migration_coordinator.cpp
+++ b/src/mongo/db/s/migration_coordinator.cpp
@@ -128,9 +128,6 @@ void MigrationCoordinator::startMigration(OperationContext* opCtx) {
donorDeletionTask.setPending(true);
const auto currentTime = VectorClock::get(opCtx)->getTime();
donorDeletionTask.setTimestamp(currentTime.clusterTime().asTimestamp());
- if (feature_flags::gOrphanTracking.isEnabled(serverGlobalParams.featureCompatibility)) {
- donorDeletionTask.setNumOrphanDocs(0);
- }
migrationutil::persistRangeDeletionTaskLocally(
opCtx, donorDeletionTask, WriteConcerns::kMajorityWriteConcernShardingTimeout);
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index e8ebe638187..6f4fe0c2272 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -1289,9 +1289,6 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx,
recipientDeletionTask.setPending(true);
const auto currentTime = VectorClock::get(outerOpCtx)->getTime();
recipientDeletionTask.setTimestamp(currentTime.clusterTime().asTimestamp());
- if (feature_flags::gOrphanTracking.isEnabled(serverGlobalParams.featureCompatibility)) {
- recipientDeletionTask.setNumOrphanDocs(0);
- }
// It is illegal to wait for write concern with a session checked out, so persist the
// range deletion task with an immediately satsifiable write concern and then wait for
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index ee54cadb56b..a2ec056f48d 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -700,11 +700,7 @@ void persistUpdatedNumOrphans(OperationContext* opCtx,
const UUID& migrationId,
const UUID& collectionUuid,
long long changeInOrphans) {
- // TODO (SERVER-63819) Remove numOrphanDocsFieldName field from the query
- // Add $exists to the query to ensure that on upgrade and downgrade, the numOrphanDocs field
- // is only updated after the upgrade procedure has populated it with an initial value.
- BSONObj query = BSON("_id" << migrationId << RangeDeletionTask::kNumOrphanDocsFieldName
- << BSON("$exists" << true));
+ BSONObj query = BSON("_id" << migrationId);
try {
PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace);
ScopedRangeDeleterLock rangeDeleterLock(opCtx, collectionUuid);
@@ -750,7 +746,7 @@ long long retrieveNumOrphansFromRecipient(OperationContext* opCtx,
}
const auto numOrphanDocsElem =
rangeDeletionResponse.docs[0].getField(RangeDeletionTask::kNumOrphanDocsFieldName);
- return numOrphanDocsElem ? numOrphanDocsElem.safeNumberLong() : 0;
+ return numOrphanDocsElem.safeNumberLong();
}
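
In other words, persistUpdatedNumOrphans no longer needs the $exists guard that protected mixed-version clusters; matching on _id is enough once the field is guaranteed to be present. A minimal sketch of the two query shapes, using the variables from the function signature above (the $inc update shown last is an assumption about how the counter is bumped, not lifted from this file):

    // Before: only update tasks that already carry the counter (upgrade/downgrade guard).
    BSONObj oldQuery = BSON("_id" << migrationId
                                  << RangeDeletionTask::kNumOrphanDocsFieldName
                                  << BSON("$exists" << true));

    // After: the field always exists, so the _id alone identifies the task.
    BSONObj newQuery = BSON("_id" << migrationId);

    // Assumed update shape: bump the counter by the signed delta passed in.
    BSONObj update = BSON("$inc" << BSON(RangeDeletionTask::kNumOrphanDocsFieldName
                                         << changeInOrphans));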
void notifyChangeStreamsOnRecipientFirstChunk(OperationContext* opCtx,
diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp
index 24fb4d3567e..8e6f02043da 100644
--- a/src/mongo/db/s/migration_util_test.cpp
+++ b/src/mongo/db/s/migration_util_test.cpp
@@ -344,7 +344,6 @@ TEST_F(MigrationUtilsTest, TestUpdateNumberOfOrphans) {
const auto collectionUuid = UUID::gen();
PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace);
auto rangeDeletionDoc = createDeletionTask(opCtx, kTestNss, collectionUuid, 0, 10);
- rangeDeletionDoc.setNumOrphanDocs(0);
store.add(opCtx, rangeDeletionDoc);
migrationutil::persistUpdatedNumOrphans(opCtx, rangeDeletionDoc.getId(), collectionUuid, 5);
diff --git a/src/mongo/db/s/range_deletion_task.idl b/src/mongo/db/s/range_deletion_task.idl
index 2e47651abdb..7d2b90610b9 100644
--- a/src/mongo/db/s/range_deletion_task.idl
+++ b/src/mongo/db/s/range_deletion_task.idl
@@ -84,4 +84,4 @@ structs:
numOrphanDocs:
type: safeInt64
description: "The estimated number of orphaned documents in the range"
- optional: true
+ default: 0
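
Switching the IDL spec from optional: true to default: 0 is what lets the call sites above and below drop their fallbacks (value_or(0) and the presence ternaries). A minimal sketch of the effect on the generated accessor, assuming the usual IDL conventions (std::optional stands in here for whichever optional wrapper the generator actually emits; the real generated class may differ in detail):

    #include <cstdint>
    #include <optional>

    struct RangeDeletionTaskBefore {
        // optional: true -> callers must handle absence, e.g. getNumOrphanDocs().value_or(0)
        std::optional<std::int64_t> getNumOrphanDocs() const { return _numOrphanDocs; }
        std::optional<std::int64_t> _numOrphanDocs;
    };

    struct RangeDeletionTaskAfter {
        // default: 0 -> the member is initialized to 0 and always present,
        // so callers read it directly via getNumOrphanDocs()
        std::int64_t getNumOrphanDocs() const { return _numOrphanDocs; }
        std::int64_t _numOrphanDocs = 0;
    };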
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index ea0908949ee..4f853ddf78b 100644
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -275,7 +275,7 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx,
std::make_unique<SubmitRangeDeletionHandler>(opCtx, deletionTask));
}
- const auto numOrphanDocs = deletionTask.getNumOrphanDocs().value_or(0);
+ const auto numOrphanDocs = deletionTask.getNumOrphanDocs();
BalancerStatsRegistry::get(opCtx)->onRangeDeletionTaskInsertion(
deletionTask.getCollectionUuid(), numOrphanDocs);
}
@@ -549,7 +549,7 @@ void ShardServerOpObserver::onDelete(OperationContext* opCtx,
const auto numOrphanDocs = [&] {
auto numOrphanDocsElem = update_oplog_entry::extractNewValueForField(
deletedDoc, RangeDeletionTask::kNumOrphanDocsFieldName);
- return numOrphanDocsElem ? numOrphanDocsElem.exactNumberLong() : 0;
+ return numOrphanDocsElem.exactNumberLong();
}();
auto collUuid = [&] {