summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMax Hirschhorn <max.hirschhorn@mongodb.com>2020-03-11 18:29:23 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-03-11 23:13:38 +0000
commit90eefa051e6015514dcc6256d0f42b76bf041a76 (patch)
tree734352949bfc4a13ef69493616acbc05aae2306a
parentabd4c261113bb98cbacef3db12d48b38344b5399 (diff)
downloadmongo-90eefa051e6015514dcc6256d0f42b76bf041a76.tar.gz
SERVER-46395 Ensure range deletion task document exists during deletion.
Changes the range deletion task to guarantee a batch of documents can only be removed while the corresponding range deletion task document exists. If the range deletion task document doesn't exist or a stepdown could lead to it not existing, then the range deletion task is abandoned. Changes the deletion of the range deletion task document to use the _id index of the config.rangeDeletions collection rather than being a collection scan.
-rw-r--r--src/mongo/base/error_codes.yml2
-rw-r--r--src/mongo/db/s/catalog_cache_loader_mock.cpp8
-rw-r--r--src/mongo/db/s/cleanup_orphaned_cmd.cpp3
-rw-r--r--src/mongo/db/s/collection_sharding_runtime.cpp3
-rw-r--r--src/mongo/db/s/collection_sharding_runtime.h4
-rw-r--r--src/mongo/db/s/collection_sharding_runtime_test.cpp4
-rw-r--r--src/mongo/db/s/collection_sharding_state_factory_shard.cpp2
-rw-r--r--src/mongo/db/s/metadata_manager.cpp21
-rw-r--r--src/mongo/db/s/metadata_manager.h5
-rw-r--r--src/mongo/db/s/metadata_manager_test.cpp6
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp2
-rw-r--r--src/mongo/db/s/migration_util.cpp2
-rw-r--r--src/mongo/db/s/migration_util_test.cpp50
-rw-r--r--src/mongo/db/s/range_deletion_util.cpp136
-rw-r--r--src/mongo/db/s/range_deletion_util.h3
-rw-r--r--src/mongo/db/s/range_deletion_util_test.cpp139
16 files changed, 307 insertions, 83 deletions
diff --git a/src/mongo/base/error_codes.yml b/src/mongo/base/error_codes.yml
index 55d8d894b23..43ea3f365b8 100644
--- a/src/mongo/base/error_codes.yml
+++ b/src/mongo/base/error_codes.yml
@@ -350,6 +350,8 @@ error_codes:
- {code: 305,name: ReadThroughCacheKeyNotFound}
- {code: 306,name: ReadThroughCacheLookupCanceled}
+ - {code: 307,name: RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist}
+
# Error codes 4000-8999 are reserved.
# Non-sequential error codes (for compatibility only)
diff --git a/src/mongo/db/s/catalog_cache_loader_mock.cpp b/src/mongo/db/s/catalog_cache_loader_mock.cpp
index 7d3c7b1f80b..9bd095b9f6b 100644
--- a/src/mongo/db/s/catalog_cache_loader_mock.cpp
+++ b/src/mongo/db/s/catalog_cache_loader_mock.cpp
@@ -111,13 +111,19 @@ std::shared_ptr<Notification<void>> CatalogCacheLoaderMock::getChunksSince(
uassertStatusOK(_swCollectionReturnValue);
uassertStatusOK(_swChunksReturnValue);
+ // We swap the chunks out of _swChunksReturnValue to ensure if this task is
+ // scheduled multiple times that we don't inform the ChunkManager about a chunk it
+ // has already updated.
+ std::vector<ChunkType> chunks;
+ _swChunksReturnValue.getValue().swap(chunks);
+
return CollectionAndChangedChunks(
_swCollectionReturnValue.getValue().getUUID(),
_swCollectionReturnValue.getValue().getEpoch(),
_swCollectionReturnValue.getValue().getKeyPattern().toBSON(),
_swCollectionReturnValue.getValue().getDefaultCollation(),
_swCollectionReturnValue.getValue().getUnique(),
- _swChunksReturnValue.getValue());
+ std::move(chunks));
} catch (const DBException& ex) {
return ex.toStatus();
}
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 2404ff36f5e..7aa97cd3e64 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -114,7 +114,8 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
*stoppedAtKey = targetRange->getMax();
- cleanupCompleteFuture = css->cleanUpRange(*targetRange, CollectionShardingRuntime::kNow);
+ cleanupCompleteFuture =
+ css->cleanUpRange(*targetRange, boost::none, CollectionShardingRuntime::kNow);
}
// Sleep waiting for our own deletion. We don't actually care about any others, so there is no
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index 511c06ecea5..b523400b3ae 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -251,10 +251,11 @@ void CollectionShardingRuntime::forgetReceive(const ChunkRange& range) {
_metadataManager->forgetReceive(range);
}
SharedSemiFuture<void> CollectionShardingRuntime::cleanUpRange(ChunkRange const& range,
+ boost::optional<UUID> migrationId,
CleanWhen when) {
stdx::lock_guard lk(_metadataManagerLock);
invariant(_metadataType == MetadataType::kSharded);
- return _metadataManager->cleanUpRange(range, when == kDelayed);
+ return _metadataManager->cleanUpRange(range, std::move(migrationId), when == kDelayed);
}
Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx,
diff --git a/src/mongo/db/s/collection_sharding_runtime.h b/src/mongo/db/s/collection_sharding_runtime.h
index 8ba437137b0..ddbf9683b7f 100644
--- a/src/mongo/db/s/collection_sharding_runtime.h
+++ b/src/mongo/db/s/collection_sharding_runtime.h
@@ -142,7 +142,9 @@ public:
* succeeds, waitForClean can be called to ensure no other deletions are pending for the range.
*/
enum CleanWhen { kNow, kDelayed };
- SharedSemiFuture<void> cleanUpRange(ChunkRange const& range, CleanWhen when);
+ SharedSemiFuture<void> cleanUpRange(ChunkRange const& range,
+ boost::optional<UUID> migrationId,
+ CleanWhen when);
/**
* Returns a range _not_ owned by this shard that starts no lower than the specified
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index 103dad1eef9..3d5616d5068 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -273,6 +273,7 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
auto cleanupComplete =
csr().cleanUpRange(ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)),
+ boost::none,
CollectionShardingRuntime::CleanWhen::kNow);
operationContext()->setDeadlineAfterNowBy(Milliseconds(100), ErrorCodes::MaxTimeMSExpired);
@@ -297,10 +298,12 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
auto cleanupCompleteFirst =
csr().cleanUpRange(ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << middleKey)),
+ boost::none,
CollectionShardingRuntime::CleanWhen::kNow);
auto cleanupCompleteSecond =
csr().cleanUpRange(ChunkRange(BSON(kShardKey << middleKey), BSON(kShardKey << MAXKEY)),
+ boost::none,
CollectionShardingRuntime::CleanWhen::kNow);
auto status = CollectionShardingRuntime::waitForClean(
@@ -325,6 +328,7 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
auto cleanupComplete =
csr().cleanUpRange(ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)),
+ boost::none,
CollectionShardingRuntime::CleanWhen::kNow);
auto status = CollectionShardingRuntime::waitForClean(
diff --git a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
index 626564fff79..f70ba73a17c 100644
--- a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
+++ b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp
@@ -66,6 +66,8 @@ CollectionShardingStateFactoryShard::_getRangeDeletionExecutor() {
if (!_rangeDeletionExecutor) {
const std::string kExecName("CollectionRangeDeleter-TaskExecutor");
+ // CAUTION: The safety of range deletion depends on using a task executor that schedules
+ // work on a single thread.
auto net = executor::makeNetworkInterface(kExecName);
auto pool = std::make_unique<executor::NetworkInterfaceThreadPool>(net.get());
auto taskExecutor =
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 2497968d875..f17fd52d8e0 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -310,8 +310,11 @@ SharedSemiFuture<void> MetadataManager::beginReceive(ChunkRange const& range) {
"nss_ns"_attr = _nss.ns(),
"range"_attr = redact(range.toString()));
- return _submitRangeForDeletion(
- lg, SemiFuture<void>::makeReady(), range, Seconds(orphanCleanupDelaySecs.load()));
+ return _submitRangeForDeletion(lg,
+ SemiFuture<void>::makeReady(),
+ range,
+ boost::none,
+ Seconds(orphanCleanupDelaySecs.load()));
}
void MetadataManager::forgetReceive(ChunkRange const& range) {
@@ -333,10 +336,12 @@ void MetadataManager::forgetReceive(ChunkRange const& range) {
invariant(it != _receivingChunks.end());
_receivingChunks.erase(it);
- std::ignore = _submitRangeForDeletion(lg, SemiFuture<void>::makeReady(), range, Seconds(0));
+ std::ignore =
+ _submitRangeForDeletion(lg, SemiFuture<void>::makeReady(), range, boost::none, Seconds(0));
}
SharedSemiFuture<void> MetadataManager::cleanUpRange(ChunkRange const& range,
+ boost::optional<UUID> migrationId,
bool shouldDelayBeforeDeletion) {
stdx::lock_guard<Latch> lg(_managerLock);
invariant(!_metadata.empty());
@@ -372,6 +377,7 @@ SharedSemiFuture<void> MetadataManager::cleanUpRange(ChunkRange const& range,
return _submitRangeForDeletion(lg,
overlapMetadata->onDestructionPromise.getFuture().semi(),
range,
+ std::move(migrationId),
delayForActiveQueriesOnSecondariesToComplete);
} else {
// No running queries can depend on this range, so queue it for deletion immediately.
@@ -381,8 +387,11 @@ SharedSemiFuture<void> MetadataManager::cleanUpRange(ChunkRange const& range,
"nss_ns"_attr = _nss.ns(),
"range"_attr = redact(range.toString()));
- return _submitRangeForDeletion(
- lg, SemiFuture<void>::makeReady(), range, delayForActiveQueriesOnSecondariesToComplete);
+ return _submitRangeForDeletion(lg,
+ SemiFuture<void>::makeReady(),
+ range,
+ std::move(migrationId),
+ delayForActiveQueriesOnSecondariesToComplete);
}
}
@@ -449,6 +458,7 @@ SharedSemiFuture<void> MetadataManager::_submitRangeForDeletion(
const WithLock&,
SemiFuture<void> waitForActiveQueriesToComplete,
const ChunkRange& range,
+ boost::optional<UUID> migrationId,
Seconds delayForActiveQueriesOnSecondariesToComplete) {
int maxToDelete = rangeDeleterBatchSize.load();
@@ -463,6 +473,7 @@ SharedSemiFuture<void> MetadataManager::_submitRangeForDeletion(
*_metadata.back()->metadata->getChunkManager()->getUUID(),
_metadata.back()->metadata->getKeyPattern().getOwned(),
range,
+ std::move(migrationId),
maxToDelete,
delayForActiveQueriesOnSecondariesToComplete,
Milliseconds(rangeDeleterBatchDelayMS.load()));
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index 6ab9a5ae37e..bd2c0189742 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -140,7 +140,9 @@ public:
*
* Returns a future that will be fulfilled when the range deletion completes or fails.
*/
- SharedSemiFuture<void> cleanUpRange(ChunkRange const& range, bool shouldDelayBeforeDeletion);
+ SharedSemiFuture<void> cleanUpRange(ChunkRange const& range,
+ boost::optional<UUID> migrationId,
+ bool shouldDelayBeforeDeletion);
/**
* Returns the number of ranges scheduled to be cleaned, exclusive of such ranges that might
@@ -241,6 +243,7 @@ private:
const WithLock&,
SemiFuture<void> waitForActiveQueriesToComplete,
const ChunkRange& range,
+ boost::optional<UUID> migrationId,
Seconds delayForActiveQueriesOnSecondariesToComplete);
// ServiceContext from which to obtain instances of global support objects
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index df611cb12ca..58c663c4a4f 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -185,7 +185,7 @@ TEST_F(MetadataManagerTest, TrackOrphanedDataCleanupBlocksOnScheduledRangeDeleti
// Enable fail point to suspendRangeDeletion.
globalFailPointRegistry().find("suspendRangeDeletion")->setMode(FailPoint::alwaysOn);
- auto notifn1 = _manager->cleanUpRange(cr1, false /*delayBeforeDeleting*/);
+ auto notifn1 = _manager->cleanUpRange(cr1, boost::none, false /*delayBeforeDeleting*/);
ASSERT_FALSE(notifn1.isReady());
ASSERT_EQ(_manager->numberOfRangesToClean(), 1UL);
@@ -212,7 +212,7 @@ TEST_F(MetadataManagerTest, CleanupNotificationsAreSignaledWhenMetadataManagerIs
_manager->setFilteringMetadata(
cloneMetadataMinusChunk(_manager->getActiveMetadata(boost::none), rangeToClean));
- auto notif = _manager->cleanUpRange(rangeToClean, false /*delayBeforeDeleting*/);
+ auto notif = _manager->cleanUpRange(rangeToClean, boost::none, false /*delayBeforeDeleting*/);
ASSERT(!notif.isReady());
auto optNotif = _manager->trackOrphanedDataCleanup(rangeToClean);
@@ -295,7 +295,7 @@ TEST_F(MetadataManagerTest, RangesToCleanMembership) {
// Enable fail point to suspendRangeDeletion.
globalFailPointRegistry().find("suspendRangeDeletion")->setMode(FailPoint::alwaysOn);
- auto notifn = _manager->cleanUpRange(cr, false /*delayBeforeDeleting*/);
+ auto notifn = _manager->cleanUpRange(cr, boost::none, false /*delayBeforeDeleting*/);
ASSERT(!notifn.isReady());
ASSERT_EQ(1UL, _manager->numberOfRangesToClean());
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 479a2a4ec7f..45589219e6f 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -584,7 +584,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig() {
UninterruptibleLockGuard noInterrupt(_opCtx->lockState());
AutoGetCollection autoColl(_opCtx, getNss(), MODE_IS);
return CollectionShardingRuntime::get(_opCtx, getNss())
- ->cleanUpRange(range, whenToClean);
+ ->cleanUpRange(range, boost::none, whenToClean);
}();
if (_args.getWaitForDelete()) {
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index 02d7e5bcc31..6b4537ede2b 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -292,7 +292,7 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
? CollectionShardingRuntime::kNow
: CollectionShardingRuntime::kDelayed;
- return css->cleanUpRange(deletionTask.getRange(), whenToClean);
+ return css->cleanUpRange(deletionTask.getRange(), deletionTask.getId(), whenToClean);
})
.onError([=](const Status status) {
ThreadClient tc(kRangeDeletionThreadName, serviceContext);
diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp
index 33e0dd1e54f..d8e6c1c3fbc 100644
--- a/src/mongo/db/s/migration_util_test.cpp
+++ b/src/mongo/db/s/migration_util_test.cpp
@@ -68,12 +68,16 @@ void addRangeToReceivingChunks(OperationContext* opCtx,
}
template <typename ShardKey>
-RangeDeletionTask createDeletionTask(
- const NamespaceString& nss, const UUID& uuid, ShardKey min, ShardKey max, bool pending = true) {
+RangeDeletionTask createDeletionTask(const NamespaceString& nss,
+ const UUID& uuid,
+ ShardKey min,
+ ShardKey max,
+ ShardId donorShard = ShardId("donorShard"),
+ bool pending = true) {
auto task = RangeDeletionTask(UUID::gen(),
nss,
uuid,
- ShardId("donorShard"),
+ donorShard,
ChunkRange{BSON("_id" << min), BSON("_id" << max)},
CleanWhenEnum::kNow);
@@ -466,11 +470,12 @@ TEST_F(SubmitRangeDeletionTaskTest,
FailsAndDeletesTaskIfFilteringMetadataIsUnknownEvenAfterRefresh) {
auto opCtx = operationContext();
- auto deletionTask = createDeletionTask(kNss, kDefaultUUID, 0, 10);
+ auto deletionTask = createDeletionTask(kNss, kDefaultUUID, 0, 10, _myShardName);
PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
store.add(opCtx, deletionTask);
ASSERT_EQ(store.count(opCtx), 1);
+ migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Make the refresh triggered by submitting the task return an empty result when loading the
// database.
@@ -490,11 +495,12 @@ TEST_F(SubmitRangeDeletionTaskTest,
TEST_F(SubmitRangeDeletionTaskTest, FailsAndDeletesTaskIfNamespaceIsUnshardedEvenAfterRefresh) {
auto opCtx = operationContext();
- auto deletionTask = createDeletionTask(kNss, kDefaultUUID, 0, 10);
+ auto deletionTask = createDeletionTask(kNss, kDefaultUUID, 0, 10, _myShardName);
PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
store.add(opCtx, deletionTask);
ASSERT_EQ(store.count(opCtx), 1);
+ migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Make the refresh triggered by submitting the task return an empty result when loading the
// collection so it is considered unsharded.
@@ -516,11 +522,12 @@ TEST_F(SubmitRangeDeletionTaskTest,
FailsAndDeletesTaskIfNamespaceIsUnshardedBeforeAndAfterRefresh) {
auto opCtx = operationContext();
- auto deletionTask = createDeletionTask(kNss, kDefaultUUID, 0, 10);
+ auto deletionTask = createDeletionTask(kNss, kDefaultUUID, 0, 10, _myShardName);
PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
store.add(opCtx, deletionTask);
ASSERT_EQ(store.count(opCtx), 1);
+ migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Mock an empty result for the task's collection and force a refresh so the node believes the
// collection is unsharded.
@@ -543,7 +550,12 @@ TEST_F(SubmitRangeDeletionTaskTest, SucceedsIfFilteringMetadataUUIDMatchesTaskUU
auto opCtx = operationContext();
auto collectionUUID = createCollectionAndGetUUID(kNss);
- auto deletionTask = createDeletionTask(kNss, collectionUUID, 0, 10);
+ auto deletionTask = createDeletionTask(kNss, collectionUUID, 0, 10, _myShardName);
+
+ PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
+ store.add(opCtx, deletionTask);
+ ASSERT_EQ(store.count(opCtx), 1);
+ migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Force a metadata refresh with the task's UUID before the task is submitted.
auto coll = makeCollectionType(collectionUUID, kEpoch);
@@ -565,7 +577,12 @@ TEST_F(
auto opCtx = operationContext();
auto collectionUUID = createCollectionAndGetUUID(kNss);
- auto deletionTask = createDeletionTask(kNss, collectionUUID, 0, 10);
+ auto deletionTask = createDeletionTask(kNss, collectionUUID, 0, 10, _myShardName);
+
+ PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
+ store.add(opCtx, deletionTask);
+ ASSERT_EQ(store.count(opCtx), 1);
+ migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Make the refresh triggered by submitting the task return a UUID that matches the task's UUID.
auto coll = makeCollectionType(collectionUUID, kEpoch);
@@ -592,7 +609,12 @@ TEST_F(SubmitRangeDeletionTaskTest,
forceShardFilteringMetadataRefresh(opCtx, kNss, true);
auto collectionUUID = createCollectionAndGetUUID(kNss);
- auto deletionTask = createDeletionTask(kNss, collectionUUID, 0, 10);
+ auto deletionTask = createDeletionTask(kNss, collectionUUID, 0, 10, _myShardName);
+
+ PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
+ store.add(opCtx, deletionTask);
+ ASSERT_EQ(store.count(opCtx), 1);
+ migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Make the refresh triggered by submitting the task return a UUID that matches the task's UUID.
auto matchingColl = makeCollectionType(collectionUUID, kEpoch);
@@ -623,7 +645,12 @@ TEST_F(SubmitRangeDeletionTaskTest,
forceShardFilteringMetadataRefresh(opCtx, kNss, true);
auto collectionUUID = createCollectionAndGetUUID(kNss);
- auto deletionTask = createDeletionTask(kNss, collectionUUID, 0, 10);
+ auto deletionTask = createDeletionTask(kNss, collectionUUID, 0, 10, _myShardName);
+
+ PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
+ store.add(opCtx, deletionTask);
+ ASSERT_EQ(store.count(opCtx), 1);
+ migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Make the refresh triggered by submitting the task return a UUID that matches the task's UUID.
auto matchingColl = makeCollectionType(collectionUUID, kEpoch);
@@ -641,11 +668,12 @@ TEST_F(SubmitRangeDeletionTaskTest,
FailsAndDeletesTaskIfFilteringMetadataUUIDDifferentFromTaskUUIDEvenAfterRefresh) {
auto opCtx = operationContext();
- auto deletionTask = createDeletionTask(kNss, kDefaultUUID, 0, 10);
+ auto deletionTask = createDeletionTask(kNss, kDefaultUUID, 0, 10, _myShardName);
PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
store.add(opCtx, deletionTask);
ASSERT_EQ(store.count(opCtx), 1);
+ migrationutil::markAsReadyRangeDeletionTaskLocally(opCtx, deletionTask.getId());
// Make the refresh triggered by submitting the task return an arbitrary UUID.
const auto otherEpoch = OID::gen();
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index 164f9e0fe12..bf3adab6da8 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -36,6 +36,8 @@
#include <algorithm>
#include <utility>
+#include <boost/optional.hpp>
+
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
@@ -240,9 +242,45 @@ auto withTemporaryOperationContext(Callable&& callable) {
}
auto uniqueOpCtx = Client::getCurrent()->makeOperationContext();
auto opCtx = uniqueOpCtx.get();
+
+ {
+ // We acquire the global IX lock and then immediately release it to ensure this operation
+ // would be killed by the RstlKillOpThread during step-up or stepdown. Note that the
+ // RstlKillOpThread kills any operations on step-up or stepdown for which
+ // Locker::wasGlobalLockTakenInModeConflictingWithWrites() returns true.
+ Lock::GlobalLock lk(opCtx, MODE_IX);
+ }
+ invariant(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites());
+
return callable(opCtx);
}
+void ensureRangeDeletionTaskStillExists(OperationContext* opCtx, const UUID& migrationId) {
+ // While at this point we are guaranteed for our operation context to be killed if there is a
+ // step-up or stepdown, it is still possible that a stepdown and a subsequent step-up happened
+ // prior to acquiring the global IX lock. The range deletion task document prevents a moveChunk
+ // operation from migrating an overlapping range to this shard. If the range deletion task
+ // document has already been deleted, then it is possible for the range in the user collection
+ // to now be owned by this shard and for proceeding with the range deletion to result in data
+ // corruption. The scheme for checking whether the range deletion task document still exists
+ // relies on the executor only having a single thread and that thread being solely responsible
+ // for deleting the range deletion task document.
+ PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
+ auto count = store.count(opCtx,
+ QUERY(RangeDeletionTask::kIdFieldName
+ << migrationId << RangeDeletionTask::kPendingFieldName
+ << BSON("$exists" << false)));
+ invariant(count == 0 || count == 1, "found duplicate range deletion tasks");
+ uassert(ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist,
+ "Range deletion task no longer exists",
+ count == 1);
+
+ // We are now guaranteed that either (a) the range deletion task document will continue to exist
+ // for the lifetime of this operation context, or (b) this operation context will be killed if
+ // it is possible for the range deletion task document to have been deleted while we weren't
+ // holding any locks.
+}
+
/**
* Delete the range in a sequence of batches until there are no more documents to
* delete or deletion returns an error.
@@ -252,10 +290,15 @@ ExecutorFuture<void> deleteRangeInBatches(const std::shared_ptr<executor::TaskEx
const UUID& collectionUuid,
const BSONObj& keyPattern,
const ChunkRange& range,
+ const boost::optional<UUID>& migrationId,
int numDocsToRemovePerBatch,
Milliseconds delayBetweenBatches) {
return AsyncTry([=] {
return withTemporaryOperationContext([=](OperationContext* opCtx) {
+ if (migrationId) {
+ ensureRangeDeletionTaskStillExists(opCtx, *migrationId);
+ }
+
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
auto* const collection = autoColl.getCollection();
@@ -287,6 +330,8 @@ ExecutorFuture<void> deleteRangeInBatches(const std::shared_ptr<executor::TaskEx
return (swNumDeleted.isOK() && swNumDeleted.getValue() == 0) ||
swNumDeleted.getStatus() ==
ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist ||
+ swNumDeleted.getStatus() ==
+ ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist ||
ErrorCodes::isShutdownError(swNumDeleted.getStatus()) ||
ErrorCodes::isNotMasterError(swNumDeleted.getStatus());
})
@@ -314,25 +359,12 @@ void notifySecondariesThatDeletionIsOccurring(const NamespaceString& nss,
});
}
-void removePersistentRangeDeletionTask(const NamespaceString& nss,
- const UUID& collectionUuid,
- const ChunkRange& range) {
+void removePersistentRangeDeletionTask(const NamespaceString& nss, UUID migrationId) {
withTemporaryOperationContext([&](OperationContext* opCtx) {
- try {
- PersistentTaskStore<RangeDeletionTask> store(opCtx,
- NamespaceString::kRangeDeletionNamespace);
- store.remove(opCtx,
- QUERY(RangeDeletionTask::kCollectionUuidFieldName
- << collectionUuid << RangeDeletionTask::kRangeFieldName
- << range.toBSON()));
- } catch (const DBException& e) {
- LOGV2(23770,
- "Failed to delete range deletion task for range {range} in collection "
- "{nss}{causedBy_e_what}",
- "range"_attr = range,
- "nss"_attr = nss,
- "causedBy_e_what"_attr = causedBy(e.what()));
- }
+ PersistentTaskStore<RangeDeletionTask> store(opCtx,
+ NamespaceString::kRangeDeletionNamespace);
+
+ store.remove(opCtx, QUERY(RangeDeletionTask::kIdFieldName << migrationId));
});
}
@@ -369,6 +401,7 @@ SharedSemiFuture<void> removeDocumentsInRange(
const UUID& collectionUuid,
const BSONObj& keyPattern,
const ChunkRange& range,
+ boost::optional<UUID> migrationId,
int numDocsToRemovePerBatch,
Seconds delayForActiveQueriesOnSecondariesToComplete,
Milliseconds delayBetweenBatches) {
@@ -401,14 +434,26 @@ SharedSemiFuture<void> removeDocumentsInRange(
collectionUuid,
keyPattern,
range,
+ migrationId,
numDocsToRemovePerBatch,
- delayBetweenBatches);
- })
- .then([=] {
- // We only need to do this if previous rounds succeed, because the only errors that
- // would propagate to this point are errors that indicate that this node is no longer
- // primary or is shutting down.
- return waitForDeletionsToMajorityReplicate(executor, nss, collectionUuid, range);
+ delayBetweenBatches)
+ .onCompletion([=](Status s) {
+ if (!s.isOK() &&
+ s.code() !=
+ ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist) {
+ // Propagate any errors to the onCompletion() handler below.
+ return ExecutorFuture<void>(executor, s);
+ }
+
+ // We wait for majority write concern even if the range deletion task document
+ // doesn't exist to guarantee the deletion (which must have happened earlier) is
+ // visible to the caller at non-local read concerns.
+ return waitForDeletionsToMajorityReplicate(executor, nss, collectionUuid, range)
+ .then([=] {
+ // Propagate any errors to the onCompletion() handler below.
+ return s;
+ });
+ });
})
.onCompletion([=](Status s) {
if (s.isOK()) {
@@ -425,24 +470,43 @@ SharedSemiFuture<void> removeDocumentsInRange(
"causedBy_s"_attr = causedBy(redact(s)));
}
- if (s.isOK() ||
- s.code() ==
- ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist) {
- removePersistentRangeDeletionTask(nss, collectionUuid, range);
+ if (s.code() == ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist) {
+ return Status::OK();
+ }
- LOGV2_DEBUG(23775,
- 1,
- "Completed removal of persistent range deletion task for {nss_ns} "
- "range {range}",
- "nss_ns"_attr = nss.ns(),
- "range"_attr = redact(range.toString()));
+ if (!migrationId ||
+ (!s.isOK() &&
+ s.code() !=
+ ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist)) {
+ // Propagate any errors to callers waiting on the result.
+ return s;
+ }
+
+ try {
+ removePersistentRangeDeletionTask(nss, std::move(*migrationId));
+ } catch (const DBException& e) {
+ LOGV2(23770,
+ "Failed to delete range deletion task for range {range} in collection "
+ "{nss}{causedBy_e_what}",
+ "range"_attr = range,
+ "nss"_attr = nss,
+ "causedBy_e_what"_attr = causedBy(e.what()));
+
+ return e.toStatus();
}
+ LOGV2_DEBUG(23775,
+ 1,
+ "Completed removal of persistent range deletion task for {nss_ns} "
+ "range {range}",
+ "nss_ns"_attr = nss.ns(),
+ "range"_attr = redact(range.toString()));
+
// Propagate any errors to callers waiting on the result.
return s;
})
.semi()
.share();
-} // namespace mongo
+}
} // namespace mongo
diff --git a/src/mongo/db/s/range_deletion_util.h b/src/mongo/db/s/range_deletion_util.h
index 6dc679bb226..a1f66b78578 100644
--- a/src/mongo/db/s/range_deletion_util.h
+++ b/src/mongo/db/s/range_deletion_util.h
@@ -30,6 +30,8 @@
#include <list>
+#include <boost/optional.hpp>
+
#include "mongo/db/namespace_string.h"
#include "mongo/executor/task_executor.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -68,6 +70,7 @@ SharedSemiFuture<void> removeDocumentsInRange(
const UUID& collectionUuid,
const BSONObj& keyPattern,
const ChunkRange& range,
+ boost::optional<UUID> migrationId,
int numDocsToRemovePerBatch,
Seconds delayForActiveQueriesOnSecondariesToComplete,
Milliseconds delayBetweenBatches);
diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp
index c157d84075c..b80447b17d3 100644
--- a/src/mongo/db/s/range_deletion_util_test.cpp
+++ b/src/mongo/db/s/range_deletion_util_test.cpp
@@ -143,6 +143,7 @@ TEST_F(RangeDeleterTest,
uuid(),
kShardKeyPattern,
range,
+ boost::none,
numDocsToRemovePerBatch,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -172,6 +173,7 @@ TEST_F(RangeDeleterTest,
uuid(),
kShardKeyPattern,
range,
+ boost::none,
numDocsToRemovePerBatch,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -195,6 +197,7 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeInsertsDocumentToNotifySecondarie
uuid(),
kShardKeyPattern,
range,
+ boost::none,
numDocsToRemovePerBatch,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -228,6 +231,7 @@ TEST_F(
uuid(),
kShardKeyPattern,
range,
+ boost::none,
numDocsToRemovePerBatch,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -260,6 +264,7 @@ TEST_F(RangeDeleterTest,
uuid(),
kShardKeyPattern,
range,
+ boost::none,
1 /* numDocsToRemovePerBatch */,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -291,6 +296,7 @@ TEST_F(RangeDeleterTest,
uuid(),
kShardKeyPattern,
range,
+ boost::none,
1 /* numDocsToRemovePerBatch */,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -318,6 +324,7 @@ TEST_F(RangeDeleterTest,
UUID::gen(),
kShardKeyPattern,
ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)),
+ boost::none,
10 /* numDocsToRemovePerBatch*/,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -338,6 +345,7 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeThrowsErrorWhenCollectionDoesNotE
UUID::gen(),
kShardKeyPattern,
ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)),
+ boost::none,
10 /* numDocsToRemovePerBatch*/,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -348,11 +356,23 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeThrowsErrorWhenCollectionDoesNotE
ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist);
}
-TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationAfterDeletingSingleBatch) {
+TEST_F(RangeDeleterTest, RemoveDocumentsInRangeLeavesDocumentsWhenTaskDocumentDoesNotExist) {
auto replCoord = checked_cast<repl::ReplicationCoordinatorMock*>(
repl::ReplicationCoordinator::get(getServiceContext()));
+ const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10));
+
+ DBDirectClient dbclient(operationContext());
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 5));
+
+ // We intentionally skip inserting a range deletion task document to simulate it already having
+ // been deleted.
+
+ // We should wait for replication after attempting to delete the document in the range even when
+ // the task document doesn't exist.
+ const auto expectedNumTimesWaitedForReplication = 1;
int numTimesWaitedForReplication = 0;
+
// Override special handler for waiting for replication to count the number of times we wait for
// replication.
replCoord->setAwaitReplicationReturnValueFunction(
@@ -361,6 +381,30 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationAfterDeletingS
return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0));
});
+ auto queriesComplete = SemiFuture<void>::makeReady();
+ auto cleanupComplete =
+ removeDocumentsInRange(executor(),
+ std::move(queriesComplete),
+ kNss,
+ uuid(),
+ kShardKeyPattern,
+ range,
+ UUID::gen(),
+ 10 /*numDocsToRemovePerBatch*/,
+ Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */,
+ Milliseconds(0) /* delayBetweenBatches */);
+
+ cleanupComplete.get();
+
+ // Document should not have been deleted.
+ ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 1);
+ ASSERT_EQ(numTimesWaitedForReplication, expectedNumTimesWaitedForReplication);
+}
+
+TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationAfterDeletingSingleBatch) {
+ auto replCoord = checked_cast<repl::ReplicationCoordinatorMock*>(
+ repl::ReplicationCoordinator::get(getServiceContext()));
+
const auto numDocsToInsert = 3;
const auto numDocsToRemovePerBatch = 10;
const auto numBatches = ceil((double)numDocsToInsert / numDocsToRemovePerBatch);
@@ -374,6 +418,26 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationAfterDeletingS
dbclient.insert(kNss.toString(), BSON(kShardKey << i));
}
+ // Insert range deletion task for this collection and range.
+ setFilteringMetadataWithUUID(uuid());
+ PersistentTaskStore<RangeDeletionTask> store(operationContext(),
+ NamespaceString::kRangeDeletionNamespace);
+ const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10));
+ RangeDeletionTask t(
+ UUID::gen(), kNss, uuid(), ShardId("donor"), range, CleanWhenEnum::kDelayed);
+ store.add(operationContext(), t);
+ // Document should be in the store.
+ ASSERT_EQUALS(countDocsInConfigRangeDeletions(store, operationContext()), 1);
+
+ int numTimesWaitedForReplication = 0;
+ // Override special handler for waiting for replication to count the number of times we wait for
+ // replication.
+ replCoord->setAwaitReplicationReturnValueFunction(
+ [&](OperationContext* opCtx, const repl::OpTime& opTime) {
+ ++numTimesWaitedForReplication;
+ return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0));
+ });
+
auto queriesComplete = SemiFuture<void>::makeReady();
auto cleanupComplete =
removeDocumentsInRange(executor(),
@@ -381,7 +445,8 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationAfterDeletingS
kNss,
uuid(),
kShardKeyPattern,
- ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)),
+ range,
+ t.getId(),
numDocsToRemovePerBatch,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -396,15 +461,6 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationOnlyOnceAfterS
auto replCoord = checked_cast<repl::ReplicationCoordinatorMock*>(
repl::ReplicationCoordinator::get(getServiceContext()));
- int numTimesWaitedForReplication = 0;
-
- // Set special handler for waiting for replication.
- replCoord->setAwaitReplicationReturnValueFunction(
- [&](OperationContext* opCtx, const repl::OpTime& opTime) {
- ++numTimesWaitedForReplication;
- return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0));
- });
-
const auto numDocsToInsert = 3;
const auto numDocsToRemovePerBatch = 1;
const auto numBatches = ceil((double)numDocsToInsert / numDocsToRemovePerBatch);
@@ -419,6 +475,26 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationOnlyOnceAfterS
dbclient.insert(kNss.toString(), BSON(kShardKey << i));
}
+ // Insert range deletion task for this collection and range.
+ setFilteringMetadataWithUUID(uuid());
+ PersistentTaskStore<RangeDeletionTask> store(operationContext(),
+ NamespaceString::kRangeDeletionNamespace);
+ const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10));
+ RangeDeletionTask t(
+ UUID::gen(), kNss, uuid(), ShardId("donor"), range, CleanWhenEnum::kDelayed);
+ store.add(operationContext(), t);
+ // Document should be in the store.
+ ASSERT_EQUALS(countDocsInConfigRangeDeletions(store, operationContext()), 1);
+
+ int numTimesWaitedForReplication = 0;
+
+ // Set special handler for waiting for replication.
+ replCoord->setAwaitReplicationReturnValueFunction(
+ [&](OperationContext* opCtx, const repl::OpTime& opTime) {
+ ++numTimesWaitedForReplication;
+ return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0));
+ });
+
auto queriesComplete = SemiFuture<void>::makeReady();
auto cleanupComplete =
removeDocumentsInRange(executor(),
@@ -426,7 +502,8 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationOnlyOnceAfterS
kNss,
uuid(),
kShardKeyPattern,
- ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)),
+ range,
+ t.getId(),
numDocsToRemovePerBatch,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */,
Milliseconds(0) /* delayBetweenBatches */);
@@ -441,6 +518,25 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeDoesNotWaitForReplicationIfErrorD
auto replCoord = checked_cast<repl::ReplicationCoordinatorMock*>(
repl::ReplicationCoordinator::get(getServiceContext()));
+ const auto numDocsToInsert = 3;
+ const auto numDocsToRemovePerBatch = 10;
+
+ DBDirectClient dbclient(operationContext());
+ for (auto i = 0; i < numDocsToInsert; ++i) {
+ dbclient.insert(kNss.toString(), BSON(kShardKey << i));
+ }
+
+ // Insert range deletion task for this collection and range.
+ setFilteringMetadataWithUUID(uuid());
+ PersistentTaskStore<RangeDeletionTask> store(operationContext(),
+ NamespaceString::kRangeDeletionNamespace);
+ const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10));
+ RangeDeletionTask t(
+ UUID::gen(), kNss, uuid(), ShardId("donor"), range, CleanWhenEnum::kDelayed);
+ store.add(operationContext(), t);
+ // Document should be in the store.
+ ASSERT_EQUALS(countDocsInConfigRangeDeletions(store, operationContext()), 1);
+
int numTimesWaitedForReplication = 0;
// Override special handler for waiting for replication to count the number of times we wait for
// replication.
@@ -450,14 +546,6 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeDoesNotWaitForReplicationIfErrorD
return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0));
});
- const auto numDocsToInsert = 3;
- const auto numDocsToRemovePerBatch = 10;
-
- DBDirectClient dbclient(operationContext());
- for (auto i = 0; i < numDocsToInsert; ++i) {
- dbclient.insert(kNss.toString(), BSON(kShardKey << i));
- }
-
// Pretend we stepped down.
replCoord->setCanAcceptNonLocalWrites(false);
std::ignore = replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY);
@@ -469,7 +557,8 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeDoesNotWaitForReplicationIfErrorD
kNss,
uuid(),
kShardKeyPattern,
- ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)),
+ range,
+ t.getId(),
numDocsToRemovePerBatch,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/,
Milliseconds(0) /* delayBetweenBatches */);
@@ -507,6 +596,7 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRetriesOnWriteConflictException)
uuid(),
kShardKeyPattern,
range,
+ t.getId(),
10 /*numDocsToRemovePerBatch*/,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */,
Milliseconds(0) /* delayBetweenBatches */);
@@ -545,6 +635,7 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRetriesOnUnexpectedError) {
uuid(),
kShardKeyPattern,
range,
+ t.getId(),
10 /*numDocsToRemovePerBatch*/,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */,
Milliseconds(0) /* delayBetweenBatches */);
@@ -576,6 +667,7 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRespectsDelayInBetweenBatches) {
uuid(),
kShardKeyPattern,
range,
+ boost::none,
numDocsToRemovePerBatch,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */,
delayBetweenBatches);
@@ -617,6 +709,7 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRespectsOrphanCleanupDelay) {
uuid(),
kShardKeyPattern,
range,
+ boost::none,
numDocsToRemovePerBatch,
orphanCleanupDelay,
Milliseconds(0) /* delayBetweenBatches */);
@@ -664,6 +757,7 @@ TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRemovesRangeDeletionTaskOnSuccess
uuid(),
kShardKeyPattern,
range,
+ t.getId(),
10 /*numDocsToRemovePerBatch*/,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */,
Milliseconds(0) /* delayBetweenBatches */);
@@ -701,6 +795,7 @@ TEST_F(RangeDeleterTest,
fakeUuid,
kShardKeyPattern,
range,
+ t.getId(),
10 /*numDocsToRemovePerBatch*/,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */,
Milliseconds(0) /* delayBetweenBatches */);
@@ -745,6 +840,7 @@ TEST_F(RangeDeleterTest,
uuid(),
kShardKeyPattern,
range,
+ t.getId(),
10 /*numDocsToRemovePerBatch*/,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */,
Milliseconds(0) /* delayBetweenBatches */);
@@ -772,6 +868,7 @@ DEATH_TEST_F(RangeDeleterTest, RemoveDocumentsInRangeCrashesIfInputFutureHasErro
uuid(),
kShardKeyPattern,
ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)),
+ boost::none,
10 /* numDocsToRemovePerBatch */,
Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */,
Milliseconds(0) /* delayBetweenBatches */);