| author | Alex Taskov <alex.taskov@mongodb.com> | 2019-11-06 18:08:40 +0000 |
|---|---|---|
| committer | evergreen <evergreen@mongodb.com> | 2019-11-06 18:08:40 +0000 |
| commit | ef94aa73d2f8b6222d90e7fc42c22be6bfabd7e4 (patch) | |
| tree | bf8d093c9a64ad0615d762c82953a621069a73f8 | |
| parent | ba9a82eab12ca8eb3afb35fcd27de2a970dd427d (diff) | |
| download | mongo-ef94aa73d2f8b6222d90e7fc42c22be6bfabd7e4.tar.gz | |
SERVER-44157 On beginning to receive a range, check overlapping ranges in config.rangeDeletions
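The heart of the change is a new conflict check on the recipient shard: before accepting a chunk, the destination resolves the collection's UUID, builds the incoming [min, max) range, and aborts the migration if config.rangeDeletions already holds an overlapping deletion task for that UUID. A condensed sketch of that flow, based on the `_migrateDriver` and migration_util changes in the diff below (surrounding plumbing and state handling elided):

```cpp
// Condensed sketch of the new destination-side check (see migration_destination_manager.cpp
// and migration_util.cpp below). Assumes the usual _migrateDriver context: an OperationContext*
// opCtx plus the migration's _nss, _min and _max members.
const UUID collectionUuid = [&] {
    AutoGetCollection autoGetCollection(opCtx, _nss, MODE_IS);
    return autoGetCollection.getCollection()->uuid();
}();

const ChunkRange range(_min, _max);

// checkForConflictingDeletions() runs overlappingRangeQuery() against config.rangeDeletions,
// matching any stored task whose half-open [min, max) range intersects the incoming range
// for this collection UUID.
if (migrationutil::checkForConflictingDeletions(opCtx, range, collectionUuid)) {
    _setStateFail(str::stream() << "Migration aborted because range overlaps with a "
                                   "range that is scheduled for deletion: collection: "
                                << _nss.ns() << " range: " << redact(range.toString()));
    return;
}
```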
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | jstests/sharding/migration_fails_if_exists_in_rangedeletions.js | 45 |
| -rw-r--r-- | jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js | 4 |
| -rw-r--r-- | src/mongo/db/s/SConscript | 2 |
| -rw-r--r-- | src/mongo/db/s/migration_destination_manager.cpp | 24 |
| -rw-r--r-- | src/mongo/db/s/migration_util.cpp | 17 |
| -rw-r--r-- | src/mongo/db/s/migration_util.h | 14 |
| -rw-r--r-- | src/mongo/db/s/migration_util_test.cpp | 140 |
| -rw-r--r-- | src/mongo/db/s/persistent_task_store.h | 133 |
| -rw-r--r-- | src/mongo/db/s/persistent_task_store_test.cpp | 209 |
| -rw-r--r-- | src/mongo/db/s/range_deletion_task.idl | 14 |
| -rw-r--r-- | src/mongo/db/s/shard_server_op_observer.cpp | 3 |
| -rw-r--r-- | src/mongo/s/catalog/type_chunk.cpp | 3 |
| -rw-r--r-- | src/mongo/s/catalog/type_chunk.h | 3 |
| -rw-r--r-- | src/mongo/util/uuid.h | 1 |
14 files changed, 595 insertions, 17 deletions
diff --git a/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js b/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js
new file mode 100644
index 00000000000..5f6c74a708e
--- /dev/null
+++ b/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js
@@ -0,0 +1,45 @@
+(function() {
+"use strict";
+
+load("jstests/libs/uuid_util.js");
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + "." + collName;
+
+// Create 2 shards with 3 replicas each.
+let st = new ShardingTest({shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
+
+// Create a sharded collection with two chunks: [-inf, 50), [50, inf)
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 50}}));
+
+const collectionUuid = getUUIDFromConfigCollections(st.s, ns);
+
+let deletionTask = {
+    nss: ns,
+    collectionUuid: collectionUuid,
+    pending: true,
+    range: {min: {x: 70}, max: {x: 90}},
+    whenToClean: "now"
+};
+
+const rangeDeletionNs = "config.rangeDeletions";
+let deletionsColl = st.shard1.getCollection(rangeDeletionNs);
+
+// Write range to deletion collection
+deletionsColl.insert(deletionTask);
+
+function commandFailsWithMsg(result, msg) {
+    assert(result.errmsg.includes(msg));
+}
+
+// Move chunk [50, inf) to shard1 and expect failure.
+commandFailsWithMsg(
+    st.s.adminCommand({moveChunk: ns, find: {x: 50}, to: st.shard1.shardName}),
+    "Migration aborted because range overlaps with a range that is scheduled for deletion: collection:");
+
+st.stop();
+})();
diff --git a/jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js b/jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js
index 798c7ade579..1ead001a4e4
--- a/jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js
+++ b/jstests/sharding/updates_to_rangedeletions_collection_trigger_range_deletions.js
@@ -65,7 +65,7 @@ let testColl = testDB.foo;
 
 let deletionTask = {
     nss: ns,
-    collectionUuid: extractUUIDFromObject(collectionUuid),
+    collectionUuid: collectionUuid,
     pending: "true",
     range: {min: {x: 70}, max: {x: 90}},
     whenToClean: "now"
@@ -113,7 +113,7 @@ let testColl = testDB.foo;
 
 let deletionTask = {
     nss: ns,
-    collectionUuid: "0",
+    collectionUuid: UUID(),
     pending: "true",
     range: {min: {x: 70}, max: {x: 90}},
     whenToClean: "now"
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 772c8750c89..cc722d4c549
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -385,6 +385,7 @@ env.CppUnitTest(
         'implicit_create_collection_test.cpp',
         'migration_chunk_cloner_source_legacy_test.cpp',
         'migration_destination_manager_test.cpp',
+        'migration_util_test.cpp',
         'namespace_metadata_change_notifications_test.cpp',
         'session_catalog_migration_destination_test.cpp',
         'session_catalog_migration_source_test.cpp',
@@ -417,6 +418,7 @@ env.CppUnitTest(
         'collection_range_deleter_test.cpp',
         'collection_sharding_state_test.cpp',
         'metadata_manager_test.cpp',
+        'persistent_task_store_test.cpp',
         'persistent_task_queue_test.cpp'
     ],
     LIBDEPS=[
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 8692878129e..0df33635636
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -52,6 +52,8 @@
 #include "mongo/db/s/collection_sharding_state.h"
 #include "mongo/db/s/migration_util.h"
 #include "mongo/db/s/move_timing_helper.h"
+#include "mongo/db/s/persistent_task_store.h"
+#include "mongo/db/s/range_deletion_task_gen.h"
 #include "mongo/db/s/sharding_runtime_d_params_gen.h"
 #include "mongo/db/s/sharding_statistics.h"
 #include "mongo/db/s/start_chunk_clone_request.h"
@@ -734,12 +736,28 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
     auto fromShard =
         uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, _fromShard));
 
+    const UUID collectionUuid = [&] {
+        AutoGetCollection autoGetCollection(opCtx, _nss, MODE_IS);
+        return autoGetCollection.getCollection()->uuid();
+    }();
+
     {
+        const ChunkRange range(_min, _max);
+
+        if (migrationutil::checkForConflictingDeletions(opCtx, range, collectionUuid)) {
+            _setStateFail(str::stream() << "Migration aborted because range overlaps with a "
+                                           "range that is scheduled for deletion: collection: "
+                                        << _nss.ns() << " range: " << redact(range.toString()));
+            return;
+        }
+
+        // TODO(SERVER-44163): Delete this block after the MigrationCoordinator has been integrated
+        // into the source. It will be replaced by the checkForOverlapping call.
+
         // 2. Synchronously delete any data which might have been left orphaned in the range
         // being moved, and wait for completion
 
-        const ChunkRange footprint(_min, _max);
-        auto notification = _notePending(opCtx, footprint);
+        auto notification = _notePending(opCtx, range);
         // Wait for the range deletion to report back
         if (!notification.waitStatus(opCtx).isOK()) {
             _setStateFail(redact(notification.waitStatus(opCtx).reason()));
@@ -747,7 +765,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* opCtx) {
         }
 
         // Wait for any other, overlapping queued deletions to drain
-        auto status = CollectionShardingRuntime::waitForClean(opCtx, _nss, _epoch, footprint);
+        auto status = CollectionShardingRuntime::waitForClean(opCtx, _nss, _epoch, range);
         if (!status.isOK()) {
             _setStateFail(redact(status.reason()));
             return;
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index a66109e73ba..900caf1d0b8
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -62,6 +62,23 @@ BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
     return builder.obj();
 }
 
+Query overlappingRangeQuery(const ChunkRange& range, const UUID& uuid) {
+    return QUERY(RangeDeletionTask::kCollectionUuidFieldName
+                 << uuid << RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMinKey << LT
+                 << range.getMax() << RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMaxKey
+                 << GT << range.getMin());
+}
+
+bool checkForConflictingDeletions(OperationContext* opCtx,
+                                  const ChunkRange& range,
+                                  const UUID& uuid) {
+    PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
+
+    auto results = store.query(opCtx, overlappingRangeQuery(range, uuid));
+
+    return !results.empty();
+}
+
 } // namespace migrationutil
 
 } // namespace mongo
diff --git a/src/mongo/db/s/migration_util.h b/src/mongo/db/s/migration_util.h
index 67b59761477..e1481211936
--- a/src/mongo/db/s/migration_util.h
+++ b/src/mongo/db/s/migration_util.h
@@ -29,6 +29,10 @@
 
 #pragma once
 
+#include "mongo/db/s/persistent_task_store.h"
+#include "mongo/db/s/range_deletion_task_gen.h"
+#include "mongo/s/catalog/type_chunk.h"
+
 namespace mongo {
 
 class BSONObj;
@@ -56,6 +60,16 @@ BSONObj makeMigrationStatusDocument(const NamespaceString& nss,
                                     const BSONObj& min,
                                     const BSONObj& max);
 
+// Creates a query object that can used to find overlapping ranges in the pending range deletions
+// collection.
+Query overlappingRangeQuery(const ChunkRange& range, const UUID& uuid);
+
+// Checks the pending range deletions collection to see if there are any pending ranges that
+// conflict with the passed in range.
+bool checkForConflictingDeletions(OperationContext* opCtx,
+                                  const ChunkRange& range,
+                                  const UUID& uuid);
+
 } // namespace migrationutil
 
 } // namespace mongo
diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp
new file mode 100644
index 00000000000..9938123df42
--- /dev/null
+++ b/src/mongo/db/s/migration_util_test.cpp
@@ -0,0 +1,140 @@
+/**
+ * Copyright (C) 2019-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/s/migration_util.h"
+#include "mongo/db/s/persistent_task_store.h"
+#include "mongo/s/shard_server_test_fixture.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+using MigrationUtilsTest = ShardServerTestFixture;
+
+RangeDeletionTask createDeletionTask(NamespaceString nss, const UUID& uuid, int min, int max) {
+    RangeDeletionTask task{nss, uuid, CleanWhenEnum::kNow};
+
+    task.setRange(ChunkRange{BSON("_id" << min), BSON("_id" << max)});
+
+    return task;
+}
+
+// Test that overlappingRangeQuery() can handle the cases that we expect to encounter.
+//           1    1    2    2    3    3    4    4    5
+// 0----5----0----5----0----5----0----5----0----5----0
+//                          |---------O                 Range 1 [25, 35)
+//      |---------O                                     Range 2 [5, 15)
+//           |---------O                                Range 4 [10, 20)
+// |----O                                               Range 5 [0, 5)
+//             |-----O                                  Range 7 [12, 18)
+//                               |---------O            Range 8 [30, 40)
+//           Ranges in store
+// |---------O                                          [0, 10)
+//           |---------O                                [10, 20)
+//                                         |---------O  [40 50)
+//           1    1    2    2    3    3    4    4    5
+// 0----5----0----5----0----5----0----5----0----5----0
+TEST_F(MigrationUtilsTest, TestOverlappingRangeQuery) {
+    auto opCtx = operationContext();
+    const auto uuid = UUID::gen();
+
+    PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
+
+    store.add(opCtx, createDeletionTask(NamespaceString{"one"}, uuid, 0, 10));
+    store.add(opCtx, createDeletionTask(NamespaceString{"two"}, uuid, 10, 20));
+    store.add(opCtx, createDeletionTask(NamespaceString{"three"}, uuid, 40, 50));
+
+    ASSERT_EQ(store.count(opCtx), 3);
+
+    // 1. Non-overlapping range
+    auto range1 = ChunkRange{BSON("_id" << 25), BSON("_id" << 35)};
+    auto results = store.query(opCtx, migrationutil::overlappingRangeQuery(range1, uuid));
+    ASSERT_EQ(results.size(), 0);
+    ASSERT_FALSE(migrationutil::checkForConflictingDeletions(opCtx, range1, uuid));
+
+    // 2, 3. Find overlapping ranges, either direction.
+    auto range2 = ChunkRange{BSON("_id" << 5), BSON("_id" << 15)};
+    results = store.query(opCtx, migrationutil::overlappingRangeQuery(range2, uuid));
+    ASSERT_EQ(results.size(), 2);
+    ASSERT(migrationutil::checkForConflictingDeletions(opCtx, range2, uuid));
+
+    // 4. Identical range
+    auto range4 = ChunkRange{BSON("_id" << 10), BSON("_id" << 20)};
+    results = store.query(opCtx, migrationutil::overlappingRangeQuery(range4, uuid));
+    ASSERT_EQ(results.size(), 1);
+    ASSERT(migrationutil::checkForConflictingDeletions(opCtx, range4, uuid));
+
+    // 5, 6. Find overlapping edge, either direction.
+    auto range5 = ChunkRange{BSON("_id" << 0), BSON("_id" << 5)};
+    results = store.query(opCtx, migrationutil::overlappingRangeQuery(range5, uuid));
+    ASSERT_EQ(results.size(), 1);
+    ASSERT(migrationutil::checkForConflictingDeletions(opCtx, range5, uuid));
+    auto range6 = ChunkRange{BSON("_id" << 5), BSON("_id" << 10)};
+    results = store.query(opCtx, migrationutil::overlappingRangeQuery(range6, uuid));
+    ASSERT_EQ(results.size(), 1);
+    ASSERT(migrationutil::checkForConflictingDeletions(opCtx, range6, uuid));
+
+    // 7. Find fully enclosed range
+    auto range7 = ChunkRange{BSON("_id" << 12), BSON("_id" << 18)};
+    results = store.query(opCtx, migrationutil::overlappingRangeQuery(range7, uuid));
+    ASSERT_EQ(results.size(), 1);
+    ASSERT(migrationutil::checkForConflictingDeletions(opCtx, range7, uuid));
+
+    // 8, 9. Open max doesn't overlap closed min, either direction.
+    auto range8 = ChunkRange{BSON("_id" << 30), BSON("_id" << 40)};
+    results = store.query(opCtx, migrationutil::overlappingRangeQuery(range8, uuid));
+    ASSERT_EQ(results.size(), 0);
+    ASSERT_FALSE(migrationutil::checkForConflictingDeletions(opCtx, range8, uuid));
+    auto range9 = ChunkRange{BSON("_id" << 20), BSON("_id" << 30)};
+    results = store.query(opCtx, migrationutil::overlappingRangeQuery(range9, uuid));
+    ASSERT_EQ(results.size(), 0);
+    ASSERT_FALSE(migrationutil::checkForConflictingDeletions(opCtx, range9, uuid));
+}
+
+TEST_F(MigrationUtilsTest, TestInvalidUUID) {
+    auto opCtx = operationContext();
+    const auto uuid = UUID::gen();
+
+    PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);
+
+    store.add(opCtx, createDeletionTask(NamespaceString{"one"}, uuid, 0, 10));
+    store.add(opCtx, createDeletionTask(NamespaceString{"two"}, uuid, 10, 20));
+    store.add(opCtx, createDeletionTask(NamespaceString{"three"}, uuid, 40, 50));
+
+    ASSERT_EQ(store.count(opCtx), 3);
+
+    const auto wrongUuid = UUID::gen();
+    auto range = ChunkRange{BSON("_id" << 5), BSON("_id" << 15)};
+    auto results = store.query(opCtx, migrationutil::overlappingRangeQuery(range, wrongUuid));
+    ASSERT_EQ(results.size(), 0);
+    ASSERT_FALSE(migrationutil::checkForConflictingDeletions(opCtx, range, wrongUuid));
+}
+
+} // namespace
+} // namespace mongo
\ No newline at end of file
diff --git a/src/mongo/db/s/persistent_task_store.h b/src/mongo/db/s/persistent_task_store.h
new file mode 100644
index 00000000000..85a5801d723
--- /dev/null
+++ b/src/mongo/db/s/persistent_task_store.h
@@ -0,0 +1,133 @@
+/**
+ * Copyright (C) 2019-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/ops/write_ops_gen.h"
+#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/write_concern.h"
+#include "mongo/rpc/get_status_from_command_result.h"
+
+namespace mongo {
+
+namespace WriteConcerns {
+
+const WriteConcernOptions kMajorityWriteConcern{WriteConcernOptions::kMajority,
+                                                WriteConcernOptions::SyncMode::UNSET,
+                                                WriteConcernOptions::kWriteConcernTimeoutSharding};
+
+}
+
+template <typename T>
+class PersistentTaskStore {
+public:
+    PersistentTaskStore(OperationContext* opCtx, NamespaceString storageNss)
+        : _storageNss(std::move(storageNss)) {}
+
+    void add(OperationContext* opCtx,
+             const T& task,
+             const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) {
+        DBDirectClient dbClient(opCtx);
+
+        const auto commandResponse = dbClient.runCommand([&] {
+            write_ops::Insert insertOp(_storageNss);
+            insertOp.setDocuments({task.toBSON()});
+            return insertOp.serialize({});
+        }());
+
+        const auto commandReply = commandResponse->getCommandReply();
+        uassertStatusOK(getStatusFromWriteCommandReply(commandReply));
+
+        WriteConcernResult ignoreResult;
+        auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
+        uassertStatusOK(waitForWriteConcern(opCtx, latestOpTime, writeConcern, &ignoreResult));
+    }
+
+    void remove(OperationContext* opCtx,
+                Query query,
+                const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) {
+        DBDirectClient dbClient(opCtx);
+
+        auto commandResponse = dbClient.runCommand([&] {
+            write_ops::Delete deleteOp(_storageNss);
+
+            deleteOp.setDeletes({[&] {
+                write_ops::DeleteOpEntry entry;
+
+                entry.setQ(query.obj);
+                entry.setMulti(true);
+
+                return entry;
+            }()});
+
+            return deleteOp.serialize({});
+        }());
+
+        const auto commandReply = commandResponse->getCommandReply();
+        uassertStatusOK(getStatusFromWriteCommandReply(commandReply));
+
+        WriteConcernResult ignoreResult;
+        auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
+        uassertStatusOK(waitForWriteConcern(opCtx, latestOpTime, writeConcern, &ignoreResult));
+    }
+
+    std::vector<T> query(OperationContext* opCtx, Query query) {
+        DBDirectClient dbClient(opCtx);
+
+        auto cursor = dbClient.query(_storageNss, query);
+
+        std::vector<T> results;
+
+        while (cursor->more()) {
+            auto bson = cursor->next();
+            auto t = T::parse(
+                IDLParserErrorContext("PersistentTaskStore:" + _storageNss.toString()), bson);
+            results.push_back(t);
+        }
+
+        return results;
+    }
+
+    size_t count(OperationContext* opCtx, Query query = Query()) {
+        DBDirectClient client(opCtx);
+
+        auto projection = BSON("_id" << 1);
+        auto cursor = client.query(_storageNss, query, 0, 0, &projection);
+
+        return cursor->itcount();
+    }
+
+private:
+    NamespaceString _storageNss;
+};
+
+} // namespace mongo
\ No newline at end of file
diff --git a/src/mongo/db/s/persistent_task_store_test.cpp b/src/mongo/db/s/persistent_task_store_test.cpp
new file mode 100644
index 00000000000..700ed1bbf6d
--- /dev/null
+++ b/src/mongo/db/s/persistent_task_store_test.cpp
@@ -0,0 +1,209 @@
+/**
+ * Copyright (C) 2019-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/s/persistent_task_store.h"
+#include "mongo/s/shard_server_test_fixture.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+using PersistentTaskStoreTest = ShardServerTestFixture;
+
+const NamespaceString kNss{"test.foo"};
+
+struct TestTask {
+    std::string key;
+    int min;
+    int max;
+
+    TestTask() : min(0), max(std::numeric_limits<int>::max()) {}
+    TestTask(std::string key, int min = 0, int max = std::numeric_limits<int>::max())
+        : key(std::move(key)), min(min), max(max) {}
+    TestTask(BSONObj bson)
+        : key(bson.getField("key").String()),
+          min(bson.getField("min").Int()),
+          max(bson.getField("max").Int()) {}
+
+    static TestTask parse(IDLParserErrorContext, BSONObj bson) {
+        return TestTask{bson};
+    }
+
+    void serialize(BSONObjBuilder& builder) const {
+        builder.append("key", key);
+        builder.append("min", min);
+        builder.append("max", max);
+    }
+
+    BSONObj toBSON() const {
+        BSONObjBuilder builder;
+        serialize(builder);
+        return builder.obj();
+    }
+};
+
+TEST_F(PersistentTaskStoreTest, TestAdd) {
+    auto opCtx = operationContext();
+
+    PersistentTaskStore<TestTask> store(opCtx, kNss);
+
+    store.add(opCtx, TestTask{"one", 0, 10});
+    store.add(opCtx, TestTask{"two", 10, 20});
+    store.add(opCtx, TestTask{"three", 40, 50});
+
+    ASSERT_EQ(store.count(opCtx), 3);
+}
+
+TEST_F(PersistentTaskStoreTest, TestQuery) {
+    auto opCtx = operationContext();
+
+    PersistentTaskStore<TestTask> store(opCtx, kNss);
+
+    store.add(opCtx, TestTask{"one", 0, 10});
+    store.add(opCtx, TestTask{"two", 10, 20});
+    store.add(opCtx, TestTask{"three", 40, 50});
+
+    ASSERT_EQ(store.count(opCtx), 3);
+
+    // No match.
+    auto results = store.query(opCtx,
+                               QUERY("key"
+                                     << "four"));
+    ASSERT_EQ(results.size(), 0);
+
+    // Multiple matches.
+    results = store.query(opCtx, QUERY("min" << GTE << 10));
+    ASSERT_EQ(results.size(), 2);
+
+    // Single match.
+    results = store.query(opCtx,
+                          QUERY("key"
+                                << "one"));
+    ASSERT_EQ(results.size(), 1);
+}
+
+TEST_F(PersistentTaskStoreTest, TestRemove) {
+    auto opCtx = operationContext();
+
+    PersistentTaskStore<TestTask> store(opCtx, kNss);
+
+    store.add(opCtx, TestTask{"one", 0, 10});
+    store.add(opCtx, TestTask{"two", 10, 20});
+    store.add(opCtx, TestTask{"three", 40, 50});
+
+    ASSERT_EQ(store.count(opCtx), 3);
+
+    store.remove(opCtx,
+                 QUERY("key"
+                       << "one"));
+
+    ASSERT_EQ(store.count(opCtx), 2);
+}
+
+TEST_F(PersistentTaskStoreTest, TestRemoveMultiple) {
+    auto opCtx = operationContext();
+
+    PersistentTaskStore<TestTask> store(opCtx, kNss);
+
+    store.add(opCtx, TestTask{"one", 0, 10});
+    store.add(opCtx, TestTask{"two", 10, 20});
+    store.add(opCtx, TestTask{"three", 40, 50});
+
+    ASSERT_EQ(store.count(opCtx), 3);
+
+    // Remove multipe overlapping ranges.
+    store.remove(opCtx, QUERY("min" << GTE << 10));
+
+    ASSERT_EQ(store.count(opCtx), 1);
+}
+
+TEST_F(PersistentTaskStoreTest, TestWritesPersistAcrossInstances) {
+    auto opCtx = operationContext();
+
+    {
+        PersistentTaskStore<TestTask> store(opCtx, kNss);
+
+        store.add(opCtx, TestTask{"one", 0, 10});
+        store.add(opCtx, TestTask{"two", 10, 20});
+        store.add(opCtx, TestTask{"three", 40, 50});
+
+        ASSERT_EQ(store.count(opCtx), 3);
+    }
+
+    {
+        PersistentTaskStore<TestTask> store(opCtx, kNss);
+        ASSERT_EQ(store.count(opCtx), 3);
+
+        auto results = store.query(opCtx, QUERY("min" << GTE << 10));
+        ASSERT_EQ(results.size(), 2);
+
+        store.remove(opCtx,
+                     QUERY("key"
+                           << "two"));
+        ASSERT_EQ(store.count(opCtx), 2);
+
+        results = store.query(opCtx, QUERY("min" << GTE << 10));
+        ASSERT_EQ(results.size(), 1);
+    }
+
+    {
+        PersistentTaskStore<TestTask> store(opCtx, kNss);
+        ASSERT_EQ(store.count(opCtx), 2);
+
+        auto results = store.query(opCtx, QUERY("min" << GTE << 10));
+        ASSERT_EQ(results.size(), 1);
+    }
+}
+
+TEST_F(PersistentTaskStoreTest, TestCountWithQuery) {
+    auto opCtx = operationContext();
+
+    PersistentTaskStore<TestTask> store(opCtx, kNss);
+
+    store.add(opCtx, TestTask{"one", 0, 10});
+    store.add(opCtx, TestTask{"two", 10, 20});
+    store.add(opCtx, TestTask{"two", 40, 50});
+
+    ASSERT_EQ(store.count(opCtx,
+                          QUERY("key"
+                                << "two")),
+              2);
+
+    // Remove multipe overlapping ranges.
+    auto range = ChunkRange{BSON("_id" << 5), BSON("_id" << 10)};
+    store.remove(opCtx, QUERY("min" << 10));
+
+    ASSERT_EQ(store.count(opCtx,
+                          QUERY("key"
+                                << "two")),
+              1);
+}
+
+} // namespace
+} // namespace mongo
\ No newline at end of file
diff --git a/src/mongo/db/s/range_deletion_task.idl b/src/mongo/db/s/range_deletion_task.idl
index 37d51703aab..9af29c5181d
--- a/src/mongo/db/s/range_deletion_task.idl
+++ b/src/mongo/db/s/range_deletion_task.idl
@@ -46,12 +46,12 @@ types:
         deserializer: "mongo::ChunkRange::fromBSONThrowing"
 
 enums:
-  CleanWhen:
-    description: "When to cleanup ranges."
-    type: string
-    values:
-      kNow: "now"
-      kDelayed: "delayed"
+    CleanWhen:
+        description: "When to cleanup ranges."
+        type: string
+        values:
+            kNow: "now"
+            kDelayed: "delayed"
 
 structs:
     rangeDeletionTask:
@@ -62,7 +62,7 @@ structs:
             type: namespacestring
             description: "The namespace of the collection that the chunk belongs to."
         collectionUuid:
-            type: namespacestring
+            type: uuid
             description: "The UUID of the collection that the chunk belongs to."
         range:
             type: chunkRangeType
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
index 931d04abee2..fe9b34c8e64
--- a/src/mongo/db/s/shard_server_op_observer.cpp
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -341,8 +341,7 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE
         AutoGetCollection autoColl(opCtx, deletionTask.getNss(), MODE_IS);
 
         if (!autoColl.getCollection() ||
-            autoColl.getCollection()->uuid() !=
-                UUID::parse(deletionTask.getCollectionUuid().toString())) {
+            autoColl.getCollection()->uuid() != deletionTask.getCollectionUuid()) {
             LOG(0) << "Collection UUID doesn't match the one marked for deletion: "
                    << autoColl.getCollection()->uuid()
                    << " != " << deletionTask.getCollectionUuid();
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index fd7e9313148..b9736e478b4
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -60,9 +60,6 @@ const BSONField<BSONObj> ChunkType::history("history");
 
 namespace {
 
-const char kMinKey[] = "min";
-const char kMaxKey[] = "max";
-
 /**
  * Extracts an Object value from 'obj's field 'fieldName'. Sets the result to 'bsonElement'.
  */
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index ce5526b01b0..1905280392b
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -52,6 +52,9 @@ class StatusWith;
  */
 class ChunkRange {
 public:
+    static constexpr char kMinKey[] = "min";
+    static constexpr char kMaxKey[] = "max";
+
     ChunkRange(BSONObj minKey, BSONObj maxKey);
 
     /**
diff --git a/src/mongo/util/uuid.h b/src/mongo/util/uuid.h
index 86bd828088c..ae82c800264
--- a/src/mongo/util/uuid.h
+++ b/src/mongo/util/uuid.h
@@ -76,6 +76,7 @@ class UUID {
     friend class LogicalSessionToClient;
     friend class LogicalSessionIdToClient;
    friend class LogicalSessionFromClient;
+    friend class RangeDeletionTask;
     friend class ResolvedKeyId;
     friend class repl::CollectionInfo;
     friend class repl::OplogEntryBase;
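For reference, the PersistentTaskStore<T> template introduced above (src/mongo/db/s/persistent_task_store.h) is a small typed wrapper over a durable collection: add() inserts a task document with majority write concern, query() and count() read documents back through the IDL-generated parser, and remove() deletes by query. The sketch below is modeled on the new unit tests rather than taken verbatim from the patch; collectionUuid stands in for whatever UUID the caller already has, and opCtx is assumed to come from a shard-server fixture or command context.

```cpp
// Sketch only: mirrors migration_util_test.cpp / persistent_task_store_test.cpp above.
// Assumes opCtx (OperationContext*) and collectionUuid (UUID) are provided by the caller.
PersistentTaskStore<RangeDeletionTask> store(opCtx, NamespaceString::kRangeDeletionNamespace);

// Persist a deletion task covering the [0, 10) shard key range of the collection.
RangeDeletionTask task{NamespaceString{"test.foo"}, collectionUuid, CleanWhenEnum::kNow};
task.setRange(ChunkRange{BSON("_id" << 0), BSON("_id" << 10)});
store.add(opCtx, task);  // waits for majority write concern by default

// Check whether any stored task overlaps an incoming range for the same collection.
const ChunkRange incoming{BSON("_id" << 5), BSON("_id" << 15)};
if (migrationutil::checkForConflictingDeletions(opCtx, incoming, collectionUuid)) {
    // Individual matches can be listed or cleaned up with the same query object.
    auto overlapping =
        store.query(opCtx, migrationutil::overlappingRangeQuery(incoming, collectionUuid));
    store.remove(opCtx, migrationutil::overlappingRangeQuery(incoming, collectionUuid));
}
```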