summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
authorMatthew Saltz <matthew.saltz@mongodb.com>2019-12-31 07:32:55 +0000
committerevergreen <evergreen@mongodb.com>2019-12-31 07:32:55 +0000
commit3b2a7ee28a282bcfd2b329443ed4f5a2130b11f3 (patch)
treea7efa0a2cf9b5b0f6074910526d30087404372a1 /src/mongo
parent8e2c33cdf3552a7ba3a96fef162a3463f16a33eb (diff)
downloadmongo-3b2a7ee28a282bcfd2b329443ed4f5a2130b11f3.tar.gz
SERVER-45024 Make the MetadataManager a non-reusable object
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/db/s/collection_metadata_filtering_test.cpp16
-rw-r--r--src/mongo/db/s/collection_range_deleter.cpp16
-rw-r--r--src/mongo/db/s/collection_sharding_runtime.cpp125
-rw-r--r--src/mongo/db/s/collection_sharding_runtime.h41
-rw-r--r--src/mongo/db/s/collection_sharding_state_test.cpp51
-rw-r--r--src/mongo/db/s/get_shard_version_command.cpp7
-rw-r--r--src/mongo/db/s/metadata_manager.cpp89
-rw-r--r--src/mongo/db/s/metadata_manager.h43
-rw-r--r--src/mongo/db/s/metadata_manager_test.cpp195
9 files changed, 245 insertions, 338 deletions
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index b0effe64772..31eb7655e7e 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -44,7 +44,6 @@ class CollectionMetadataFilteringTest : public ShardServerTestFixture {
protected:
void setUp() override {
ShardServerTestFixture::setUp();
- _manager = std::make_shared<MetadataManager>(getServiceContext(), kNss, executor().get());
}
/**
@@ -106,7 +105,8 @@ protected:
css->setFilteringMetadata(operationContext(), CollectionMetadata(cm, ShardId("0")));
}
- _manager->setFilteringMetadata(CollectionMetadata(cm, ShardId("0")));
+ _manager = std::make_shared<MetadataManager>(
+ getServiceContext(), kNss, executor().get(), CollectionMetadata(cm, ShardId("0")));
auto& oss = OperationShardingState::get(operationContext());
const auto version = cm->getVersion(ShardId("0"));
@@ -143,8 +143,8 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInTheFuture) {
}
{
- const auto scm = _manager->getActiveMetadata(_manager, LogicalTime(Timestamp(100, 0)));
- testFn(*scm);
+ const auto scm = _manager->getActiveMetadata(LogicalTime(Timestamp(100, 0)));
+ testFn(scm);
}
}
@@ -173,8 +173,8 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInThePast) {
}
{
- const auto scm = _manager->getActiveMetadata(_manager, LogicalTime(Timestamp(50, 0)));
- testFn(*scm);
+ const auto scm = _manager->getActiveMetadata(LogicalTime(Timestamp(50, 0)));
+ testFn(scm);
}
}
@@ -211,8 +211,8 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsTooFarInThePastThrowsStal
}
{
- const auto scm = _manager->getActiveMetadata(_manager, LogicalTime(Timestamp(10, 0)));
- testFn(*scm);
+ const auto scm = _manager->getActiveMetadata(LogicalTime(Timestamp(10, 0)));
+ testFn(scm);
}
}
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index bc8f2b20080..15a37946f98 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -297,14 +297,13 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
}
}
- const auto scopedCollectionMetadata =
- metadataManager->getActiveMetadata(metadataManager, boost::none);
+ const auto scopedCollectionMetadata = metadataManager->getActiveMetadata(boost::none);
const auto& metadata = *scopedCollectionMetadata;
try {
swNumDeleted = doDeletion(opCtx,
collection,
- metadata->getKeyPattern(),
+ metadata.getKeyPattern(),
*range,
maxToDelete,
// _throwWriteConflictForTest is only used in unit tests, so
@@ -434,20 +433,13 @@ bool CollectionRangeDeleter::_checkCollectionMetadataStillValid(
Collection* collection,
std::shared_ptr<MetadataManager> metadataManager) {
- const auto scopedCollectionMetadata =
- metadataManager->getActiveMetadata(metadataManager, boost::none);
-
- if (!scopedCollectionMetadata) {
+ if (!metadataManager) {
LOG(0) << "Abandoning any range deletions because the metadata for " << nss.ns()
<< " was reset";
- stdx::lock_guard<Latch> lk(metadataManager->_managerLock);
- metadataManager->_clearAllCleanups(lk);
return false;
}
- const auto& metadata = *scopedCollectionMetadata;
-
- if (!forTestOnly && (!collection || !metadata->isSharded())) {
+ if (!forTestOnly && (!collection)) {
if (!collection) {
LOG(0) << "Abandoning any range deletions left over from dropped " << nss.ns();
} else {
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index b0903581dfb..cc0bb13ec95 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -102,11 +102,11 @@ boost::optional<ChunkVersion> getOperationReceivedVersion(OperationContext* opCt
CollectionShardingRuntime::CollectionShardingRuntime(ServiceContext* sc,
NamespaceString nss,
executor::TaskExecutor* rangeDeleterExecutor)
- : _stateChangeMutex(nss.toString()),
- _nss(std::move(nss)),
- _metadataManager(std::make_shared<MetadataManager>(sc, _nss, rangeDeleterExecutor)) {
+ : _nss(std::move(nss)),
+ _rangeDeleterExecutor(rangeDeleterExecutor),
+ _stateChangeMutex(nss.toString()) {
if (isNamespaceAlwaysUnsharded(_nss)) {
- _metadataManager->setFilteringMetadata(CollectionMetadata());
+ _metadataType = MetadataType::kUnsharded;
}
}
@@ -134,28 +134,28 @@ ScopedCollectionMetadata CollectionShardingRuntime::getOrphansFilter(OperationCo
}
ScopedCollectionMetadata CollectionShardingRuntime::getCurrentMetadata() {
- auto optMetadata = _metadataManager->getActiveMetadata(_metadataManager, boost::none);
-
+ auto optMetadata = _getCurrentMetadataIfKnown(boost::none);
if (!optMetadata)
return {kUnshardedCollection};
- return {std::move(*optMetadata)};
+ return *optMetadata;
}
boost::optional<ScopedCollectionMetadata> CollectionShardingRuntime::getCurrentMetadataIfKnown() {
- return _metadataManager->getActiveMetadata(_metadataManager, boost::none);
+ return _getCurrentMetadataIfKnown(boost::none);
}
boost::optional<ChunkVersion> CollectionShardingRuntime::getCurrentShardVersionIfKnown() {
- const auto optMetadata = _metadataManager->getActiveMetadata(_metadataManager, boost::none);
- if (!optMetadata)
- return boost::none;
-
- const auto& metadata = *optMetadata;
- if (!metadata->isSharded())
- return ChunkVersion::UNSHARDED();
-
- return metadata->getCollVersion();
+ stdx::lock_guard lk(_metadataManagerLock);
+ switch (_metadataType) {
+ case MetadataType::kUnknown:
+ return boost::none;
+ case MetadataType::kUnsharded:
+ return ChunkVersion::UNSHARDED();
+ case MetadataType::kSharded:
+ return _metadataManager->getActiveShardVersion();
+ };
+ MONGO_UNREACHABLE;
}
void CollectionShardingRuntime::checkShardVersionOrThrow(OperationContext* opCtx,
@@ -202,21 +202,39 @@ void CollectionShardingRuntime::setFilteringMetadata(OperationContext* opCtx,
str::stream() << "Namespace " << _nss.ns() << " must never be sharded.");
auto csrLock = CSRLock::lockExclusive(opCtx, this);
-
- _metadataManager->setFilteringMetadata(std::move(newMetadata));
+ stdx::lock_guard lk(_metadataManagerLock);
+
+ if (!newMetadata.isSharded()) {
+ LOG(0) << "Marking collection " << _nss.ns() << " as " << newMetadata.toStringBasic();
+ _metadataType = MetadataType::kUnsharded;
+ _metadataManager.reset();
+ } else if (!_metadataManager ||
+ !newMetadata.uuidMatches(_metadataManager->getCollectionUuid())) {
+ _metadataType = MetadataType::kSharded;
+ _metadataManager = std::make_shared<MetadataManager>(
+ opCtx->getServiceContext(), _nss, _rangeDeleterExecutor, newMetadata);
+ } else {
+ _metadataManager->setFilteringMetadata(std::move(newMetadata));
+ }
}
void CollectionShardingRuntime::clearFilteringMetadata() {
+ stdx::lock_guard lk(_metadataManagerLock);
if (!isNamespaceAlwaysUnsharded(_nss)) {
- _metadataManager->clearFilteringMetadata();
+ _metadataType = MetadataType::kUnknown;
+ _metadataManager.reset();
}
}
auto CollectionShardingRuntime::beginReceive(ChunkRange const& range) -> CleanupNotification {
+ stdx::lock_guard lk(_metadataManagerLock);
+ invariant(_metadataType == MetadataType::kSharded);
return _metadataManager->beginReceive(range);
}
void CollectionShardingRuntime::forgetReceive(const ChunkRange& range) {
+ stdx::lock_guard lk(_metadataManagerLock);
+ invariant(_metadataType == MetadataType::kSharded);
_metadataManager->forgetReceive(range);
}
@@ -224,6 +242,8 @@ auto CollectionShardingRuntime::cleanUpRange(ChunkRange const& range, CleanWhen
-> CleanupNotification {
Date_t time =
(when == kNow) ? Date_t{} : Date_t::now() + Seconds(orphanCleanupDelaySecs.load());
+ stdx::lock_guard lk(_metadataManagerLock);
+ invariant(_metadataType == MetadataType::kSharded);
return _metadataManager->cleanUpRange(range, time);
}
@@ -237,24 +257,18 @@ Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx,
{
AutoGetCollection autoColl(opCtx, nss, MODE_IX);
auto* const self = CollectionShardingRuntime::get(opCtx, nss);
-
- {
- // First, see if collection was dropped, but do it in a separate scope in order to
- // not hold reference on it, which would make it appear in use
- const auto optMetadata =
- self->_metadataManager->getActiveMetadata(self->_metadataManager, boost::none);
- if (!optMetadata)
- return {ErrorCodes::ConflictingOperationInProgress,
- "Collection being migrated had its metadata reset"};
-
- const auto& metadata = *optMetadata;
- if (!metadata->isSharded() || metadata->getCollVersion().epoch() != epoch) {
- return {ErrorCodes::ConflictingOperationInProgress,
- "Collection being migrated was dropped"};
- }
+ stdx::lock_guard lk(self->_metadataManagerLock);
+
+ // If the metadata was reset, the collection does not exist, or the collection was
+ // dropped and recreated since the metadata manager was created, return an error.
+ if (!self->_metadataManager || !autoColl.getCollection() ||
+ autoColl.getCollection()->uuid() != self->_metadataManager->getCollectionUuid()) {
+ return {ErrorCodes::ConflictingOperationInProgress,
+ "Collection being migrated was dropped or otherwise had its metadata "
+ "reset"};
}
- stillScheduled = self->trackOrphanedDataCleanup(orphanRange);
+ stillScheduled = self->_metadataManager->trackOrphanedDataCleanup(orphanRange);
if (!stillScheduled) {
log() << "Finished deleting " << nss.ns() << " range "
<< redact(orphanRange.toString());
@@ -274,15 +288,26 @@ Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx,
MONGO_UNREACHABLE;
}
-auto CollectionShardingRuntime::trackOrphanedDataCleanup(ChunkRange const& range)
- -> boost::optional<CleanupNotification> {
- return _metadataManager->trackOrphanedDataCleanup(range);
-}
-
boost::optional<ChunkRange> CollectionShardingRuntime::getNextOrphanRange(BSONObj const& from) {
+ stdx::lock_guard lk(_metadataManagerLock);
+ invariant(_metadataType == MetadataType::kSharded);
return _metadataManager->getNextOrphanRange(from);
}
+boost::optional<ScopedCollectionMetadata> CollectionShardingRuntime::_getCurrentMetadataIfKnown(
+ const boost::optional<LogicalTime>& atClusterTime) {
+ stdx::lock_guard lk(_metadataManagerLock);
+ switch (_metadataType) {
+ case MetadataType::kUnknown:
+ return boost::none;
+ case MetadataType::kUnsharded:
+ return ScopedCollectionMetadata{kUnshardedCollection};
+ case MetadataType::kSharded:
+ return _metadataManager->getActiveMetadata(atClusterTime);
+ };
+ MONGO_UNREACHABLE;
+}
+
boost::optional<ScopedCollectionMetadata> CollectionShardingRuntime::_getMetadataWithVersionCheckAt(
OperationContext* opCtx,
const boost::optional<mongo::LogicalTime>& atClusterTime,
@@ -303,24 +328,22 @@ boost::optional<ScopedCollectionMetadata> CollectionShardingRuntime::_getMetadat
auto csrLock = CSRLock::lockShared(opCtx, this);
- auto metadata = _metadataManager->getActiveMetadata(_metadataManager, atClusterTime);
- auto wantedShardVersion = ChunkVersion::UNSHARDED();
+ auto wantedShardVersion = [&] {
+ auto optionalWantedShardVersion = getCurrentShardVersionIfKnown();
+ return optionalWantedShardVersion ? *optionalWantedShardVersion : ChunkVersion::UNSHARDED();
+ }();
if (MONGO_unlikely(useFCV44CheckShardVersionProtocol.shouldFail())) {
LOG(0) << "Received shardVersion: " << receivedShardVersion << " for " << _nss.ns();
if (isCollection) {
+ auto shardVersionKnown = _metadataType != MetadataType::kUnknown;
LOG(0) << "Namespace " << _nss.ns() << " is collection, "
- << (metadata ? "have shardVersion cached" : "don't know shardVersion");
+ << (shardVersionKnown ? "have shardVersion cached" : "don't know shardVersion");
uassert(StaleConfigInfo(_nss, receivedShardVersion, wantedShardVersion),
"don't know shardVersion",
- metadata);
- wantedShardVersion = (*metadata)->getShardVersion();
+ shardVersionKnown);
}
LOG(0) << "Wanted shardVersion: " << wantedShardVersion << " for " << _nss.ns();
- } else {
- if (metadata && (*metadata)->isSharded()) {
- wantedShardVersion = (*metadata)->getShardVersion();
- }
}
auto criticalSectionSignal = [&] {
@@ -336,7 +359,7 @@ boost::optional<ScopedCollectionMetadata> CollectionShardingRuntime::_getMetadat
}
if (receivedShardVersion.isWriteCompatibleWith(wantedShardVersion)) {
- return metadata;
+ return _getCurrentMetadataIfKnown(atClusterTime);
}
//
diff --git a/src/mongo/db/s/collection_sharding_runtime.h b/src/mongo/db/s/collection_sharding_runtime.h
index ff5982b56e6..b0336a3143d 100644
--- a/src/mongo/db/s/collection_sharding_runtime.h
+++ b/src/mongo/db/s/collection_sharding_runtime.h
@@ -154,15 +154,6 @@ public:
CleanupNotification cleanUpRange(ChunkRange const& range, CleanWhen when);
/**
- * Reports whether any range still scheduled for deletion overlaps the argument range. If so,
- * it returns a notification n such that n->get(opCtx) will wake when the newest overlapping
- * range's deletion (possibly the one of interest) completes or fails. This should be called
- * again after each wakeup until it returns boost::none, because there can be more than one
- * range scheduled for deletion that overlaps its argument.
- */
- auto trackOrphanedDataCleanup(ChunkRange const& range) -> boost::optional<CleanupNotification>;
-
- /**
* Returns a range _not_ owned by this shard that starts no lower than the specified
* startingFrom key value, if any, or boost::none if there is no such range.
*/
@@ -189,23 +180,47 @@ private:
* Returns the latest version of collection metadata with filtering configured for
* atClusterTime if specified.
*/
+ boost::optional<ScopedCollectionMetadata> _getCurrentMetadataIfKnown(
+ const boost::optional<LogicalTime>& atClusterTime);
+
+ /**
+ * Returns the latest version of collection metadata with filtering configured for
+ * atClusterTime if specified. Throws StaleConfigInfo if the shard version attached to the
+ * operation context does not match the shard version on the active metadata object.
+ */
boost::optional<ScopedCollectionMetadata> _getMetadataWithVersionCheckAt(
OperationContext* opCtx,
const boost::optional<mongo::LogicalTime>& atClusterTime,
bool isCollection);
+ // Namespace this state belongs to.
+ const NamespaceString _nss;
+
+ // The executor used for deleting ranges of orphan chunks.
+ executor::TaskExecutor* _rangeDeleterExecutor;
+
// Object-wide ResourceMutex to protect changes to the CollectionShardingRuntime or objects held
// within (including the MigrationSourceManager, which is a decoration on the CSR). Use only the
// CSRLock to lock this mutex.
Lock::ResourceMutex _stateChangeMutex;
- // Namespace this state belongs to.
- const NamespaceString _nss;
-
// Tracks the migration critical section state for this collection.
ShardingMigrationCriticalSection _critSec;
- // Contains all the metadata associated with this collection.
+ mutable Mutex _metadataManagerLock =
+ MONGO_MAKE_LATCH("CollectionShardingRuntime::_metadataManagerLock");
+
+ // Tracks whether the filtering metadata is unknown, unsharded, or sharded
+ enum class MetadataType {
+ kUnknown,
+ kUnsharded,
+ kSharded
+ } _metadataType{MetadataType::kUnknown};
+
+ // If the collection is sharded, contains all the metadata associated with this collection.
+ //
+ // If the collection is unsharded, the metadata has not been set yet, or the metadata has been
+ // specifically reset by calling clearFilteringMetadata(), this will be nullptr.
std::shared_ptr<MetadataManager> _metadataManager;
};
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index a4bd7b58864..5c1dd392ae5 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -41,6 +41,18 @@ namespace {
const NamespaceString kTestNss("TestDB", "TestColl");
+void setCollectionFilteringMetadata(OperationContext* opCtx, CollectionMetadata metadata) {
+ AutoGetCollection autoColl(opCtx, kTestNss, MODE_X);
+ auto* const css = CollectionShardingRuntime::get(opCtx, kTestNss);
+ css->setFilteringMetadata(opCtx, std::move(metadata));
+
+ auto& oss = OperationShardingState::get(opCtx);
+ const auto version = metadata.getShardVersion();
+ BSONObjBuilder builder;
+ version.appendToCommand(&builder);
+ oss.initializeClientRoutingVersionsFromCommand(kTestNss, builder.obj());
+}
+
/**
* Constructs a CollectionMetadata suitable for refreshing a CollectionShardingState. The only
* salient detail is the argument `keyPattern` which, defining the shard key, selects the fields
@@ -58,23 +70,20 @@ CollectionMetadata makeAMetadata(BSONObj const& keyPattern) {
return CollectionMetadata(std::move(cm), ShardId("this"));
}
-class DeleteStateTest : public ShardServerTestFixture {
-protected:
- void setCollectionFilteringMetadata(CollectionMetadata metadata) {
- AutoGetCollection autoColl(operationContext(), kTestNss, MODE_X);
- auto* const css = CollectionShardingRuntime::get(operationContext(), kTestNss);
- css->setFilteringMetadata(operationContext(), std::move(metadata));
-
- auto& oss = OperationShardingState::get(operationContext());
- const auto version = metadata.getShardVersion();
- BSONObjBuilder builder;
- version.appendToCommand(&builder);
- oss.initializeClientRoutingVersionsFromCommand(kTestNss, builder.obj());
+class DeleteStateTest : public ShardServerTestFixture {};
+
+class CollectionShardingRuntimeTest : public ShardServerTestFixture {
+ void setUp() override {
+ ShardServerTestFixture::setUp();
+ }
+
+ void tearDown() override {
+ ShardServerTestFixture::tearDown();
}
};
TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
- setCollectionFilteringMetadata(CollectionMetadata());
+ setCollectionFilteringMetadata(operationContext(), CollectionMetadata());
AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
@@ -93,7 +102,8 @@ TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
// Push a CollectionMetadata with a shard key not including "_id"...
- setCollectionFilteringMetadata(makeAMetadata(BSON("key" << 1 << "key3" << 1)));
+ setCollectionFilteringMetadata(operationContext(),
+ makeAMetadata(BSON("key" << 1 << "key3" << 1)));
AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
@@ -115,7 +125,8 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
// Push a CollectionMetadata with a shard key that does have "_id" in the middle...
- setCollectionFilteringMetadata(makeAMetadata(BSON("key" << 1 << "_id" << 1 << "key2" << 1)));
+ setCollectionFilteringMetadata(operationContext(),
+ makeAMetadata(BSON("key" << 1 << "_id" << 1 << "key2" << 1)));
AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
@@ -136,7 +147,8 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
// Push a CollectionMetadata with a shard key "_id", hashed.
- setCollectionFilteringMetadata(makeAMetadata(BSON("_id"
+ setCollectionFilteringMetadata(operationContext(),
+ makeAMetadata(BSON("_id"
<< "hashed")));
AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
@@ -152,5 +164,12 @@ TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
ASSERT_FALSE(OpObserverShardingImpl::isMigrating(operationContext(), kTestNss, doc));
}
+
+TEST_F(CollectionShardingRuntimeTest,
+ GetCurrentMetadataReturnsNoneBeforeSetFilteringMetadataIsCalled) {
+ CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor().get());
+ ASSERT_FALSE(csr.getCurrentMetadataIfKnown());
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp
index 4dcc90be124..0fda498a2bf 100644
--- a/src/mongo/db/s/get_shard_version_command.cpp
+++ b/src/mongo/db/s/get_shard_version_command.cpp
@@ -119,12 +119,7 @@ public:
}
} else {
const auto& metadata = *optMetadata;
-
- if (metadata->isSharded()) {
- result.appendTimestamp("global", metadata->getShardVersion().toLong());
- } else {
- result.appendTimestamp("global", ChunkVersion::UNSHARDED().toLong());
- }
+ result.appendTimestamp("global", metadata->getShardVersion().toLong());
if (cmdObj["fullMetadata"].trueValue()) {
BSONObjBuilder metadataBuilder(result.subobjStart("metadata"));
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index fd7289649c8..9e9d67b77f5 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -196,23 +196,25 @@ public:
}
private:
- friend boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
- std::shared_ptr<MetadataManager>, const boost::optional<LogicalTime>&);
-
std::shared_ptr<MetadataManager> _metadataManager;
std::shared_ptr<MetadataManager::CollectionMetadataTracker> _metadataTracker;
};
MetadataManager::MetadataManager(ServiceContext* serviceContext,
NamespaceString nss,
- TaskExecutor* executor)
+ TaskExecutor* executor,
+ CollectionMetadata initialMetadata)
: _serviceContext(serviceContext),
_nss(std::move(nss)),
- _executor(executor),
- _receivingChunks(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()) {}
+ _collectionUuid(*initialMetadata.getChunkManager()->getUUID()),
+ _executor(std::move(executor)),
+ _receivingChunks(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()) {
+ _metadata.emplace_back(std::make_shared<CollectionMetadataTracker>(std::move(initialMetadata)));
+}
MetadataManager::~MetadataManager() {
- clearFilteringMetadata();
+ stdx::lock_guard<Latch> lg(_managerLock);
+ _clearAllCleanups(lg);
}
void MetadataManager::_clearAllCleanups(WithLock lock) {
@@ -230,14 +232,10 @@ void MetadataManager::_clearAllCleanups(WithLock, Status status) {
_rangesToClean.clear(status);
}
-boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
- std::shared_ptr<MetadataManager> self, const boost::optional<LogicalTime>& atClusterTime) {
+ScopedCollectionMetadata MetadataManager::getActiveMetadata(
+ const boost::optional<LogicalTime>& atClusterTime) {
stdx::lock_guard<Latch> lg(_managerLock);
- if (_metadata.empty()) {
- return boost::none;
- }
-
auto activeMetadataTracker = _metadata.back();
const auto& activeMetadata = activeMetadataTracker->metadata;
@@ -245,7 +243,7 @@ boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
// just return the active metadata
if (!atClusterTime || !activeMetadata->isSharded()) {
return ScopedCollectionMetadata(std::make_shared<RangePreserver>(
- lg, std::move(self), std::move(activeMetadataTracker)));
+ lg, shared_from_this(), std::move(activeMetadataTracker)));
}
auto chunkManager = activeMetadata->getChunkManager();
@@ -270,9 +268,7 @@ boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
size_t MetadataManager::numberOfMetadataSnapshots() const {
stdx::lock_guard<Latch> lg(_managerLock);
- if (_metadata.empty())
- return 0;
-
+ invariant(!_metadata.empty());
return _metadata.size() - 1;
}
@@ -290,49 +286,21 @@ int MetadataManager::numberOfEmptyMetadataSnapshots() const {
void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
stdx::lock_guard<Latch> lg(_managerLock);
-
- // Collection is becoming sharded
- if (_metadata.empty()) {
- LOG(0) << "Marking collection " << _nss.ns() << " as " << remoteMetadata.toStringBasic();
-
- invariant(_receivingChunks.empty());
- invariant(_rangesToClean.isEmpty());
-
- _setActiveMetadata(lg, std::move(remoteMetadata));
- return;
- }
-
- const auto& activeMetadata = _metadata.back()->metadata;
-
- // If the metadata being installed is unsharded or is sharded and has a different UUID from
- // ours, this means the collection was dropped and recreated, so we must entirely reset the
- // metadata state.
- if (!remoteMetadata.isSharded() ||
- (activeMetadata->isSharded() &&
- *activeMetadata->getChunkManager()->getUUID() !=
- remoteMetadata.getChunkManager()->getUUID())) {
- LOG(0) << "Updating metadata for collection " << _nss.ns() << " from "
- << activeMetadata->toStringBasic() << " to " << remoteMetadata.toStringBasic()
- << " due to UUID change";
-
- _receivingChunks.clear();
- _clearAllCleanups(lg);
- _metadata.clear();
-
- _setActiveMetadata(lg, std::move(remoteMetadata));
- return;
- }
+ invariant(!_metadata.empty());
+ // The active metadata should always be available (not boost::none)
+ invariant(_metadata.back()->metadata);
+ const auto& activeMetadata = _metadata.back()->metadata.get();
// We already have the same or newer version
- if (activeMetadata->getCollVersion().epoch() == remoteMetadata.getCollVersion().epoch() &&
- activeMetadata->getCollVersion() >= remoteMetadata.getCollVersion()) {
- LOG(1) << "Ignoring update of active metadata " << activeMetadata->toStringBasic()
+ if (activeMetadata.getCollVersion().epoch() == remoteMetadata.getCollVersion().epoch() &&
+ activeMetadata.getCollVersion() >= remoteMetadata.getCollVersion()) {
+ LOG(1) << "Ignoring update of active metadata " << activeMetadata.toStringBasic()
<< " with an older " << remoteMetadata.toStringBasic();
return;
}
LOG(0) << "Updating metadata for collection " << _nss.ns() << " from "
- << activeMetadata->toStringBasic() << " to " << remoteMetadata.toStringBasic()
+ << activeMetadata.toStringBasic() << " to " << remoteMetadata.toStringBasic()
<< " due to version change";
// Resolve any receiving chunks, which might have completed by now
@@ -356,13 +324,6 @@ void MetadataManager::setFilteringMetadata(CollectionMetadata remoteMetadata) {
_setActiveMetadata(lg, std::move(remoteMetadata));
}
-void MetadataManager::clearFilteringMetadata() {
- stdx::lock_guard<Latch> lg(_managerLock);
- _receivingChunks.clear();
- _clearAllCleanups(lg);
- _metadata.clear();
-}
-
void MetadataManager::_setActiveMetadata(WithLock wl, CollectionMetadata newMetadata) {
_metadata.emplace_back(std::make_shared<CollectionMetadataTracker>(std::move(newMetadata)));
_retireExpiredMetadata(wl);
@@ -423,9 +384,7 @@ void MetadataManager::append(BSONObjBuilder* builder) const {
}
pcArr.done();
- if (_metadata.empty()) {
- return;
- }
+ invariant(!_metadata.empty());
BSONArrayBuilder amrArr(builder->subarrayStart("activeMetadataRanges"));
for (const auto& entry : _metadata.back()->metadata->getChunks()) {
@@ -449,9 +408,7 @@ auto MetadataManager::_pushRangeToClean(WithLock lock, ChunkRange const& range,
void MetadataManager::_pushListToClean(WithLock, std::list<Deletion> ranges) {
auto when = _rangesToClean.add(std::move(ranges));
if (when) {
- auto collectionUuid = _metadata.back()->metadata->getChunkManager()->getUUID();
- invariant(collectionUuid);
- scheduleCleanup(_executor, _nss, *collectionUuid, *when);
+ scheduleCleanup(_executor, _nss, _collectionUuid, *when);
}
}
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index 90a0a7e233e..69fc46ec81a 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -48,31 +48,49 @@ namespace mongo {
class RangePreserver;
-class MetadataManager {
- MetadataManager(const MetadataManager&) = delete;
- MetadataManager& operator=(const MetadataManager&) = delete;
-
+/**
+ * Contains filtering metadata for a sharded collection.
+ */
+class MetadataManager : public std::enable_shared_from_this<MetadataManager> {
public:
using CleanupNotification = CollectionRangeDeleter::DeleteNotification;
using Deletion = CollectionRangeDeleter::Deletion;
MetadataManager(ServiceContext* serviceContext,
NamespaceString nss,
- executor::TaskExecutor* executor);
+ executor::TaskExecutor* executor,
+ CollectionMetadata initialMetadata);
~MetadataManager();
+ MetadataManager(const MetadataManager&) = delete;
+ MetadataManager& operator=(const MetadataManager&) = delete;
+
/**
- * If there is no filtering metadata set yet (setFilteringMetadata has not been called) returns
- * boost::none. Otherwise increments the usage counter of the active metadata and returns an
- * RAII object, which corresponds to it.
+ * Increments the usage counter of the active metadata and returns an RAII object, which
+ * corresponds to it.
*
* Holding a reference on a particular instance of the metadata means that orphan cleanup is not
* allowed to run and delete chunks which are covered by that metadata. When the returned
* ScopedCollectionMetadata goes out of scope, the reference counter on the metadata will be
* decremented and if it reaches to zero, orphan cleanup may proceed.
*/
- boost::optional<ScopedCollectionMetadata> getActiveMetadata(
- std::shared_ptr<MetadataManager> self, const boost::optional<LogicalTime>& atClusterTime);
+ ScopedCollectionMetadata getActiveMetadata(const boost::optional<LogicalTime>& atClusterTime);
+
+ /**
+ * Returns the shard version of the active metadata object.
+ */
+ ChunkVersion getActiveShardVersion() {
+ stdx::lock_guard<Latch> lg(_managerLock);
+ invariant(!_metadata.empty());
+ return _metadata.back()->metadata->getShardVersion();
+ }
+
+ /**
+ * Returns the UUID of the collection tracked by this MetadataManager object.
+ */
+ UUID getCollectionUuid() const {
+ return _collectionUuid;
+ }
/**
* Returns the number of CollectionMetadata objects being maintained on behalf of running
@@ -90,8 +108,6 @@ public:
void setFilteringMetadata(CollectionMetadata newMetadata);
- void clearFilteringMetadata();
-
void toBSONPending(BSONArrayBuilder& bb) const;
/**
@@ -236,6 +252,9 @@ private:
// Namespace for which this manager object applies
const NamespaceString _nss;
+ // The UUID for the collection tracked by this manager object.
+ const UUID _collectionUuid;
+
// The background task that deletes documents from orphaned chunk ranges.
executor::TaskExecutor* const _executor;
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 1e367cf7aea..145c3e44014 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2018-present MongoDB, Inc.
+ * Copyright (C) 2019-present MongoDB, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the Server Side Public License, version 1,
@@ -64,7 +64,8 @@ class MetadataManagerTest : public ShardServerTestFixture {
protected:
void setUp() override {
ShardServerTestFixture::setUp();
- _manager = std::make_shared<MetadataManager>(getServiceContext(), kNss, executor().get());
+ _manager = std::make_shared<MetadataManager>(
+ getServiceContext(), kNss, executor().get(), makeEmptyMetadata());
}
/**
@@ -156,45 +157,12 @@ protected:
std::shared_ptr<MetadataManager> _manager;
};
-TEST_F(MetadataManagerTest, InitialMetadataIsUnknown) {
- ASSERT(!_manager->getActiveMetadata(_manager, boost::none));
- ASSERT(!_manager->getActiveMetadata(_manager, LogicalTime(Timestamp(10))));
-
- ASSERT_EQ(0UL, _manager->numberOfMetadataSnapshots());
- ASSERT_EQ(0UL, _manager->numberOfRangesToClean());
- ASSERT_EQ(0UL, _manager->numberOfRangesToCleanStillInUse());
-}
-
-TEST_F(MetadataManagerTest, MetadataAfterClearIsUnknown) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
- ASSERT(_manager->getActiveMetadata(_manager, boost::none));
- ASSERT(_manager->getActiveMetadata(_manager, LogicalTime(Timestamp(10))));
-
- _manager->clearFilteringMetadata();
- ASSERT(!_manager->getActiveMetadata(_manager, boost::none));
- ASSERT(!_manager->getActiveMetadata(_manager, LogicalTime(Timestamp(10))));
-
- ASSERT_EQ(0UL, _manager->numberOfMetadataSnapshots());
- ASSERT_EQ(0UL, _manager->numberOfRangesToClean());
- ASSERT_EQ(0UL, _manager->numberOfRangesToCleanStillInUse());
-}
-
-TEST_F(MetadataManagerTest, GetActiveMetadataForUnshardedCollection) {
- _manager->setFilteringMetadata(CollectionMetadata());
-
- ASSERT(_manager->getActiveMetadata(_manager, boost::none));
- ASSERT(!(*_manager->getActiveMetadata(_manager, boost::none))->isSharded());
-
- ASSERT(_manager->getActiveMetadata(_manager, LogicalTime(Timestamp(10))));
- ASSERT(!(*_manager->getActiveMetadata(_manager, LogicalTime(Timestamp(10))))->isSharded());
-}
-
TEST_F(MetadataManagerTest, CleanUpForMigrateIn) {
_manager->setFilteringMetadata(makeEmptyMetadata());
// Sanity checks
- ASSERT((*_manager->getActiveMetadata(_manager, boost::none))->isSharded());
- ASSERT_EQ(0UL, (*_manager->getActiveMetadata(_manager, boost::none))->getChunks().size());
+ ASSERT(_manager->getActiveMetadata(boost::none)->isSharded());
+ ASSERT_EQ(0UL, _manager->getActiveMetadata(boost::none)->getChunks().size());
ChunkRange range1(BSON("key" << 0), BSON("key" << 10));
ChunkRange range2(BSON("key" << 10), BSON("key" << 20));
@@ -213,8 +181,6 @@ TEST_F(MetadataManagerTest, CleanUpForMigrateIn) {
}
TEST_F(MetadataManagerTest, AddRangeNotificationsBlockAndYield) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
-
ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
auto notifn1 = _manager->cleanUpRange(cr1, Date_t{});
@@ -229,76 +195,21 @@ TEST_F(MetadataManagerTest, AddRangeNotificationsBlockAndYield) {
optNotifn->abandon();
}
-TEST_F(MetadataManagerTest, NotificationBlocksUntilDeletion) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
-
- ChunkRange cr1(BSON("key" << 20), BSON("key" << 30));
- auto optNotif = _manager->trackOrphanedDataCleanup(cr1);
- ASSERT(!optNotif);
-
- {
- ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 0UL);
- ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL);
-
- auto scm1 = _manager->getActiveMetadata(_manager, boost::none); // and increment refcount
-
- const auto addChunk = [this] {
- _manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none),
- {BSON("key" << 0), BSON("key" << 20)}));
- };
-
- addChunk(); // push new metadata
- auto scm2 = _manager->getActiveMetadata(_manager, boost::none); // and increment refcount
- ASSERT_EQ(1ULL, (*scm2)->getChunks().size());
-
- // Simulate drop and recreate
- _manager->setFilteringMetadata(makeEmptyMetadata());
-
- addChunk(); // push new metadata
- auto scm3 = _manager->getActiveMetadata(_manager, boost::none); // and increment refcount
- ASSERT_EQ(1ULL, (*scm3)->getChunks().size());
-
- ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 0UL);
- ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL);
-
- optNotif = _manager->cleanUpRange(cr1, Date_t{});
- ASSERT(optNotif);
- ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 0UL);
- ASSERT_EQ(_manager->numberOfRangesToClean(), 1UL);
- }
-
- // At this point scm1,2,3 above are destroyed and the refcount of each metadata goes to zero
-
- ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 0UL);
- ASSERT_EQ(_manager->numberOfRangesToClean(), 1UL);
- ASSERT(!optNotif->ready());
-
- auto optNotif2 = _manager->trackOrphanedDataCleanup(cr1); // now tracking it in _rangesToClean
- ASSERT(optNotif2);
-
- ASSERT(!optNotif->ready());
- ASSERT(!optNotif2->ready());
- ASSERT(*optNotif == *optNotif2);
-
- optNotif->abandon();
- optNotif2->abandon();
-}
-
-TEST_F(MetadataManagerTest, CleanupNotificationsAreSignaledOnDropAndRecreate) {
+TEST_F(MetadataManagerTest, CleanupNotificationsAreSignaledWhenMetadataManagerIsDestroyed) {
const ChunkRange rangeToClean(BSON("key" << 20), BSON("key" << 30));
- _manager->setFilteringMetadata(makeEmptyMetadata());
- _manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none),
- {BSON("key" << 0), BSON("key" << 20)}));
+ _manager->setFilteringMetadata(cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none),
+ {BSON("key" << 0), BSON("key" << 20)}));
_manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none), rangeToClean));
- auto cursorOnMovedMetadata = _manager->getActiveMetadata(_manager, boost::none);
+ cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none), rangeToClean));
+
+ // Optional so that it can be reset.
+ boost::optional<ScopedCollectionMetadata> cursorOnMovedMetadata{
+ _manager->getActiveMetadata(boost::none)};
_manager->setFilteringMetadata(
- cloneMetadataMinusChunk(*_manager->getActiveMetadata(_manager, boost::none), rangeToClean));
+ cloneMetadataMinusChunk(_manager->getActiveMetadata(boost::none), rangeToClean));
auto notif = _manager->cleanUpRange(rangeToClean, Date_t{});
ASSERT(!notif.ready());
@@ -307,90 +218,68 @@ TEST_F(MetadataManagerTest, CleanupNotificationsAreSignaledOnDropAndRecreate) {
ASSERT(optNotif);
ASSERT(!optNotif->ready());
- _manager->setFilteringMetadata(makeEmptyMetadata());
+ // Reset the original shared_ptr. The cursorOnMovedMetadata will still contain its own copy of
+ // the shared_ptr though, so the destructor of ~MetadataManager won't yet be called.
+ _manager.reset();
+ ASSERT(!notif.ready());
+ ASSERT(!optNotif->ready());
+
+ // Destroys the ScopedCollectionMetadata object and causes the destructor of MetadataManager to
+ // run, which should trigger all deletion notifications.
+ cursorOnMovedMetadata.reset();
ASSERT(notif.ready());
ASSERT(optNotif->ready());
}
TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationSinglePending) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
-
ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
_manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none), cr1));
- ASSERT_EQ((*_manager->getActiveMetadata(_manager, boost::none))->getChunks().size(), 1UL);
+ cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none), cr1));
+ ASSERT_EQ(_manager->getActiveMetadata(boost::none)->getChunks().size(), 1UL);
}
TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationMultiplePending) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
-
ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
{
_manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none), cr1));
+ cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none), cr1));
ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL);
- ASSERT_EQ((*_manager->getActiveMetadata(_manager, boost::none))->getChunks().size(), 1UL);
+ ASSERT_EQ(_manager->getActiveMetadata(boost::none)->getChunks().size(), 1UL);
}
{
_manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none), cr2));
- ASSERT_EQ((*_manager->getActiveMetadata(_manager, boost::none))->getChunks().size(), 2UL);
+ cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none), cr2));
+ ASSERT_EQ(_manager->getActiveMetadata(boost::none)->getChunks().size(), 2UL);
}
}
TEST_F(MetadataManagerTest, RefreshAfterNotYetCompletedMigrationMultiplePending) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
-
ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
- _manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none),
- {BSON("key" << 50), BSON("key" << 60)}));
- ASSERT_EQ((*_manager->getActiveMetadata(_manager, boost::none))->getChunks().size(), 1UL);
+ _manager->setFilteringMetadata(cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none),
+ {BSON("key" << 50), BSON("key" << 60)}));
+ ASSERT_EQ(_manager->getActiveMetadata(boost::none)->getChunks().size(), 1UL);
}
TEST_F(MetadataManagerTest, BeginReceiveWithOverlappingRange) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
-
ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
_manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none), cr1));
+ cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none), cr1));
_manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none), cr2));
+ cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none), cr2));
ChunkRange crOverlap(BSON("key" << 5), BSON("key" << 35));
}
-TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
- _manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none),
- {BSON("key" << 0), BSON("key" << 10)}));
-
- // Now, pretend that the collection was dropped and recreated
- _manager->setFilteringMetadata(makeEmptyMetadata());
- _manager->setFilteringMetadata(
- cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none),
- {BSON("key" << 20), BSON("key" << 30)}));
-
- const auto chunks = (*_manager->getActiveMetadata(_manager, boost::none))->getChunks();
- ASSERT_EQ(1UL, chunks.size());
- const auto chunkEntry = chunks.begin();
- ASSERT_BSONOBJ_EQ(BSON("key" << 20), chunkEntry->first);
- ASSERT_BSONOBJ_EQ(BSON("key" << 30), chunkEntry->second);
-}
-
// Tests membership functions for _rangesToClean
TEST_F(MetadataManagerTest, RangesToCleanMembership) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
-
ChunkRange cr(BSON("key" << 0), BSON("key" << 10));
ASSERT_EQ(0UL, _manager->numberOfRangesToClean());
@@ -403,17 +292,16 @@ TEST_F(MetadataManagerTest, RangesToCleanMembership) {
}
TEST_F(MetadataManagerTest, ClearUnneededChunkManagerObjectsLastSnapshotInList) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
- auto scm1 = *_manager->getActiveMetadata(_manager, boost::none);
+ auto scm1 = _manager->getActiveMetadata(boost::none);
{
_manager->setFilteringMetadata(cloneMetadataPlusChunk(scm1, cr1));
ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 1UL);
ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL);
- auto scm2 = *_manager->getActiveMetadata(_manager, boost::none);
+ auto scm2 = _manager->getActiveMetadata(boost::none);
ASSERT_EQ(scm2->getChunks().size(), 1UL);
_manager->setFilteringMetadata(cloneMetadataPlusChunk(scm2, cr2));
ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 2UL);
@@ -424,27 +312,26 @@ TEST_F(MetadataManagerTest, ClearUnneededChunkManagerObjectsLastSnapshotInList)
// is now out of scope, but that in scm1 should remain
ASSERT_EQ(_manager->numberOfEmptyMetadataSnapshots(), 1);
ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 2UL);
- ASSERT_EQ((*_manager->getActiveMetadata(_manager, boost::none))->getChunks().size(), 2UL);
+ ASSERT_EQ(_manager->getActiveMetadata(boost::none)->getChunks().size(), 2UL);
}
TEST_F(MetadataManagerTest, ClearUnneededChunkManagerObjectSnapshotInMiddleOfList) {
- _manager->setFilteringMetadata(makeEmptyMetadata());
ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
ChunkRange cr3(BSON("key" << 50), BSON("key" << 80));
ChunkRange cr4(BSON("key" << 90), BSON("key" << 100));
- auto scm = *_manager->getActiveMetadata(_manager, boost::none);
+ auto scm = _manager->getActiveMetadata(boost::none);
_manager->setFilteringMetadata(cloneMetadataPlusChunk(scm, cr1));
ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 1UL);
ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL);
- auto scm2 = *_manager->getActiveMetadata(_manager, boost::none);
+ auto scm2 = _manager->getActiveMetadata(boost::none);
ASSERT_EQ(scm2->getChunks().size(), 1UL);
_manager->setFilteringMetadata(cloneMetadataPlusChunk(scm2, cr2));
{
- auto scm3 = *_manager->getActiveMetadata(_manager, boost::none);
+ auto scm3 = _manager->getActiveMetadata(boost::none);
ASSERT_EQ(scm3->getChunks().size(), 2UL);
_manager->setFilteringMetadata(cloneMetadataPlusChunk(scm3, cr3));
ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 3UL);
@@ -460,7 +347,7 @@ TEST_F(MetadataManagerTest, ClearUnneededChunkManagerObjectSnapshotInMiddleOfLis
* CollectionMetadataTracker{ metadata: xxx, orphans: [], usageCounter: 1}
* ]
*/
- scm2 = *_manager->getActiveMetadata(_manager, boost::none);
+ scm2 = _manager->getActiveMetadata(boost::none);
ASSERT_EQ(scm2->getChunks().size(), 3UL);
_manager->setFilteringMetadata(cloneMetadataPlusChunk(scm2, cr4));
ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 4UL);