author    Nathan Myers <nathan.myers@10gen.com>    2017-05-17 13:57:04 -0400
committer Nathan Myers <nathan.myers@10gen.com>    2017-05-18 19:32:51 -0400
commit    3e3c1c44f36a6121ca707e7d9700f9a5689a1c25 (patch)
tree      aa1881e1856b03acea2d524d9c4c1b96bca3aac3
parent    9ff6b2d78cee808d633dee952d31e4d4f1fe0dd0 (diff)
SERVER-28841 (forward-port) simpler CollectionMetadata lifetime management
-rw-r--r--  src/mongo/db/s/collection_metadata.h           13
-rw-r--r--  src/mongo/db/s/collection_range_deleter.cpp    10
-rw-r--r--  src/mongo/db/s/collection_range_deleter.h      17
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp   21
-rw-r--r--  src/mongo/db/s/collection_sharding_state.h      4
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp           328
-rw-r--r--  src/mongo/db/s/metadata_manager.h              41
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp      176
8 files changed, 290 insertions, 320 deletions
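
Before the per-file hunks, here is a minimal stand-alone model of the lifetime scheme this commit moves to. All names (Metadata, Manager, Scoped) and types are simplified stand-ins chosen for illustration; this is not the mongo code and not part of the patch. The manager keeps a std::list of shared_ptr-owned metadata snapshots whose back() is the active mapping, each snapshot carries its own usage counter and deferred-deletion ranges, and a scoped RAII handle co-owns both the manager and one snapshot, retiring fully released snapshots from the front of the list when it is destroyed.

// Minimal stand-in model of the new lifetime scheme; simplified names, not mongo code.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <list>
#include <memory>
#include <mutex>
#include <string>
#include <utility>

struct Metadata {
    std::string description;                     // stands in for the chunk map/version
    struct Tracker {
        uint32_t usageCounter{0};                // Scoped handles currently pointing here
        std::list<std::pair<int, int>> orphans;  // [min,max) ranges deferred for deletion
    } tracker;
};

class Manager : public std::enable_shared_from_this<Manager> {
public:
    class Scoped;

    Scoped getActive();                          // defined below; hands out co-ownership of *this

    void refresh(std::string description) {
        std::lock_guard<std::mutex> lk(_lock);
        auto md = std::make_shared<Metadata>();
        md->description = std::move(description);
        _metadata.push_back(std::move(md));      // back() becomes the active mapping
        _retireExpired();
    }

private:
    void _retireExpired() {                      // caller must hold _lock
        // Pop snapshots from the front while nothing references them; never pop back().
        while (_metadata.size() > 1 && _metadata.front()->tracker.usageCounter == 0) {
            // The real code would splice front()'s orphans to the range deleter here.
            _metadata.pop_front();
        }
    }

    std::mutex _lock;
    std::list<std::shared_ptr<Metadata>> _metadata;
};

class Manager::Scoped {
public:
    Scoped(std::shared_ptr<Manager> mgr, std::shared_ptr<Metadata> md)
        : _manager(std::move(mgr)), _metadata(std::move(md)) {
        ++_metadata->tracker.usageCounter;       // caller (getActive) holds the manager lock
    }
    Scoped(Scoped&&) = default;
    Scoped(const Scoped&) = delete;
    Scoped& operator=(const Scoped&) = delete;
    ~Scoped() {
        if (!_manager)
            return;                              // moved-from handle
        std::lock_guard<std::mutex> lk(_manager->_lock);
        assert(_metadata->tracker.usageCounter != 0);
        if (--_metadata->tracker.usageCounter == 0)
            _manager->_retireExpired();
    }
    Metadata* operator->() const { return _metadata.get(); }

private:
    std::shared_ptr<Manager> _manager;           // co-owns the manager, so the handle can outlive its owner
    std::shared_ptr<Metadata> _metadata;
};

Manager::Scoped Manager::getActive() {
    std::lock_guard<std::mutex> lk(_lock);
    return Scoped(shared_from_this(), _metadata.back());
}

int main() {
    auto manager = std::make_shared<Manager>();
    manager->refresh("v1");
    {
        auto scoped = manager->getActive();      // pins the "v1" snapshot
        manager->refresh("v2");                  // "v1" is kept: a handle still references it
        std::cout << scoped->description << '\n';
    }                                            // handle destroyed; "v1" is retired
    manager->refresh("v3");                      // pushes "v3"; the unused "v2" is retired
    std::cout << manager->getActive()->description << '\n';
}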
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index de318225ec6..a290172362d 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -29,6 +29,7 @@
#pragma once
#include "mongo/db/range_arithmetic.h"
+#include "mongo/db/s/collection_range_deleter.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/shard_key_pattern.h"
@@ -47,9 +48,10 @@ class ChunkType;
* here allow building a new incarnation of a collection's metadata based on an existing
* one (e.g, we're splitting in a given collection.).
*
- * This class is immutable once constructed.
+ * This class's chunk mapping is immutable once constructed.
*/
class CollectionMetadata {
+
public:
/**
* The main way to construct CollectionMetadata is through MetadataLoader or clone() methods.
@@ -166,6 +168,12 @@ public:
std::string toStringBasic() const;
private:
+ struct Tracker {
+ uint32_t usageCounter{0};
+ std::list<CollectionRangeDeleter::Deletion> orphans;
+ };
+ Tracker _tracker;
+
/**
* Builds _rangesMap from the contents of _chunksMap.
*/
@@ -188,6 +196,9 @@ private:
// w.r.t. _chunkMap but we expect high chunk contiguity, especially in small
// installations.
RangeMap _rangesMap;
+
+ friend class ScopedCollectionMetadata;
+ friend class MetadataManager;
};
} // namespace mongo
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index c768ea93f1d..456dfe92ac6 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -94,8 +94,8 @@ bool CollectionRangeDeleter::cleanUpNextRange(OperationContext* opCtx,
if ((!collection || !scopedCollectionMetadata) && !rangeDeleterForTestOnly) {
log() << "Abandoning collection " << nss.ns()
<< " range deletions left over from sharded state";
- stdx::lock_guard<stdx::mutex> lk(css->_metadataManager._managerLock);
- css->_metadataManager._clearAllCleanups();
+ stdx::lock_guard<stdx::mutex> lk(css->_metadataManager->_managerLock);
+ css->_metadataManager->_clearAllCleanups();
return false; // collection was unsharded
}
@@ -103,9 +103,9 @@ bool CollectionRangeDeleter::cleanUpNextRange(OperationContext* opCtx,
// scheduled to do deletions on, or another one with the same name. But it doesn't
// matter: if it has deletions scheduled, now is as good a time as any to do them.
auto self = rangeDeleterForTestOnly ? rangeDeleterForTestOnly
- : &css->_metadataManager._rangesToClean;
+ : &css->_metadataManager->_rangesToClean;
{
- stdx::lock_guard<stdx::mutex> scopedLock(css->_metadataManager._managerLock);
+ stdx::lock_guard<stdx::mutex> scopedLock(css->_metadataManager->_managerLock);
if (self->isEmpty())
return false;
@@ -127,7 +127,7 @@ bool CollectionRangeDeleter::cleanUpNextRange(OperationContext* opCtx,
log() << "No documents remain to delete in " << nss << " range "
<< redact(range->toString());
}
- stdx::lock_guard<stdx::mutex> scopedLock(css->_metadataManager._managerLock);
+ stdx::lock_guard<stdx::mutex> scopedLock(css->_metadataManager->_managerLock);
self->_pop(wrote.getStatus());
return true;
}
diff --git a/src/mongo/db/s/collection_range_deleter.h b/src/mongo/db/s/collection_range_deleter.h
index 96643a0431e..49f6b9fd0a2 100644
--- a/src/mongo/db/s/collection_range_deleter.h
+++ b/src/mongo/db/s/collection_range_deleter.h
@@ -91,18 +91,15 @@ public:
DeleteNotification notification{};
};
+ CollectionRangeDeleter() = default;
+ ~CollectionRangeDeleter();
+
//
// All of the following members must be called only while the containing MetadataManager's lock
// is held (or in its destructor), except cleanUpNextRange.
//
/**
- * Normally, construct with the collection name and ShardingState's dedicated executor.
- */
- CollectionRangeDeleter() = default;
- ~CollectionRangeDeleter();
-
- /**
* Splices range's elements to the list to be cleaned up by the deleter thread. Returns true
* if the list is newly non-empty, so the caller knows to schedule a deletion task.
*/
@@ -142,11 +139,11 @@ public:
* be called without locks.
*
* The 'rangeDeleterForTestOnly' is used as a utility for unit-tests that directly test the
- * CollectionRangeDeleter class so they do not need to set up
- * CollectionShardingState/MetadataManager.
+ * CollectionRangeDeleter class so they do not need to set up CollectionShardingState and
+ * MetadataManager objects.
*
- * Returns true if it should be scheduled to run again because there is more data to be deleted
- * or false otherwise.
+ * Returns true if it should be scheduled to run again because there might be more documents to
+ * delete, or false otherwise.
*/
static bool cleanUpNextRange(OperationContext*,
NamespaceString const& nss,
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 43669e60428..6440415d511 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -86,7 +86,8 @@ private:
CollectionShardingState::CollectionShardingState(ServiceContext* sc, NamespaceString nss)
: _nss(std::move(nss)),
- _metadataManager{sc, _nss, ShardingState::get(sc)->getRangeDeleterTaskExecutor()} {}
+ _metadataManager(std::make_shared<MetadataManager>(
+ sc, _nss, ShardingState::get(sc)->getRangeDeleterTaskExecutor())) {}
CollectionShardingState::~CollectionShardingState() {
invariant(!_sourceMgr);
@@ -107,30 +108,30 @@ CollectionShardingState* CollectionShardingState::get(OperationContext* opCtx,
}
ScopedCollectionMetadata CollectionShardingState::getMetadata() {
- return _metadataManager.getActiveMetadata();
+ return _metadataManager->getActiveMetadata(_metadataManager);
}
void CollectionShardingState::refreshMetadata(OperationContext* opCtx,
std::unique_ptr<CollectionMetadata> newMetadata) {
invariant(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_X));
- _metadataManager.refreshActiveMetadata(std::move(newMetadata));
+ _metadataManager->refreshActiveMetadata(std::move(newMetadata));
}
void CollectionShardingState::markNotShardedAtStepdown() {
- _metadataManager.refreshActiveMetadata(nullptr);
+ _metadataManager->refreshActiveMetadata(nullptr);
}
auto CollectionShardingState::beginReceive(ChunkRange const& range) -> CleanupNotification {
- return _metadataManager.beginReceive(range);
+ return _metadataManager->beginReceive(range);
}
void CollectionShardingState::forgetReceive(const ChunkRange& range) {
- _metadataManager.forgetReceive(range);
+ _metadataManager->forgetReceive(range);
}
auto CollectionShardingState::cleanUpRange(ChunkRange const& range) -> CleanupNotification {
- return _metadataManager.cleanUpRange(range);
+ return _metadataManager->cleanUpRange(range);
}
MigrationSourceManager* CollectionShardingState::getMigrationSourceManager() {
@@ -194,7 +195,7 @@ Status CollectionShardingState::waitForClean(OperationContext* opCtx,
// First, see if collection was dropped.
auto css = CollectionShardingState::get(opCtx, nss);
{
- auto metadata = css->_metadataManager.getActiveMetadata();
+ auto metadata = css->_metadataManager->getActiveMetadata(css->_metadataManager);
if (!metadata || metadata->getCollVersion().epoch() != epoch) {
return {ErrorCodes::StaleShardVersion, "Collection being migrated was dropped"};
}
@@ -222,11 +223,11 @@ Status CollectionShardingState::waitForClean(OperationContext* opCtx,
auto CollectionShardingState::trackOrphanedDataCleanup(ChunkRange const& range)
-> boost::optional<CleanupNotification> {
- return _metadataManager.trackOrphanedDataCleanup(range);
+ return _metadataManager->trackOrphanedDataCleanup(range);
}
boost::optional<KeyRange> CollectionShardingState::getNextOrphanRange(BSONObj const& from) {
- return _metadataManager.getNextOrphanRange(from);
+ return _metadataManager->getNextOrphanRange(from);
}
bool CollectionShardingState::isDocumentInMigratingChunk(OperationContext* opCtx,
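
The constructor and getMetadata() changes above make CollectionShardingState hold its MetadataManager through a shared_ptr and pass that shared_ptr into getActiveMetadata(). The toy program below (invented names, not the mongo classes) illustrates the ownership idea this enables: the returned handle co-owns the manager, so the manager remains usable even after its owning object is destroyed, e.g. when the collection is dropped while a query still holds metadata.

#include <iostream>
#include <memory>

struct Manager {
    ~Manager() { std::cout << "manager destroyed\n"; }
    int activeVersion = 7;
};

struct Handle {
    std::shared_ptr<Manager> manager;  // co-ownership keeps Manager alive
};

struct Owner {                         // stands in for CollectionShardingState
    std::shared_ptr<Manager> manager = std::make_shared<Manager>();
    Handle getMetadata() {
        return Handle{manager};        // hand out the shared_ptr, not a raw `this`
    }
};

int main() {
    Handle h;
    {
        Owner owner;
        h = owner.getMetadata();
    }                                  // owner destroyed; its Manager survives because h co-owns it
    std::cout << h.manager->activeVersion << '\n';  // prints 7; "manager destroyed" only after h goes away
}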
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 23eb813b2be..ff0489f4d9a 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -99,7 +99,7 @@ public:
* BSON output of the pending metadata into a BSONArray
*/
void toBSONPending(BSONArrayBuilder& bb) const {
- _metadataManager.toBSONPending(bb);
+ _metadataManager->toBSONPending(bb);
}
/**
@@ -233,7 +233,7 @@ private:
const NamespaceString _nss;
// Contains all the metadata associated with this collection.
- MetadataManager _metadataManager;
+ std::shared_ptr<MetadataManager> _metadataManager;
// If this collection is serving as a source shard for chunk migration, this value will be
// non-null. To write this value there needs to be X-lock on the collection in order to
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 94335dd65eb..ed4abe1245e 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -44,135 +44,100 @@
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
-// MetadataManager exists only as a data member of a CollectionShardingState object.
+// MetadataManager maintains std::shared_ptr<CollectionMetadataManager> pointers in a list
+// _metadata. It also contains a CollectionRangeDeleter that queues orphan ranges to delete in
+// a background thread, and a record of the ranges being migrated in, to avoid deleting them.
//
-// It maintains a set of std::shared_ptr<MetadataManager::Tracker> pointers: one in
-// _activeMetadataTracker, and more in a list _metadataInUse. It also contains a
-// CollectionRangeDeleter that queues orphan ranges to delete in a background thread, and a record
-// of the ranges being migrated in, to avoid deleting them.
+// Free-floating CollectionMetadata objects are maintained by these pointers, and also by clients
+// via shared pointers in ScopedCollectionMetadata objects.
//
-// Free-floating MetadataManager::Tracker objects are maintained by these pointers, and also by
-// clients in ScopedCollectionMetadata objects obtained via CollectionShardingState::getMetadata().
-//
-// A Tracker object keeps:
-// a std::unique_ptr<CollectionMetadata>, owning a map of the chunks owned by the shard,
-// a key range [min,max) of orphaned documents that may be deleted when the count goes to zero,
-// a count of the ScopedCollectionMetadata objects that have pointers to it,
-// a mutex lock, serializing access to:
-// a pointer back to the MetadataManager object that created it.
-//
-// __________________________
-// (s): std::shared_ptr<> Clients:| ScopedCollectionMetadata |
-// (u): std::unique_ptr<> | tracker (s)-----------+
-// ________________________________ |__________________________| | |
-// | CollectionShardingState | | tracker (s)--------+ +
-// | | |__________________________| | | |
-// | ____________________________ | | tracker (s)----+ | |
-// | | MetadataManager | | |_________________________| | | |
-// | | | | ________________________ | | |
-// | | _activeMetadataTracker (s)-------->| Tracker |<------+ | | (1 reference)
-// | | | | | ______________________|_ | |
-// | | [ (s),-------------->| Tracker | | | (0 references)
-// | | (s),---------\ | | ______________________|_ | |
-// | | _metadataInUse ... ] | | \----->| Tracker |<----+-+ (2 references)
-// | | ________________________ | | | | | | ______________________
-// | | | CollectionRangeDeleter | | | | | | metadata (u)------------->| CollectionMetadata |
-// | | | | | | | | | [ orphans [min,max) ] | | |
-// | | | _orphans [ [min,max), | | | | | | usageCounter | | _chunksMap |
-// | | | [min,max), | | | | | | trackerLock: | | _chunkVersion |
-// | | | ... ] | |<--------------manager | | ... |
-// | | | | | | |_| | | |______________________|
-// | | |________________________| | | |_| |
-// | | | | |________________________|
+// The _tracker member of CollectionMetadata keeps:
+// a count of the ScopedCollectionMetadata objects that have pointers to the CollectionMetadata
+// a list of key ranges [min,max) of orphaned documents that may be deleted when the count goes
+// to zero
+// ____________________________
+// (s): std::shared_ptr<> Clients:| ScopedCollectionMetadata |
+// _________________________ +----(s) manager metadata (s)-----------------+
+// | CollectionShardingState | | |____________________________| | |
+// | _metadataManager (s) | +-------(s) manager metadata (s)-------------+ |
+// |____________________|____| | |____________________________| | | |
+// ____________________v_______ +----------(s) manager metadata (s) | | |
+// | MetadataManager | | |________________________|___| | |
+// | |<---+ | | |
+// | | ________________________ | | |
+// | /----------->| CollectionMetadata |<----+ (1 use) | |
+// | [(s),----/ | | ______________________|_ | |
+// | (s),------------------->| CollectionMetadata | (0 uses) | |
+// | _metadata: (s)]----\ | | | ______________________|_ | |
+// | \--------------->| CollectionMetadata | | |
+// | | | | | | | |
+// | _rangesToClean: | | | | _tracker: |<------------+ |
+// | ________________________ | | | | ____________________ |<--------------+
+// | | CollectionRangeDeleter | | | | | | Tracker | | (2 uses)
+// | | | | | | | | | |
+// | | _orphans [[min,max), | | | | | | usageCounter | |
+// | | [min,max), | | | | | | orphans [min,max), | |
+// | | ... ] | | | | | | ... ] | |
+// | |________________________| | |_| | |____________________| |
+// |____________________________| | | _chunksMap |
+// |_| _chunkVersion |
+// | ... |
+// |________________________|
//
// A ScopedCollectionMetadata object is created and held during a query, and destroyed when the
-// query no longer needs access to the collection. Its destructor decrements the Tracker's
-// usageCounter.
+// query no longer needs access to the collection. Its destructor decrements the CollectionMetadata
+// _tracker member's usageCounter. Note that the collection may become unsharded, and even get
+// sharded again, between construction and destruction of a ScopedCollectionMetadata.
//
-// When a new chunk mapping replaces _activeMetadata, if any queries still depend on the current
-// mapping, it is pushed onto the back of _metadataInUse.
+// When a new chunk mapping replaces the active mapping, it is pushed onto the back of _metadata.
//
-// Trackers pointed to from _metadataInUse, and their associated CollectionMetadata, are maintained
-// at least as long as any query holds a ScopedCollectionMetadata object referring to them, or to
-// any older tracker. In the diagram above, the middle Tracker must be kept until the one below it
-// is disposed of. (Note that _metadataInUse as shown here has its front() at the bottom, back()
-// at the top. As usual, new entries are pushed onto the back, popped off the front.)
+// A CollectionMetadata object pointed to from _metadata is maintained at least as long as any
+// query holds a ScopedCollectionMetadata object referring to it, or to any older one. In the
+// diagram above, the middle CollectionMetadata is kept until the one below it is disposed of.
+//
+// Note that _metadata as shown here has its front() at the bottom, back() at the top. As usual,
+// new entries are pushed onto the back, popped off the front. The "active" metadata used by new
+// queries (when there is one), is _metadata.back().
namespace mongo {
using TaskExecutor = executor::TaskExecutor;
using CallbackArgs = TaskExecutor::CallbackArgs;
-struct MetadataManager::Tracker {
- /**
- * Creates a new Tracker with the usageCounter initialized to zero.
- */
- Tracker(std::unique_ptr<CollectionMetadata>, MetadataManager*);
-
- std::unique_ptr<CollectionMetadata> metadata;
- uint32_t usageCounter{0};
- std::list<Deletion> orphans;
-
- // lock guards access to manager, which is zeroed by the ~MetadataManager(), but used by
- // ScopedCollectionMetadata when usageCounter falls to zero.
- stdx::mutex trackerLock;
- MetadataManager* manager{nullptr};
-};
-
MetadataManager::MetadataManager(ServiceContext* sc, NamespaceString nss, TaskExecutor* executor)
: _nss(std::move(nss)),
_serviceContext(sc),
- _activeMetadataTracker(std::make_shared<Tracker>(nullptr, this)),
_receivingChunks(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
_executor(executor),
_rangesToClean() {}
MetadataManager::~MetadataManager() {
- {
- stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);
- _shuttingDown = true;
- }
- std::list<std::shared_ptr<Tracker>> inUse;
- {
- // drain any threads that might remove _metadataInUse entries, push to deleter
- stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);
- _clearAllCleanups();
- inUse = std::move(_metadataInUse);
- }
-
- // Trackers can outlive MetadataManager, so we still need to lock each tracker...
- std::for_each(inUse.begin(), inUse.end(), [](auto& tp) {
- stdx::lock_guard<stdx::mutex> scopedLock(tp->trackerLock);
- tp->manager = nullptr;
- });
- { // ... and the active one too
- stdx::lock_guard<stdx::mutex> scopedLock(_activeMetadataTracker->trackerLock);
- _activeMetadataTracker->manager = nullptr;
- }
+ stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);
+ _clearAllCleanups();
+ auto metadata = std::move(_metadata);
}
void MetadataManager::_clearAllCleanups() {
- for (auto& tracker : _metadataInUse) {
- _pushListToClean(std::move(tracker->orphans));
+ for (auto& metadata : _metadata) {
+ _pushListToClean(std::move(metadata->_tracker.orphans));
}
- _pushListToClean(std::move(_activeMetadataTracker->orphans));
_rangesToClean.clear({ErrorCodes::InterruptedDueToReplStateChange,
str::stream() << "Range deletions in " << _nss.ns()
<< " abandoned because collection was"
" dropped or became unsharded"});
}
-ScopedCollectionMetadata MetadataManager::getActiveMetadata() {
+ScopedCollectionMetadata MetadataManager::getActiveMetadata(std::shared_ptr<MetadataManager> self) {
stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);
- if (_activeMetadataTracker) {
- return ScopedCollectionMetadata(_activeMetadataTracker);
+ if (!_metadata.empty()) {
+ return ScopedCollectionMetadata(std::move(self), _metadata.back());
}
return ScopedCollectionMetadata();
}
size_t MetadataManager::numberOfMetadataSnapshots() {
stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);
- return _metadataInUse.size();
+ return _metadata.size() - 1;
}
void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata> remoteMetadata) {
@@ -181,7 +146,7 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
// Collection was never sharded in the first place. This check is necessary in order to avoid
// extraneous logging in the not-a-shard case, because all call sites always try to get the
// collection sharding information regardless of whether the node is sharded or not.
- if (!remoteMetadata && !_activeMetadataTracker->metadata) {
+ if (!remoteMetadata && _metadata.empty()) {
invariant(_receivingChunks.empty());
invariant(_rangesToClean.isEmpty());
return;
@@ -189,12 +154,12 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
// Collection is becoming unsharded
if (!remoteMetadata) {
- log() << "Marking collection " << _nss.ns() << " with "
- << _activeMetadataTracker->metadata->toStringBasic() << " as no longer sharded";
+ log() << "Marking collection " << _nss.ns() << " with " << _metadata.back()->toStringBasic()
+ << " as no longer sharded";
_receivingChunks.clear();
- _setActiveMetadata_inlock(nullptr);
_clearAllCleanups();
+ _metadata.clear();
return;
}
@@ -203,7 +168,7 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
invariant(!remoteMetadata->getShardVersion().isWriteCompatibleWith(ChunkVersion::UNSHARDED()));
// Collection is becoming sharded
- if (!_activeMetadataTracker->metadata) {
+ if (_metadata.empty()) {
log() << "Marking collection " << _nss.ns() << " as sharded with "
<< remoteMetadata->toStringBasic();
@@ -214,31 +179,30 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
return;
}
+ auto* activeMetadata = _metadata.back().get();
+
// If the metadata being installed has a different epoch from ours, this means the collection
// was dropped and recreated, so we must entirely reset the metadata state
- if (_activeMetadataTracker->metadata->getCollVersion().epoch() !=
- remoteMetadata->getCollVersion().epoch()) {
+ if (activeMetadata->getCollVersion().epoch() != remoteMetadata->getCollVersion().epoch()) {
log() << "Overwriting metadata for collection " << _nss.ns() << " from "
- << _activeMetadataTracker->metadata->toStringBasic() << " to "
- << remoteMetadata->toStringBasic() << " due to epoch change";
+ << activeMetadata->toStringBasic() << " to " << remoteMetadata->toStringBasic()
+ << " due to epoch change";
_receivingChunks.clear();
- _setActiveMetadata_inlock(std::move(remoteMetadata)); // start fresh
+ _setActiveMetadata_inlock(std::move(remoteMetadata));
_clearAllCleanups();
return;
}
// We already have newer version
- if (_activeMetadataTracker->metadata->getCollVersion() >= remoteMetadata->getCollVersion()) {
- LOG(1) << "Ignoring refresh of active metadata "
- << _activeMetadataTracker->metadata->toStringBasic() << " with an older "
- << remoteMetadata->toStringBasic();
+ if (activeMetadata->getCollVersion() >= remoteMetadata->getCollVersion()) {
+ LOG(1) << "Ignoring refresh of active metadata " << activeMetadata->toStringBasic()
+ << " with an older " << remoteMetadata->toStringBasic();
return;
}
log() << "Refreshing metadata for collection " << _nss.ns() << " from "
- << _activeMetadataTracker->metadata->toStringBasic() << " to "
- << remoteMetadata->toStringBasic();
+ << activeMetadata->toStringBasic() << " to " << remoteMetadata->toStringBasic();
// Resolve any receiving chunks, which might have completed by now.
// Should be no more than one.
@@ -263,40 +227,34 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
}
void MetadataManager::_setActiveMetadata_inlock(std::unique_ptr<CollectionMetadata> newMetadata) {
- _metadataInUse.push_back(std::move(_activeMetadataTracker));
- _activeMetadataTracker = std::make_shared<Tracker>(std::move(newMetadata), this);
+ invariant(newMetadata);
+ _metadata.push_back(std::move(newMetadata));
_retireExpiredMetadata();
}
void MetadataManager::_retireExpiredMetadata() {
- while (!_metadataInUse.empty() && _metadataInUse.front()->usageCounter == 0) {
- // No ScopedCollectionMetadata can see this Tracker, other than, maybe, the caller.
- auto& tracker = _metadataInUse.front();
- if (!tracker->orphans.empty()) {
+ if (_metadata.empty()) {
+ return; // The collection was dropped, or went unsharded, before the query was cleaned up.
+ }
+ for (; _metadata.front()->_tracker.usageCounter == 0; _metadata.pop_front()) {
+ // No ScopedCollectionMetadata can see _metadata->front(), other than, maybe, the caller.
+ if (!_metadata.front()->_tracker.orphans.empty()) {
log() << "Queries possibly dependent on " << _nss.ns()
<< " range(s) finished; scheduling for deletion";
- _pushListToClean(std::move(tracker->orphans));
+ _pushListToClean(std::move(_metadata.front()->_tracker.orphans));
}
- tracker->metadata.reset(); // Discard the CollectionMetadata.
- _metadataInUse.pop_front(); // Disconnect from the tracker (and maybe destroy it)
- }
- if (_metadataInUse.empty() && !_activeMetadataTracker->orphans.empty()) {
- log() << "Queries possibly dependent on " << _nss.ns()
- << " range(s) finished; scheduling for deletion";
- _pushListToClean(std::move(_activeMetadataTracker->orphans));
+ if (&_metadata.front() == &_metadata.back())
+ break; // do not retire current chunk metadata.
}
}
-MetadataManager::Tracker::Tracker(std::unique_ptr<CollectionMetadata> md, MetadataManager* mgr)
- : metadata(std::move(md)), manager(mgr) {}
-
// ScopedCollectionMetadata members
// call with MetadataManager locked
-ScopedCollectionMetadata::ScopedCollectionMetadata(
- std::shared_ptr<MetadataManager::Tracker> tracker)
- : _tracker(std::move(tracker)) {
- ++_tracker->usageCounter;
+ScopedCollectionMetadata::ScopedCollectionMetadata(std::shared_ptr<MetadataManager> manager,
+ std::shared_ptr<CollectionMetadata> metadata)
+ : _metadata(std::move(metadata)), _manager(std::move(manager)) {
+ ++_metadata->_tracker.usageCounter;
}
ScopedCollectionMetadata::~ScopedCollectionMetadata() {
@@ -304,56 +262,48 @@ ScopedCollectionMetadata::~ScopedCollectionMetadata() {
}
CollectionMetadata* ScopedCollectionMetadata::operator->() const {
- return _tracker ? _tracker->metadata.get() : nullptr;
+ return _metadata ? _metadata.get() : nullptr;
}
CollectionMetadata* ScopedCollectionMetadata::getMetadata() const {
- return _tracker ? _tracker->metadata.get() : nullptr;
+ return _metadata ? _metadata.get() : nullptr;
}
void ScopedCollectionMetadata::_clear() {
- if (!_tracker) {
+ if (!_manager) {
return;
}
- // Note: There is no risk of deadlock here because the only other place in MetadataManager
- // that takes the trackerLock, ~MetadataManager(), does not hold _managerLock at the same time,
- // and ScopedCollectionMetadata takes _managerLock only here.
- stdx::unique_lock<stdx::mutex> trackerLock(_tracker->trackerLock);
- MetadataManager* manager = _tracker->manager;
- if (manager) {
- stdx::lock_guard<stdx::mutex> managerLock(_tracker->manager->_managerLock);
- trackerLock.unlock();
- invariant(_tracker->usageCounter != 0);
- if (--_tracker->usageCounter == 0 && !manager->_shuttingDown) {
- // MetadataManager doesn't care which usageCounter went to zero. It justs retires all
- // that are older than the oldest tracker still in use by queries. (Some start out at
- // zero, some go to zero but can't be expired yet.) Note that new instances of
- // ScopedCollectionMetadata may get attached to the active tracker, so its usage
- // count can increase from zero, unlike most reference counts.
- manager->_retireExpiredMetadata();
- }
- } else {
- trackerLock.unlock();
+ stdx::lock_guard<stdx::mutex> managerLock(_manager->_managerLock);
+ invariant(_metadata->_tracker.usageCounter != 0);
+ if (--_metadata->_tracker.usageCounter == 0) {
+ // MetadataManager doesn't care which usageCounter went to zero. It justs retires all
+ // that are older than the oldest metadata still in use by queries. (Some start out at
+ // zero, some go to zero but can't be expired yet.) Note that new instances of
+ // ScopedCollectionMetadata may get attached to _metadata.back(), so its usage count can
+ // increase from zero, unlike other reference counts.
+ _manager->_retireExpiredMetadata();
}
- _tracker.reset(); // disconnect from the tracker.
+ _metadata.reset();
+ _manager.reset();
}
// do not call with MetadataManager locked
ScopedCollectionMetadata::ScopedCollectionMetadata(ScopedCollectionMetadata&& other) {
- *this = std::move(other); // Rely on this->_tracker being zero-initialized already.
+ *this = std::move(other); // Rely on being zero-initialized already.
}
// do not call with MetadataManager locked
ScopedCollectionMetadata& ScopedCollectionMetadata::operator=(ScopedCollectionMetadata&& other) {
if (this != &other) {
_clear();
- _tracker = std::move(other._tracker);
+ _metadata = std::move(other._metadata);
+ _manager = std::move(other._manager);
}
return *this;
}
ScopedCollectionMetadata::operator bool() const {
- return _tracker && _tracker->metadata; // with a Collection lock the metadata member is stable
+ return _metadata.get();
}
void MetadataManager::toBSONPending(BSONArrayBuilder& bb) const {
@@ -379,8 +329,11 @@ void MetadataManager::append(BSONObjBuilder* builder) {
}
pcArr.done();
+ if (_metadata.empty()) {
+ return;
+ }
BSONArrayBuilder amrArr(builder->subarrayStart("activeMetadataRanges"));
- for (const auto& entry : _activeMetadataTracker->metadata->getChunks()) {
+ for (const auto& entry : _metadata.back()->getChunks()) {
BSONObjBuilder obj;
ChunkRange r = ChunkRange(entry.first, entry.second.getMaxKey());
r.append(&obj);
@@ -425,9 +378,9 @@ void MetadataManager::_addToReceiving(ChunkRange const& range) {
auto MetadataManager::beginReceive(ChunkRange const& range) -> CleanupNotification {
stdx::unique_lock<stdx::mutex> scopedLock(_managerLock);
+ invariant(!_metadata.empty());
- auto* metadata = _activeMetadataTracker->metadata.get();
- if (_overlapsInUseChunk(range) || metadata->rangeOverlapsChunk(range)) {
+ if (_overlapsInUseChunk(range)) {
return Status{ErrorCodes::RangeOverlapConflict,
"Documents in target range may still be in use on the destination shard."};
}
@@ -445,28 +398,24 @@ void MetadataManager::_removeFromReceiving(ChunkRange const& range) {
void MetadataManager::forgetReceive(ChunkRange const& range) {
stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);
+ invariant(!_metadata.empty());
+
// This is potentially a partially received chunk, which needs to be cleaned up. We know none
// of these documents are in use, so they can go straight to the deletion queue.
log() << "Abandoning in-migration of " << _nss.ns() << " range " << range
<< "; scheduling deletion of any documents already copied";
- invariant(!_overlapsInUseChunk(range) &&
- !_activeMetadataTracker->metadata->rangeOverlapsChunk(range));
-
+ invariant(!_overlapsInUseChunk(range));
_removeFromReceiving(range);
-
- // avoid generating a notification to delete: allows stronger check in its destructor.
- std::list<Deletion> ranges;
- ranges.emplace_back(Deletion{ChunkRange{range.getMin().getOwned(), range.getMax().getOwned()}});
- _pushListToClean(std::move(ranges));
+ _pushRangeToClean(range).abandon();
}
auto MetadataManager::cleanUpRange(ChunkRange const& range) -> CleanupNotification {
stdx::unique_lock<stdx::mutex> scopedLock(_managerLock);
- CollectionMetadata* metadata = _activeMetadataTracker->metadata.get();
- invariant(metadata != nullptr);
+ invariant(!_metadata.empty());
- if (metadata->rangeOverlapsChunk(range)) {
+ auto* activeMetadata = _metadata.back().get();
+ if (activeMetadata->rangeOverlapsChunk(range)) {
return Status{ErrorCodes::RangeOverlapConflict,
str::stream() << "Requested deletion range overlaps a live shard chunk"};
}
@@ -484,20 +433,20 @@ auto MetadataManager::cleanUpRange(ChunkRange const& range) -> CleanupNotificati
return _pushRangeToClean(range);
}
- _activeMetadataTracker->orphans.emplace_back(
+ activeMetadata->_tracker.orphans.emplace_back(
Deletion{ChunkRange{range.getMin().getOwned(), range.getMax().getOwned()}});
log() << "Scheduling " << _nss.ns() << " range " << redact(range.toString())
<< " for deletion after all possibly-dependent queries finish";
- return _activeMetadataTracker->orphans.back().notification;
+ return activeMetadata->_tracker.orphans.back().notification;
}
size_t MetadataManager::numberOfRangesToCleanStillInUse() {
stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);
- size_t count = _activeMetadataTracker->orphans.size();
- for (auto& tracker : _metadataInUse) {
- count += tracker->orphans.size();
+ size_t count = 0;
+ for (auto& metadata : _metadata) {
+ count += metadata->_tracker.orphans.size();
}
return count;
}
@@ -518,30 +467,25 @@ auto MetadataManager::trackOrphanedDataCleanup(ChunkRange const& range)
}
bool MetadataManager::_overlapsInUseChunk(ChunkRange const& range) {
- if (_activeMetadataTracker->metadata->rangeOverlapsChunk(range)) {
- return true; // refcount doesn't matter for the active case
- }
- for (auto& tracker : _metadataInUse) {
- if (tracker->usageCounter != 0 && tracker->metadata->rangeOverlapsChunk(range)) {
+ invariant(!_metadata.empty());
+ for (auto it = _metadata.begin(), end = --_metadata.end(); it != end; ++it) {
+ if (((*it)->_tracker.usageCounter != 0) && (*it)->rangeOverlapsChunk(range)) {
return true;
}
}
+ if (_metadata.back()->rangeOverlapsChunk(range)) { // for active metadata, ignore refcount.
+ return true;
+ }
return false;
}
auto MetadataManager::_overlapsInUseCleanups(ChunkRange const& range)
-> boost::optional<CleanupNotification> {
- auto cleanup = _activeMetadataTracker->orphans.crbegin();
- auto ec = _activeMetadataTracker->orphans.crend();
- for (; cleanup != ec; ++cleanup) {
- if (cleanup->range.overlapWith(range)) {
- return cleanup->notification;
- }
- }
- auto tracker = _metadataInUse.crbegin(), et = _metadataInUse.crend();
- for (; tracker != et; ++tracker) {
- cleanup = (*tracker)->orphans.crbegin();
- ec = (*tracker)->orphans.crend();
+ invariant(!_metadata.empty());
+
+ for (auto it = _metadata.crbegin(), et = _metadata.crend(); it != et; ++it) {
+ auto cleanup = (*it)->_tracker.orphans.crbegin();
+ auto ec = (*it)->_tracker.orphans.crend();
for (; cleanup != ec; ++cleanup) {
if (bool(cleanup->range.overlapWith(range))) {
return cleanup->notification;
@@ -553,8 +497,8 @@ auto MetadataManager::_overlapsInUseCleanups(ChunkRange const& range)
boost::optional<KeyRange> MetadataManager::getNextOrphanRange(BSONObj const& from) {
stdx::unique_lock<stdx::mutex> scopedLock(_managerLock);
- invariant(_activeMetadataTracker->metadata);
- return _activeMetadataTracker->metadata->getNextOrphanRange(_receivingChunks, from);
+ invariant(!_metadata.empty());
+ return _metadata.back()->getNextOrphanRange(_receivingChunks, from);
}
} // namespace mongo
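
The refreshActiveMetadata() hunks above amount to a small decision ladder over the incoming metadata. The sketch below restates that policy with simplified, invented types (an epoch string and a single integer version); it approximates the branches visible in the diff and is not the real function.

#include <iostream>
#include <string>

struct Version {
    std::string epoch;   // changes when the collection is dropped and recreated
    int collVersion;     // increases within an epoch
};

enum class RefreshAction { BecomeUnsharded, BecomeSharded, ResetAfterDropRecreate, Ignore, Install };

RefreshAction classify(const Version* active, const Version* remote) {
    if (!remote)
        return RefreshAction::BecomeUnsharded;         // clear metadata and pending cleanups
    if (!active)
        return RefreshAction::BecomeSharded;           // first sharded metadata for this shard
    if (active->epoch != remote->epoch)
        return RefreshAction::ResetAfterDropRecreate;  // overwrite and start fresh
    if (active->collVersion >= remote->collVersion)
        return RefreshAction::Ignore;                  // we already have newer metadata
    return RefreshAction::Install;                     // normal refresh: push onto _metadata
}

int main() {
    Version active{"epoch-A", 5};
    Version newer{"epoch-A", 6};
    Version recreated{"epoch-B", 1};
    std::cout << (classify(&active, &newer) == RefreshAction::Install) << '\n';                     // 1
    std::cout << (classify(&active, &recreated) == RefreshAction::ResetAfterDropRecreate) << '\n';  // 1
    std::cout << (classify(&active, nullptr) == RefreshAction::BecomeUnsharded) << '\n';            // 1
    std::cout << (classify(nullptr, &newer) == RefreshAction::BecomeSharded) << '\n';               // 1
}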
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index 20f951a9fa8..46da84db9e2 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -62,7 +62,7 @@ public:
* contains the currently active metadata. When the usageCounter goes to zero, the RAII
* object going out of scope will call _removeMetadata.
*/
- ScopedCollectionMetadata getActiveMetadata();
+ ScopedCollectionMetadata getActiveMetadata(std::shared_ptr<MetadataManager> self);
/**
* Returns the number of CollectionMetadata objects being maintained on behalf of running
@@ -139,8 +139,6 @@ public:
using Deletion = CollectionRangeDeleter::Deletion;
private:
- struct Tracker;
-
/**
* Deletes ranges, in background, until done, normally using a task executor attached to the
* ShardingState.
@@ -208,21 +206,16 @@ private:
// data members
const NamespaceString _nss;
-
// ServiceContext from which to obtain instances of global support objects.
ServiceContext* const _serviceContext;
// Mutex to protect the state below
stdx::mutex _managerLock;
- bool _shuttingDown{false};
-
- // The collection metadata reflecting chunks accessible to new queries
- std::shared_ptr<Tracker> _activeMetadataTracker;
-
- // Previously active collection metadata instances still in use by active server operations or
- // cursors
- std::list<std::shared_ptr<Tracker>> _metadataInUse;
+ // _metadata.back() is the collection metadata reflecting chunks accessible to new queries.
+ // The rest are previously active collection metadata instances still in use by active server
+ // operations or cursors.
+ std::list<std::shared_ptr<CollectionMetadata>> _metadata;
// Chunk ranges being migrated into to the shard. Indexed by the min key of the range.
RangeMap _receivingChunks;
@@ -235,14 +228,12 @@ private:
// friends
- // for access to _decrementTrackerUsage(), and to Tracker.
- friend class ScopedCollectionMetadata;
-
// for access to _rangesToClean and _managerLock under task callback
friend bool CollectionRangeDeleter::cleanUpNextRange(OperationContext*,
NamespaceString const&,
int maxToDelete,
CollectionRangeDeleter*);
+ friend class ScopedCollectionMetadata;
};
class ScopedCollectionMetadata {
@@ -257,7 +248,7 @@ public:
~ScopedCollectionMetadata();
/**
- * Binds *this to the same tracker as other, if any.
+ * Binds *this to the same CollectionMetadata as other, if any.
*/
ScopedCollectionMetadata(ScopedCollectionMetadata&& other);
ScopedCollectionMetadata& operator=(ScopedCollectionMetadata&& other);
@@ -276,20 +267,26 @@ public:
private:
/**
- * If tracker is non-null, increments the refcount in the specified tracker.
+ * Increments the usageCounter in the specified CollectionMetadata.
*
- * Must be called with tracker->manager locked.
+ * Must be called with manager->_managerLock held. Arguments must be non-null.
*/
- ScopedCollectionMetadata(std::shared_ptr<MetadataManager::Tracker> tracker);
+ ScopedCollectionMetadata(std::shared_ptr<MetadataManager> manager,
+ std::shared_ptr<CollectionMetadata> metadata);
/**
- * Disconnect from the tracker, possibly triggering GC of unused CollectionMetadata.
+ * Disconnect from the CollectionMetadata, possibly triggering GC of unused CollectionMetadata.
+ *
+ * Must be called with manager->_managerLock held.
*/
void _clear();
- std::shared_ptr<MetadataManager::Tracker> _tracker{nullptr};
+ std::shared_ptr<CollectionMetadata> _metadata{nullptr};
+
+ std::shared_ptr<MetadataManager> _manager{nullptr};
- friend ScopedCollectionMetadata MetadataManager::getActiveMetadata(); // uses our private ctor
+ friend ScopedCollectionMetadata MetadataManager::getActiveMetadata(
+ std::shared_ptr<MetadataManager>); // uses our private ctor
};
} // namespace mongo
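
One detail worth noting in the header above is the access-control idiom: ScopedCollectionMetadata's counting constructor is private, and MetadataManager::getActiveMetadata() is befriended to call it, so a counted handle can only be created while the manager's lock is held. The fragment below shows the same private-constructor-plus-friend-factory idiom in isolation, with invented names; it illustrates the pattern only and is not the mongo classes.

#include <iostream>
#include <mutex>

class Handle;

class Registry {
public:
    Handle acquire();                   // the only way to obtain a Handle
    int usageCount() {
        std::lock_guard<std::mutex> lk(_lock);
        return _usageCounter;
    }
private:
    std::mutex _lock;
    int _usageCounter = 0;
    friend class Handle;                // ~Handle() needs _lock and _usageCounter
};

class Handle {
public:
    Handle(Handle&& other) noexcept : _registry(other._registry) { other._registry = nullptr; }
    ~Handle() {
        if (!_registry)
            return;                     // moved-from
        std::lock_guard<std::mutex> lk(_registry->_lock);
        --_registry->_usageCounter;
    }
private:
    explicit Handle(Registry* r) : _registry(r) {}   // private: callers must go through acquire()
    Registry* _registry;
    friend Handle Registry::acquire();  // the factory uses our private constructor
};

Handle Registry::acquire() {
    std::lock_guard<std::mutex> lk(_lock);
    ++_usageCounter;                    // the counter only changes while _lock is held
    return Handle(this);
}

int main() {
    Registry registry;
    {
        Handle h = registry.acquire();
        std::cout << registry.usageCount() << '\n';  // 1
    }
    std::cout << registry.usageCount() << '\n';      // 0
}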
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 2a1eaa694ff..a3375634d9f 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -137,8 +137,8 @@ protected:
metadata.getKeyPattern(), chunkVersion, chunkVersion, std::move(chunksMap));
}
- CollectionMetadata* addChunk(MetadataManager* manager) {
- ScopedCollectionMetadata scopedMetadata1 = manager->getActiveMetadata();
+ CollectionMetadata* addChunk(std::shared_ptr<MetadataManager>& manager) {
+ ScopedCollectionMetadata scopedMetadata1 = manager->getActiveMetadata(manager);
ChunkVersion newVersion = scopedMetadata1->getCollVersion();
newVersion.incMajor();
@@ -152,22 +152,24 @@ protected:
};
TEST_F(MetadataManagerTest, SetAndGetActiveMetadata) {
- MetadataManager manager(getServiceContext(), kNss, executor());
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
std::unique_ptr<CollectionMetadata> cm = makeEmptyMetadata();
auto cmPtr = cm.get();
- manager.refreshActiveMetadata(std::move(cm));
- ScopedCollectionMetadata scopedMetadata = manager.getActiveMetadata();
+ manager->refreshActiveMetadata(std::move(cm));
+ ScopedCollectionMetadata scopedMetadata = manager->getActiveMetadata(manager);
ASSERT_EQ(cmPtr, scopedMetadata.getMetadata());
};
TEST_F(MetadataManagerTest, ResetActiveMetadata) {
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
- auto cm2Ptr = addChunk(&manager);
- ScopedCollectionMetadata scopedMetadata2 = manager.getActiveMetadata();
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
+ auto cm2Ptr = addChunk(manager);
+ ScopedCollectionMetadata scopedMetadata2 = manager->getActiveMetadata(manager);
ASSERT_EQ(cm2Ptr, scopedMetadata2.getMetadata());
};
@@ -176,30 +178,32 @@ TEST_F(MetadataManagerTest, ResetActiveMetadata) {
// doing anything.
TEST_F(MetadataManagerTest, CleanUpForMigrateIn) {
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
ChunkRange range1(BSON("key" << 0), BSON("key" << 10));
ChunkRange range2(BSON("key" << 10), BSON("key" << 20));
- auto notif1 = manager.beginReceive(range1);
+ auto notif1 = manager->beginReceive(range1);
ASSERT_TRUE(!notif1.ready());
- auto notif2 = manager.beginReceive(range2);
+ auto notif2 = manager->beginReceive(range2);
ASSERT_TRUE(!notif2.ready());
- ASSERT_EQ(manager.numberOfRangesToClean(), 2UL);
- ASSERT_EQ(manager.numberOfRangesToCleanStillInUse(), 0UL);
+ ASSERT_EQ(manager->numberOfRangesToClean(), 2UL);
+ ASSERT_EQ(manager->numberOfRangesToCleanStillInUse(), 0UL);
notif1.abandon();
notif2.abandon();
}
TEST_F(MetadataManagerTest, AddRangeNotificationsBlockAndYield) {
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
- auto notifn1 = manager.cleanUpRange(cr1);
+ auto notifn1 = manager->cleanUpRange(cr1);
ASSERT_FALSE(notifn1.ready());
- ASSERT_EQ(manager.numberOfRangesToClean(), 1UL);
- auto optNotifn = manager.trackOrphanedDataCleanup(cr1);
+ ASSERT_EQ(manager->numberOfRangesToClean(), 1UL);
+ auto optNotifn = manager->trackOrphanedDataCleanup(cr1);
ASSERT_FALSE(notifn1.ready());
ASSERT_FALSE(optNotifn->ready());
ASSERT(notifn1 == *optNotifn);
@@ -209,29 +213,30 @@ TEST_F(MetadataManagerTest, AddRangeNotificationsBlockAndYield) {
TEST_F(MetadataManagerTest, NotificationBlocksUntilDeletion) {
ChunkRange cr1(BSON("key" << 20), BSON("key" << 30));
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
- auto optNotif = manager.trackOrphanedDataCleanup(cr1);
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
+ auto optNotif = manager->trackOrphanedDataCleanup(cr1);
ASSERT_FALSE(optNotif); // nothing to track yet
{
- ASSERT_EQ(manager.numberOfMetadataSnapshots(), 0UL);
- ASSERT_EQ(manager.numberOfRangesToClean(), 0UL);
+ ASSERT_EQ(manager->numberOfMetadataSnapshots(), 0UL);
+ ASSERT_EQ(manager->numberOfRangesToClean(), 0UL);
- auto scm = manager.getActiveMetadata(); // and increment scm's refcount
+ auto scm = manager->getActiveMetadata(manager); // and increment scm's refcount
ASSERT(bool(scm));
- addChunk(&manager); // push new metadata
+ addChunk(manager); // push new metadata
- ASSERT_EQ(manager.numberOfMetadataSnapshots(), 1UL);
- ASSERT_EQ(manager.numberOfRangesToClean(), 0UL); // not yet...
+ ASSERT_EQ(manager->numberOfMetadataSnapshots(), 1UL);
+ ASSERT_EQ(manager->numberOfRangesToClean(), 0UL); // not yet...
- optNotif = manager.cleanUpRange(cr1);
- ASSERT_EQ(manager.numberOfMetadataSnapshots(), 1UL);
- ASSERT_EQ(manager.numberOfRangesToClean(), 1UL);
- } // scm destroyed, refcount of tracker goes to zero
- ASSERT_EQ(manager.numberOfMetadataSnapshots(), 0UL);
- ASSERT_EQ(manager.numberOfRangesToClean(), 1UL);
+ optNotif = manager->cleanUpRange(cr1);
+ ASSERT_EQ(manager->numberOfMetadataSnapshots(), 1UL);
+ ASSERT_EQ(manager->numberOfRangesToClean(), 1UL);
+ } // scm destroyed, refcount of metadata goes to zero
+ ASSERT_EQ(manager->numberOfMetadataSnapshots(), 0UL);
+ ASSERT_EQ(manager->numberOfRangesToClean(), 1UL);
ASSERT_FALSE(optNotif->ready());
- auto optNotif2 = manager.trackOrphanedDataCleanup(cr1); // now tracking it in _rangesToClean
+ auto optNotif2 = manager->trackOrphanedDataCleanup(cr1); // now tracking it in _rangesToClean
ASSERT_TRUE(optNotif && !optNotif->ready());
ASSERT_TRUE(optNotif2 && !optNotif2->ready());
ASSERT(*optNotif == *optNotif2);
@@ -240,97 +245,111 @@ TEST_F(MetadataManagerTest, NotificationBlocksUntilDeletion) {
}
TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationSinglePending) {
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 0UL);
+ ASSERT_EQ(manager->getActiveMetadata(manager)->getChunks().size(), 0UL);
- ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
+ ChunkVersion version = manager->getActiveMetadata(manager)->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(cloneMetadataPlusChunk(
- *manager.getActiveMetadata().getMetadata(), cr1.getMin(), cr1.getMax(), version));
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
+ manager->refreshActiveMetadata(cloneMetadataPlusChunk(
+ *manager->getActiveMetadata(manager).getMetadata(), cr1.getMin(), cr1.getMax(), version));
+ ASSERT_EQ(manager->getActiveMetadata(manager)->getChunks().size(), 1UL);
}
TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationMultiplePending) {
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
const ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 0UL);
+ ASSERT_EQ(manager->getActiveMetadata(manager)->getChunks().size(), 0UL);
{
- ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
+ ChunkVersion version = manager->getActiveMetadata(manager)->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(cloneMetadataPlusChunk(
- *manager.getActiveMetadata().getMetadata(), cr1.getMin(), cr1.getMax(), version));
- ASSERT_EQ(manager.numberOfRangesToClean(), 0UL);
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
+ manager->refreshActiveMetadata(
+ cloneMetadataPlusChunk(*manager->getActiveMetadata(manager).getMetadata(),
+ cr1.getMin(),
+ cr1.getMax(),
+ version));
+ ASSERT_EQ(manager->numberOfRangesToClean(), 0UL);
+ ASSERT_EQ(manager->getActiveMetadata(manager)->getChunks().size(), 1UL);
}
{
- ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
+ ChunkVersion version = manager->getActiveMetadata(manager)->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(cloneMetadataPlusChunk(
- *manager.getActiveMetadata().getMetadata(), cr2.getMin(), cr2.getMax(), version));
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 2UL);
+ manager->refreshActiveMetadata(
+ cloneMetadataPlusChunk(*manager->getActiveMetadata(manager).getMetadata(),
+ cr2.getMin(),
+ cr2.getMax(),
+ version));
+ ASSERT_EQ(manager->getActiveMetadata(manager)->getChunks().size(), 2UL);
}
}
TEST_F(MetadataManagerTest, RefreshAfterNotYetCompletedMigrationMultiplePending) {
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
const ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 0UL);
+ ASSERT_EQ(manager->getActiveMetadata(manager)->getChunks().size(), 0UL);
- ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
+ ChunkVersion version = manager->getActiveMetadata(manager)->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(cloneMetadataPlusChunk(
- *manager.getActiveMetadata().getMetadata(), BSON("key" << 50), BSON("key" << 60), version));
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
+ manager->refreshActiveMetadata(
+ cloneMetadataPlusChunk(*manager->getActiveMetadata(manager).getMetadata(),
+ BSON("key" << 50),
+ BSON("key" << 60),
+ version));
+ ASSERT_EQ(manager->getActiveMetadata(manager)->getChunks().size(), 1UL);
}
TEST_F(MetadataManagerTest, BeginReceiveWithOverlappingRange) {
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
const ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
const ChunkRange crOverlap(BSON("key" << 5), BSON("key" << 35));
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 0UL);
+ ASSERT_EQ(manager->getActiveMetadata(manager)->getChunks().size(), 0UL);
}
TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
{
- auto metadata = manager.getActiveMetadata();
+ auto metadata = manager->getActiveMetadata(manager);
ChunkVersion newVersion = metadata->getCollVersion();
newVersion.incMajor();
- manager.refreshActiveMetadata(cloneMetadataPlusChunk(
+ manager->refreshActiveMetadata(cloneMetadataPlusChunk(
*metadata.getMetadata(), BSON("key" << 0), BSON("key" << 10), newVersion));
}
// Now, pretend that the collection was dropped and recreated
auto recreateMetadata = makeEmptyMetadata();
- ChunkVersion newVersion = manager.getActiveMetadata()->getCollVersion();
+ ChunkVersion newVersion = manager->getActiveMetadata(manager)->getCollVersion();
newVersion.incMajor();
- manager.refreshActiveMetadata(cloneMetadataPlusChunk(
+ manager->refreshActiveMetadata(cloneMetadataPlusChunk(
*recreateMetadata, BSON("key" << 20), BSON("key" << 30), newVersion));
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
+ ASSERT_EQ(manager->getActiveMetadata(manager)->getChunks().size(), 1UL);
- const auto chunkEntry = manager.getActiveMetadata()->getChunks().begin();
+ const auto chunkEntry = manager->getActiveMetadata(manager)->getChunks().begin();
ASSERT_BSONOBJ_EQ(BSON("key" << 20), chunkEntry->first);
ASSERT_BSONOBJ_EQ(BSON("key" << 30), chunkEntry->second.getMaxKey());
ASSERT_EQ(newVersion, chunkEntry->second.getVersion());
@@ -338,15 +357,16 @@ TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
// Tests membership functions for _rangesToClean
TEST_F(MetadataManagerTest, RangesToCleanMembership) {
- MetadataManager manager(getServiceContext(), kNss, executor());
- manager.refreshActiveMetadata(makeEmptyMetadata());
+ std::shared_ptr<MetadataManager> manager =
+ std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+ manager->refreshActiveMetadata(makeEmptyMetadata());
- ASSERT(manager.numberOfRangesToClean() == 0UL);
+ ASSERT(manager->numberOfRangesToClean() == 0UL);
ChunkRange cr1 = ChunkRange(BSON("key" << 0), BSON("key" << 10));
- auto notifn = manager.cleanUpRange(cr1);
+ auto notifn = manager->cleanUpRange(cr1);
ASSERT(!notifn.ready());
- ASSERT(manager.numberOfRangesToClean() == 1UL);
+ ASSERT(manager->numberOfRangesToClean() == 1UL);
notifn.abandon();
}