summaryrefslogtreecommitdiff
path: root/src/mongo/db/s/collection_sharding_state.h
diff options
context:
space:
mode:
authorHenrik Edin <henrik.edin@mongodb.com>2018-05-18 17:29:14 -0400
committerHenrik Edin <henrik.edin@mongodb.com>2018-09-18 16:34:02 -0400
commit27e0275301eed05bea3d65c766dbe76ee1da9b8a (patch)
treec744bad9984e66cbce25c1f907f3e203db78fad5 /src/mongo/db/s/collection_sharding_state.h
parentdf38daf87387969413c66df415601663e63cbb34 (diff)
downloadmongo-27e0275301eed05bea3d65c766dbe76ee1da9b8a.tar.gz
SERVER-32198 Get rid of CollectionShardingState::collectionIsSharded
(cherry picked from commit 80de0da37b00dbeed576b28a842cb172b6714358) SERVER-35773 Remove references to the CatalogCache from MetadataManager (cherry picked from commit 2aa65a86193e5d38934a4f2d6b0a8298e2432485) SERVER-32198 Add support for an optional `vWanted` to StaleConfigInfo (cherry picked from commit 60559a00b81293184922b3418a8e56610edf8dd9) SERVER-36054 Remove ScopedCollectionMetadata's operator bool (cherry picked from commit c9c340ad6e9e1f33cb001a8375c62d6b16138c74) SERVER-36054 Remove more unused methods from CSS/MetadataManager (cherry picked from commit ca04f5bcf9bfa73c9162b3a77225c997c6deec8a) SERVER-36116 Get rid of CollectionShardingState::resetAll (cherry picked from commit db1cc80d13d203b6351f5510f7756cc1c7bfc0ea) SERVER-36054 Get rid of unused methods from CollectionShardingState (cherry picked from commit 884d232473dca72e0872f0e540d4c3108c1e0b3d) SERVER-36164 Decouple ScopedCollectionMetadata from MetadataManager (cherry picked from commit d91262c4a2ed7d94923c3b1c5ff5d208aa981c73) SERVER-29908 Move CollectionShardingState under sharding_api_d (cherry picked from commit e491e284e8066929c8272c96a3128241ab481be8) SERVER-29908 Remove ShardingState::appendInfo Expose the ShardingState properties and move the appendInfo logic to be entirely inside the 'getShardingState' function, which is its only consumer. (cherry picked from commit 24e411d5cd7f64c5b4da25a351529cd1873284b8) SERVER-29908 Move 'updateConfigServerOpTimeFromMetadata' out of ShardingState (cherry picked from commit 7a97557ce5bf74dc2b663762b7a5ffb9c958d580) SERVER-29908 Move all runtime logic out of ShardingState ... and move it into a ShardingInitializationMongoD class, which is responsible for driving the sharding-awareness of the node and setting it onto ShardingState. Also gets rid of the 'sharding' library, so there is no more library dependency cycle. 
(cherry picked from commit 200c3dc58410d8b3287a2075cc9b2ad085100e83) SERVER-29908 Fold the 'sharding_connection_hook' library into 'sharding_initialization' ... and also remove dependency of MongoS on the replication coordinator (cherry picked from commit fab6864f4edcae7bb304f79e601f1f62cc376a77)
Diffstat (limited to 'src/mongo/db/s/collection_sharding_state.h')
-rw-r--r--src/mongo/db/s/collection_sharding_state.h217
1 files changed, 56 insertions, 161 deletions
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 5ea37a6230f..daa0c3ca5e8 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -28,117 +28,63 @@
#pragma once
-#include <memory>
-#include <string>
-
#include "mongo/base/disallow_copying.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/s/metadata_manager.h"
+#include "mongo/db/s/scoped_collection_metadata.h"
#include "mongo/db/s/sharding_migration_critical_section.h"
-#include "mongo/util/decorable.h"
namespace mongo {
-class OperationContext;
-
/**
- * Contains all sharding-related runtime state for a given collection. One such object is assigned
- * to each sharded collection known on a mongod instance. A set of these objects is linked off the
- * instance's sharding state.
+ * Each collection on a mongod instance is dynamically assigned two pieces of information for the
+ * duration of its lifetime:
+ * CollectionShardingState - this is a passive data-only state, which represents what is the
+ * shard's knowledge of the shard version and the set of chunks that it owns.
+ * CollectionShardingRuntime (missing from the embedded mongod) - this is the heavyweight machinery
+ * which implements the sharding protocol functions and is what controls the data-only state.
*
- * Synchronization rules: In order to look-up this object in the instance's sharding map, one must
- * have some lock on the respective collection.
+ * The CollectionShardingStateFactory class below is used in order to allow for the collection
+ * runtime to be instantiated separately from the sharding state.
+ *
+ * Synchronization rule: In order to obtain an instance of this object, the caller must have some
+ * lock on the respective collection.
*/
-class CollectionShardingState : public Decorable<CollectionShardingState> {
+class CollectionShardingState {
MONGO_DISALLOW_COPYING(CollectionShardingState);
public:
- using CleanupNotification = CollectionRangeDeleter::DeleteNotification;
-
- /**
- * Instantiates a new per-collection sharding state as unsharded.
- */
- CollectionShardingState(ServiceContext* sc, NamespaceString nss);
+ virtual ~CollectionShardingState() = default;
/**
* Obtains the sharding state for the specified collection. If it does not exist, it will be
- * created and will remain active until the collection is dropped or unsharded.
+ * created and will remain in memory until the collection is dropped.
*
* Must be called with some lock held on the specific collection being looked up and the
- * returned pointer should never be stored.
+ * returned pointer must not be stored.
*/
static CollectionShardingState* get(OperationContext* opCtx, const NamespaceString& nss);
- static CollectionShardingState* get(OperationContext* opCtx, const std::string& ns);
-
- static void resetAll(OperationContext* opCtx);
- static void report(OperationContext* opCtx, BSONObjBuilder* builder);
-
- /**
- * Returns the chunk metadata for the collection. The metadata it represents lives as long as
- * the object itself, and the collection, exist. After dropping the collection lock, the
- * collection may no longer exist, but it is still safe to destroy the object.
- * The metadata is tied to a specific point in time (atClusterTime) and the time is retrieved
- * from the operation context (opCtx).
- */
- ScopedCollectionMetadata getMetadata(OperationContext* opCtx);
-
- /**
- * BSON output of the pending metadata into a BSONArray
- */
- void toBSONPending(BSONArrayBuilder& bb) const {
- _metadataManager->toBSONPending(bb);
- }
-
- /**
- * Updates the metadata based on changes received from the config server and also resolves the
- * pending receives map in case some of these pending receives have completed or have been
- * abandoned. If newMetadata is null, unshard the collection.
- *
- * Must always be called with an exclusive collection lock.
- */
- void refreshMetadata(OperationContext* opCtx, std::unique_ptr<CollectionMetadata> newMetadata);
-
- /**
- * Marks the collection as not sharded at stepdown time so that no filtering will occur for
- * slaveOk queries.
- */
- void markNotShardedAtStepdown();
/**
- * Schedules any documents in `range` for immediate cleanup iff no running queries can depend
- * on them, and adds the range to the list of pending ranges. Otherwise, returns a notification
- * that yields bad status immediately. Does not block. Call waitStatus(opCtx) on the result
- * to wait for the deletion to complete or fail. After that, call waitForClean to ensure no
- * other deletions are pending for the range.
- */
- auto beginReceive(ChunkRange const& range) -> CleanupNotification;
-
- /*
- * Removes `range` from the list of pending ranges, and schedules any documents in the range for
- * immediate cleanup. Does not block.
+ * Reports all collections which have filtering information associated.
*/
- void forgetReceive(const ChunkRange& range);
+ static void report(OperationContext* opCtx, BSONObjBuilder* builder);
/**
- * Schedules documents in `range` for cleanup after any running queries that may depend on them
- * have terminated. Does not block. Fails if range overlaps any current local shard chunk.
- * Passed kDelayed, an additional delay (configured via server parameter orphanCleanupDelaySecs)
- * is added to permit (most) dependent queries on secondaries to complete, too.
+ * Returns the chunk filtering metadata for the collection. The returned object is safe to
+ * access outside of collection lock.
*
- * Call result.waitStatus(opCtx) to wait for the deletion to complete or fail. If that succeeds,
- * waitForClean can be called to ensure no other deletions are pending for the range. Call
- * result.abandon(), instead of waitStatus, to ignore the outcome.
+ * If the operation context contains an 'atClusterTime' property, the returned filtering
+ * metadata will be tied to a specific point in time. Otherwise it will reference the latest
+ * time available.
*/
- enum CleanWhen { kNow, kDelayed };
- auto cleanUpRange(ChunkRange const& range, CleanWhen) -> CleanupNotification;
+ ScopedCollectionMetadata getMetadata(OperationContext* opCtx);
/**
- * Returns a vector of ScopedCollectionMetadata objects representing metadata instances in use
- * by running queries that overlap the argument range, suitable for identifying and invalidating
- * those queries.
+ * Checks whether the shard version in the operation context is compatible with the shard
+ * version of the collection and if not, throws StaleConfigException populated with the received
+ * and wanted versions.
*/
- std::vector<ScopedCollectionMetadata> overlappingMetadata(ChunkRange const& range) const;
+ void checkShardVersionOrThrow(OperationContext* opCtx);
/**
* Methods to control the collection's critical section. Must be called with the collection X
@@ -148,107 +94,56 @@ public:
void enterCriticalSectionCommitPhase(OperationContext* opCtx);
void exitCriticalSection(OperationContext* opCtx);
+ /**
+ * If the collection is currently in a critical section, returns the critical section signal to
+ * be waited on. Otherwise, returns nullptr.
+ */
auto getCriticalSectionSignal(ShardingMigrationCriticalSection::Operation op) const {
return _critSec.getSignal(op);
}
- /**
- * Checks whether the shard version in the context is compatible with the shard version of the
- * collection locally and if not throws StaleConfigException populated with the expected and
- * actual versions.
- *
- * Because StaleConfigException has special semantics in terms of how a sharded command's
- * response is constructed, this function should be the only means of checking for shard version
- * match.
- */
- void checkShardVersionOrThrow(OperationContext* opCtx);
-
- /**
- * Returns whether this collection is sharded. Valid only if mongoD is primary.
- * TODO SERVER-24960: This method may return a false positive until SERVER-24960 is fixed.
- */
- bool collectionIsSharded(OperationContext* opCtx);
-
- /**
- * Tracks deletion of any documents within the range, returning when deletion is complete.
- * Throws if the collection is dropped while it sleeps.
- */
- static Status waitForClean(OperationContext* opCtx,
- const NamespaceString& nss,
- OID const& epoch,
- ChunkRange orphanRange);
-
- /**
- * Reports whether any range still scheduled for deletion overlaps the argument range. If so,
- * it returns a notification n such that n->get(opCtx) will wake when the newest overlapping
- * range's deletion (possibly the one of interest) completes or fails. This should be called
- * again after each wakeup until it returns boost::none, because there can be more than one
- * range scheduled for deletion that overlaps its argument.
- */
- auto trackOrphanedDataCleanup(ChunkRange const& range) -> boost::optional<CleanupNotification>;
-
- /**
- * Returns a range _not_ owned by this shard that starts no lower than the specified
- * startingFrom key value, if any, or boost::none if there is no such range.
- */
- boost::optional<ChunkRange> getNextOrphanRange(BSONObj const& startingFrom);
+protected:
+ CollectionShardingState(NamespaceString nss);
private:
- /**
- * Checks whether the shard version of the operation matches that of the collection.
- *
- * opCtx - Operation context from which to retrieve the operation's expected version.
- * errmsg (out) - On false return contains an explanatory error message.
- * expectedShardVersion (out) - On false return contains the expected collection version on this
- * shard. Obtained from the operation sharding state.
- * actualShardVersion (out) - On false return contains the actual collection version on this
- * shard. Obtained from the collection sharding state.
- *
- * Returns true if the expected collection version on the shard matches its actual version on
- * the shard and false otherwise. Upon false return, the output parameters will be set.
- */
- bool _checkShardVersionOk(OperationContext* opCtx,
- std::string* errmsg,
- ChunkVersion* expectedShardVersion,
- ChunkVersion* actualShardVersion);
-
// Namespace this state belongs to.
const NamespaceString _nss;
- // Contains all the metadata associated with this collection.
- std::shared_ptr<MetadataManager> _metadataManager;
-
+ // Tracks the migration critical section state for this collection.
ShardingMigrationCriticalSection _critSec;
- // for access to _metadataManager
- friend auto CollectionRangeDeleter::cleanUpNextRange(OperationContext*,
- NamespaceString const&,
- OID const& epoch,
- int maxToDelete,
- CollectionRangeDeleter*)
- -> boost::optional<Date_t>;
+ // Obtains the current metadata for the collection
+ virtual ScopedCollectionMetadata _getMetadata(OperationContext* opCtx) = 0;
};
/**
- * RAII-style class, which obtains a reference to the critical section for the
- * specified collection.
+ * Singleton factory to instantiate CollectionShardingState objects specific to the type of instance
+ * which is running.
*/
-class CollectionCriticalSection {
- MONGO_DISALLOW_COPYING(CollectionCriticalSection);
+class CollectionShardingStateFactory {
+ MONGO_DISALLOW_COPYING(CollectionShardingStateFactory);
public:
- CollectionCriticalSection(OperationContext* opCtx, NamespaceString ns);
- ~CollectionCriticalSection();
+ static void set(ServiceContext* service,
+ std::unique_ptr<CollectionShardingStateFactory> factory);
+ static void clear(ServiceContext* service);
+
+ virtual ~CollectionShardingStateFactory() = default;
/**
- * Enters the commit phase of the critical section and blocks reads.
+ * Called by the CollectionShardingState::get method once per newly cached namespace. It is
+ * invoked under a mutex and must not acquire any locks or do blocking work.
+ *
+ * Implementations must be thread-safe when called from multiple threads.
*/
- void enterCommitPhase();
+ virtual std::unique_ptr<CollectionShardingState> make(const NamespaceString& nss) = 0;
-private:
- NamespaceString _nss;
+protected:
+ CollectionShardingStateFactory(ServiceContext* serviceContext)
+ : _serviceContext(serviceContext) {}
- OperationContext* _opCtx;
+ // The service context which owns this factory
+ ServiceContext* const _serviceContext;
};
} // namespace mongo