Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/SConscript                                  |  4
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp                          | 22
-rw-r--r--  src/mongo/db/s/chunk_splitter.h                            | 14
-rw-r--r--  src/mongo/db/s/config/configsvr_create_database_command.cpp | 23
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp    | 11
-rw-r--r--  src/mongo/db/s/sharding_state.cpp                          | 31
-rw-r--r--  src/mongo/db/s/sharding_state.h                            | 18
-rw-r--r--  src/mongo/db/s/sharding_state_recovery.cpp                 | 20
8 files changed, 66 insertions, 77 deletions
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 12ab8fac01f..5206a25bfca 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -24,6 +24,7 @@ env.Library(
 env.Library(
     target='sharding_runtime_d',
     source=[
+        'chunk_splitter.cpp',
         'implicit_create_collection.cpp',
         'migration_destination_manager.cpp',
         'session_catalog_migration_destination.cpp',
@@ -36,6 +37,7 @@ env.Library(
         '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
         '$BUILD_DIR/mongo/db/rw_concern_d',
         'sharding',
+        'sharding_catalog_manager',
     ],
 )
@@ -71,7 +73,6 @@ env.Library(
         'active_migrations_registry.cpp',
         'active_move_primaries_registry.cpp',
         'chunk_move_write_concern_options.cpp',
-        'chunk_splitter.cpp',
         'collection_range_deleter.cpp',
         'collection_sharding_state.cpp',
         'metadata_manager.cpp',
@@ -232,7 +233,6 @@ env.Library(
         '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
         '$BUILD_DIR/mongo/s/commands/shared_cluster_commands',
         'balancer',
-        'sharding_catalog_manager',
         'sharding_runtime_d',
     ],
 )
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index d314b9c5f84..2c17bda108c 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -37,10 +37,10 @@
 #include "mongo/db/client.h"
 #include "mongo/db/dbdirectclient.h"
 #include "mongo/db/namespace_string.h"
-#include "mongo/db/operation_context.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/db/s/split_chunk.h"
 #include "mongo/db/s/split_vector.h"
+#include "mongo/db/service_context.h"
 #include "mongo/s/balancer_configuration.h"
 #include "mongo/s/catalog/type_chunk.h"
 #include "mongo/s/catalog_cache.h"
@@ -210,9 +210,11 @@ bool isAutoBalanceEnabled(OperationContext* opCtx,
     return collStatus.getValue().value.getAllowBalance();
 }
 
+const auto getChunkSplitter = ServiceContext::declareDecoration<ChunkSplitter>();
+
 }  // namespace
 
-ChunkSplitter::ChunkSplitter() : _isPrimary(false), _threadPool(makeDefaultThreadPoolOptions()) {
+ChunkSplitter::ChunkSplitter() : _threadPool(makeDefaultThreadPoolOptions()) {
     _threadPool.startup();
 }
@@ -221,13 +223,21 @@ ChunkSplitter::~ChunkSplitter() {
     _threadPool.join();
 }
 
+ChunkSplitter& ChunkSplitter::get(OperationContext* opCtx) {
+    return get(opCtx->getServiceContext());
+}
+
+ChunkSplitter& ChunkSplitter::get(ServiceContext* serviceContext) {
+    return getChunkSplitter(serviceContext);
+}
+
 void ChunkSplitter::setReplicaSetMode(bool isPrimary) {
     stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
     _isPrimary = isPrimary;
 }
 
-void ChunkSplitter::initiateChunkSplitter() {
-    stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+void ChunkSplitter::onStepUp() {
+    stdx::lock_guard<stdx::mutex> lg(_mutex);
     if (_isPrimary) {
         return;
     }
@@ -237,8 +247,8 @@ void ChunkSplitter::initiateChunkSplitter() {
     // TODO: Re-enable this log line when auto split is actively running on shards.
 }
 
-void ChunkSplitter::interruptChunkSplitter() {
-    stdx::lock_guard<stdx::mutex> scopedLock(_mutex);
+void ChunkSplitter::onStepDown() {
+    stdx::lock_guard<stdx::mutex> lg(_mutex);
     if (!_isPrimary) {
         return;
     }
diff --git a/src/mongo/db/s/chunk_splitter.h b/src/mongo/db/s/chunk_splitter.h
index 1d8a8fcdf06..91de6c7fe10 100644
--- a/src/mongo/db/s/chunk_splitter.h
+++ b/src/mongo/db/s/chunk_splitter.h
@@ -33,6 +33,8 @@
 namespace mongo {
 
 class NamespaceString;
+class OperationContext;
+class ServiceContext;
 
 /**
  * Handles asynchronous auto-splitting of chunks.
@@ -45,6 +47,12 @@ public:
     ~ChunkSplitter();
 
     /**
+     * Obtains the service-wide chunk splitter instance.
+     */
+    static ChunkSplitter& get(OperationContext* opCtx);
+    static ChunkSplitter& get(ServiceContext* serviceContext);
+
+    /**
      * Sets the mode of the ChunkSplitter to either primary or secondary.
      * The ChunkSplitter is only active when primary.
      */
@@ -54,7 +62,7 @@ public:
     * Invoked when the shard server primary enters the 'PRIMARY' state to set up the ChunkSplitter
     * to begin accepting split requests.
     */
-    void initiateChunkSplitter();
+    void onStepUp();
 
    /**
     * Invoked when this node which is currently serving as a 'PRIMARY' steps down.
@@ -62,7 +70,7 @@
     * This method might be called multiple times in succession, which is what happens as a result
     * of incomplete transition to primary so it is resilient to that.
     */
-    void interruptChunkSplitter();
+    void onStepDown();
 
     /**
      * Schedules an autosplit task. This function throws on scheduling failure.
@@ -90,7 +98,7 @@ private:
     stdx::mutex _mutex;
 
     // The ChunkSplitter is only active on a primary node.
-    bool _isPrimary;
+    bool _isPrimary{false};
 
     // Thread pool for parallelizing splits.
     ThreadPool _threadPool;
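[Note: The change above turns ChunkSplitter into a ServiceContext decoration with static get() accessors, mirroring how ShardingState is already looked up. The sketch below is a self-contained approximation of that ownership model, for illustration only: the stand-in ServiceContext class and the main() driver are invented, and the real ServiceContext::declareDecoration<T>() registers T in a generic decoration registry rather than as a named member.]

    // chunk_splitter_decoration_sketch.cpp -- illustrative stand-ins only.
    #include <iostream>

    class ServiceContext;  // forward declaration, as in the patched header

    class ChunkSplitter {
    public:
        // Accessor with the same shape as the one this commit adds.
        static ChunkSplitter& get(ServiceContext* serviceContext);

        void setReplicaSetMode(bool isPrimary) {
            _isPrimary = isPrimary;
        }

        void onStepUp() {
            if (_isPrimary)
                return;  // repeated step-ups are harmless
            _isPrimary = true;
            std::cout << "ChunkSplitter accepting split requests\n";
        }

        void onStepDown() {
            if (!_isPrimary)
                return;  // resilient to repeated step-downs, per the header comment
            _isPrimary = false;
            std::cout << "ChunkSplitter no longer accepting split requests\n";
        }

    private:
        bool _isPrimary{false};  // same in-class initializer the commit introduces
    };

    // Stand-in for the decoration: each ServiceContext owns one ChunkSplitter,
    // tying the splitter's lifetime to the service rather than to ShardingState,
    // which is what lets ShardingState drop its unique_ptr member below.
    class ServiceContext {
    public:
        ChunkSplitter chunkSplitter;
    };

    ChunkSplitter& ChunkSplitter::get(ServiceContext* serviceContext) {
        return serviceContext->chunkSplitter;
    }

    int main() {
        ServiceContext service;
        ChunkSplitter::get(&service).onStepUp();    // on transition to PRIMARY
        ChunkSplitter::get(&service).onStepDown();  // on step-down
        return 0;
    }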
diff --git a/src/mongo/db/s/config/configsvr_create_database_command.cpp b/src/mongo/db/s/config/configsvr_create_database_command.cpp
index 56c429783d9..d42c0a6a222 100644
--- a/src/mongo/db/s/config/configsvr_create_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_create_database_command.cpp
@@ -63,26 +63,26 @@ class ConfigSvrCreateDatabaseCommand : public BasicCommand {
 public:
     ConfigSvrCreateDatabaseCommand() : BasicCommand("_configsvrCreateDatabase") {}
 
+    std::string help() const override {
+        return "Internal command, which is exported by the sharding config server. Do not call "
+               "directly. Create a database.";
+    }
+
     AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
         return AllowedOnSecondary::kNever;
     }
 
-    virtual bool adminOnly() const {
+    bool adminOnly() const override {
         return true;
     }
 
-    virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+    bool supportsWriteConcern(const BSONObj& cmd) const override {
         return true;
     }
 
-    std::string help() const override {
-        return "Internal command, which is exported by the sharding config server. Do not call "
-               "directly. Create a database.";
-    }
-
-    virtual Status checkAuthForCommand(Client* client,
-                                       const std::string& dbname,
-                                       const BSONObj& cmdObj) const override {
+    Status checkAuthForCommand(Client* client,
+                               const std::string& dbname,
+                               const BSONObj& cmdObj) const override {
         if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
                 ResourcePattern::forClusterResource(), ActionType::internal)) {
             return Status(ErrorCodes::Unauthorized, "Unauthorized");
@@ -94,8 +94,7 @@ public:
     bool run(OperationContext* opCtx,
              const std::string& dbname_unused,
              const BSONObj& cmdObj,
-             BSONObjBuilder& result) {
-
+             BSONObjBuilder& result) override {
         if (serverGlobalParams.clusterRole != ClusterRole::ConfigServer) {
             return CommandHelpers::appendCommandStatus(
                 result,
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 6660ac6366d..4c66e4f7f95 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -361,9 +361,14 @@ void MigrationChunkClonerSourceLegacy::cancelClone(OperationContext* opCtx) {
     switch (_state) {
         case kDone:
             break;
-        case kCloning:
-            _callRecipient(createRequestWithSessionId(kRecvChunkAbort, _args.getNss(), _sessionId))
-                .status_with_transitional_ignore();
+        case kCloning: {
+            const auto status = _callRecipient(createRequestWithSessionId(
+                                                   kRecvChunkAbort, _args.getNss(), _sessionId))
+                                    .getStatus();
+            if (!status.isOK()) {
+                LOG(0) << "Failed to cancel migration " << causedBy(redact(status));
+            }
+        }
         // Intentional fall through
         case kNew:
             _cleanup(opCtx);
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index ae3a93d098b..d23118c7fea 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -45,6 +45,7 @@
 #include "mongo/db/ops/update_lifecycle_impl.h"
 #include "mongo/db/repl/optime.h"
 #include "mongo/db/repl/replication_coordinator.h"
+#include "mongo/db/s/chunk_splitter.h"
 #include "mongo/db/s/operation_sharding_state.h"
 #include "mongo/db/s/sharded_connection_info.h"
 #include "mongo/db/s/sharding_initialization_mongod.h"
@@ -64,11 +65,6 @@
 #include "mongo/util/mongoutils/str.h"
 
 namespace mongo {
-
-using std::shared_ptr;
-using std::string;
-using std::vector;
-
 namespace {
 
 const auto getShardingState = ServiceContext::declareDecoration<ShardingState>();
@@ -81,7 +77,8 @@ const auto getShardingState = ServiceContext::declareDecoration<ShardingState>()
  * One example use case is for the ReplicaSetMonitor asynchronous callback when it detects changes
  * to replica set membership.
  */
-void updateShardIdentityConfigStringCB(const string& setName, const string& newConnectionString) {
+void updateShardIdentityConfigStringCB(const std::string& setName,
+                                       const std::string& newConnectionString) {
     auto configsvrConnStr = grid.shardRegistry()->getConfigServerConnectionString();
     if (configsvrConnStr.getSetName() != setName) {
         // Ignore all change notification for other sets that are not the config server.
@@ -102,8 +99,7 @@ void updateShardIdentityConfigStringCB(const string& setName, const string& newC
 }  // namespace
 
 ShardingState::ShardingState()
-    : _chunkSplitter(stdx::make_unique<ChunkSplitter>()),
-      _initializationState(static_cast<uint32_t>(InitializationState::kNew)),
+    : _initializationState(static_cast<uint32_t>(InitializationState::kNew)),
       _initializationStatus(Status(ErrorCodes::InternalError, "Uninitialized value")),
       _globalInit(&initializeGlobalShardingStateForMongod) {}
 
@@ -139,7 +135,7 @@ Status ShardingState::canAcceptShardedCommands() const {
     }
 }
 
-string ShardingState::getShardName() {
+std::string ShardingState::getShardName() {
     invariant(enabled());
     stdx::lock_guard<stdx::mutex> lk(_mutex);
     return _shardName;
@@ -173,18 +169,6 @@ Status ShardingState::updateConfigServerOpTimeFromMetadata(OperationContext* opC
     return Status::OK();
 }
 
-ChunkSplitter* ShardingState::getChunkSplitter() {
-    return _chunkSplitter.get();
-}
-
-void ShardingState::initiateChunkSplitter() {
-    _chunkSplitter->initiateChunkSplitter();
-}
-
-void ShardingState::interruptChunkSplitter() {
-    _chunkSplitter->interruptChunkSplitter();
-}
-
 void ShardingState::setGlobalInitMethodForTest(GlobalInitFunc func) {
     _globalInit = func;
 }
@@ -249,8 +233,7 @@ Status ShardingState::initializeFromShardIdentity(OperationContext* opCtx,
                                                   repl::MemberState::RS_PRIMARY);
 
     CatalogCacheLoader::get(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary);
-
-    _chunkSplitter->setReplicaSetMode(isStandaloneOrPrimary);
+    ChunkSplitter::get(opCtx).setReplicaSetMode(isStandaloneOrPrimary);
 
     log() << "initialized sharding components for "
           << (isStandaloneOrPrimary ? "primary" : "secondary") << " node.";
@@ -431,7 +414,7 @@ void ShardingState::appendInfo(OperationContext* opCtx, BSONObjBuilder& builder)
     builder.append("clusterId", _clusterId);
 }
 
-bool ShardingState::needCollectionMetadata(OperationContext* opCtx, const string& ns) {
+bool ShardingState::needCollectionMetadata(OperationContext* opCtx, const std::string& ns) {
     if (!enabled())
         return false;
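[Note: cancelClone() above stops discarding the abort request's result and logs it instead. The sketch below illustrates that check-and-log pattern with invented stand-ins for mongo::Status and StatusWith; it is not the real MongoDB API.]

    // status_logging_sketch.cpp -- invented Status/StatusWith stand-ins.
    #include <iostream>
    #include <string>

    struct Status {
        bool ok;
        std::string reason;
        bool isOK() const { return ok; }
    };

    template <typename T>
    struct StatusWith {
        Status status;
        T value;
        const Status& getStatus() const { return status; }
    };

    // Pretend the recipient shard is unreachable when asked to abort.
    StatusWith<std::string> callRecipient(const std::string& command) {
        return {Status{false, "HostUnreachable: no reply to " + command}, std::string()};
    }

    int main() {
        // Before the patch the StatusWith result was dropped on the floor;
        // now a failed abort is at least visible in the server log.
        const auto status = callRecipient("_recvChunkAbort").getStatus();
        if (!status.isOK()) {
            std::cout << "Failed to cancel migration :: caused by :: " << status.reason << '\n';
        }
        return 0;
    }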
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index cf5cc2945f9..a21e8e2d489 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -36,7 +36,6 @@
 #include "mongo/db/namespace_string.h"
 #include "mongo/db/s/active_migrations_registry.h"
 #include "mongo/db/s/active_move_primaries_registry.h"
-#include "mongo/db/s/chunk_splitter.h"
 #include "mongo/db/s/collection_sharding_state.h"
 #include "mongo/stdx/functional.h"
 #include "mongo/stdx/memory.h"
@@ -128,20 +127,6 @@ public:
      */
     Status updateConfigServerOpTimeFromMetadata(OperationContext* opCtx);
 
-    ChunkSplitter* getChunkSplitter();
-
-    /**
-     * Should be invoked when the shard server primary enters the 'PRIMARY' state.
-     * Sets up the ChunkSplitter to begin accepting split requests.
-     */
-    void initiateChunkSplitter();
-
-    /**
-     * Should be invoked when this node which is currently serving as a 'PRIMARY' steps down.
-     * Sets the state of the ChunkSplitter so that it will no longer accept split requests.
-     */
-    void interruptChunkSplitter();
-
     void appendInfo(OperationContext* opCtx, BSONObjBuilder& b);
 
     bool needCollectionMetadata(OperationContext* opCtx, const std::string& ns);
@@ -269,9 +254,6 @@ private:
     // Tracks the active move primary operations running on this shard
     ActiveMovePrimariesRegistry _activeMovePrimariesRegistry;
 
-    // Handles asynchronous auto-splitting of chunks
-    std::unique_ptr<ChunkSplitter> _chunkSplitter;
-
     // Protects state below
     stdx::mutex _mutex;
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index d5d7886c541..04a81b7912b 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -147,7 +147,8 @@ Status modifyRecoveryDocument(OperationContext* opCtx,
         autoGetOrCreateDb.emplace(
             opCtx, NamespaceString::kServerConfigurationNamespace.db(), MODE_X);
 
-        BSONObj updateObj = RecoveryDocument::createChangeObj(grid.configOpTime(), change);
+        BSONObj updateObj =
+            RecoveryDocument::createChangeObj(Grid::get(opCtx)->configOpTime(), change);
 
         LOG(1) << "Changing sharding recovery document " << redact(updateObj);
 
@@ -225,12 +226,13 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) {
 
     log() << "Sharding state recovery process found document " << redact(recoveryDoc.toBSON());
 
+    Grid* const grid = Grid::get(opCtx);
     ShardingState* const shardingState = ShardingState::get(opCtx);
     invariant(shardingState->enabled());
 
     if (!recoveryDoc.getMinOpTimeUpdaters()) {
         // Treat the minOpTime as up-to-date
-        grid.advanceConfigOpTime(recoveryDoc.getMinOpTime());
+        grid->advanceConfigOpTime(recoveryDoc.getMinOpTime());
         return Status::OK();
     }
 
@@ -240,16 +242,16 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) {
           "to retrieve the most recent opTime.";
 
     // Need to fetch the latest uptime from the config server, so do a logging write
-    Status status = Grid::get(opCtx)->catalogClient()->logChange(
-        opCtx,
-        "Sharding minOpTime recovery",
-        NamespaceString::kServerConfigurationNamespace.ns(),
-        recoveryDocBSON,
-        ShardingCatalogClient::kMajorityWriteConcern);
+    Status status =
+        grid->catalogClient()->logChange(opCtx,
+                                         "Sharding minOpTime recovery",
+                                         NamespaceString::kServerConfigurationNamespace.ns(),
+                                         recoveryDocBSON,
+                                         ShardingCatalogClient::kMajorityWriteConcern);
     if (!status.isOK())
         return status;
 
-    log() << "Sharding state recovered. New config server opTime is " << grid.configOpTime();
+    log() << "Sharding state recovered. New config server opTime is " << grid->configOpTime();
 
     // Finally, clear the recovery document so next time we don't need to recover
     status = modifyRecoveryDocument(opCtx, RecoveryDocument::Clear, kLocalWriteConcern);
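[Note: The recovery changes above route all config-opTime access through Grid::get(opCtx), fetched once into a local pointer, instead of the deprecated global grid object. The sketch below shows that fetch-once pattern with an invented, simplified Grid; the real class lives in src/mongo/s/grid.h and is resolved per ServiceContext, not as a function-local singleton.]

    // grid_accessor_sketch.cpp -- invented stand-in for mongo::Grid.
    #include <iostream>

    struct OpTime {
        long long term;  // simplified; the real repl::OpTime pairs a term with a timestamp
    };

    class Grid {
    public:
        // In mongod this overload takes an OperationContext and resolves a
        // per-ServiceContext decoration; a singleton is close enough here.
        static Grid* get() {
            static Grid instance;
            return &instance;
        }

        OpTime configOpTime() const {
            return _configOpTime;
        }

        void advanceConfigOpTime(OpTime opTime) {
            if (opTime.term > _configOpTime.term)
                _configOpTime = opTime;  // the recovered opTime only moves forward
        }

    private:
        OpTime _configOpTime{};
    };

    int main() {
        Grid* const grid = Grid::get();  // fetch once, as recover() now does
        grid->advanceConfigOpTime({42});
        std::cout << "config opTime term: " << grid->configOpTime().term << '\n';
        return 0;
    }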