author     Sergi Mateo Bellido <sergi.mateo-bellido@mongodb.com>   2022-04-21 14:13:18 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>        2022-05-09 15:43:01 +0000
commit     d66184cb6e4be8e7ce848d69f675b9bae01091f3 (patch)
tree       a7c292f866f8b5797642206dddb03a1b60dc4190
parent     7774cd9bc44d9e1ba19007f887c3b9445eed1a26 (diff)
download   mongo-d66184cb6e4be8e7ce848d69f675b9bae01091f3.tar.gz
SERVER-64601 Use the onInitialDataAvailable hook to initialize the sharding environment
(cherry picked from commit bf1b133adecc94af8d932170dca6361484946499)
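
For context, the ReplicaSetAwareService mechanism this commit hooks into works roughly as sketched below: a service overrides the lifecycle callbacks it cares about and registers itself with a registry, which the replication subsystem drives at the appropriate moments. This is a minimal, self-contained sketch of the pattern, not MongoDB's actual headers; ToyShardingInit, Registry, and the simplified OperationContext are illustrative assumptions.

    #include <iostream>
    #include <vector>

    struct OperationContext {};  // stand-in for mongo::OperationContext

    // Interface with (a subset of) the lifecycle hooks seen in the diff below.
    class ReplicaSetAwareInterface {
    public:
        virtual ~ReplicaSetAwareInterface() = default;
        virtual void onStartup(OperationContext* opCtx) = 0;
        virtual void onInitialDataAvailable(OperationContext* opCtx,
                                            bool isMajorityDataAvailable) = 0;
        virtual void onShutdown() = 0;
    };

    // Registry that fans a lifecycle event out to every registered service.
    class Registry {
    public:
        static Registry& get() {
            static Registry instance;
            return instance;
        }
        void add(ReplicaSetAwareInterface* svc) {
            _services.push_back(svc);
        }
        void onInitialDataAvailable(OperationContext* opCtx, bool majority) {
            for (auto* svc : _services)
                svc->onInitialDataAvailable(opCtx, majority);
        }

    private:
        std::vector<ReplicaSetAwareInterface*> _services;
    };

    // A service opts in by subclassing and registering, analogous to what
    // ShardingInitializationMongoD does in the diff below.
    class ToyShardingInit : public ReplicaSetAwareInterface {
    public:
        ToyShardingInit() {
            Registry::get().add(this);  // registration happens at construction
        }
        void onStartup(OperationContext*) override {}
        void onInitialDataAvailable(OperationContext*, bool isMajorityDataAvailable) override {
            std::cout << "initializing sharding environment (majority="
                      << isMajorityDataAvailable << ")\n";
        }
        void onShutdown() override {}
    };

    int main() {
        ToyShardingInit service;
        OperationContext opCtx;
        // The replication subsystem would invoke this once initial data is available.
        Registry::get().onInitialDataAvailable(&opCtx, /*majority=*/true);
    }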
-rw-r--r--  src/mongo/db/repl/SConscript                            |   1
-rw-r--r--  src/mongo/db/repl/replica_set_aware_service_test.cpp    |   3
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod.cpp       |  18
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod.h         |  15
-rw-r--r--  src/mongo/db/s/sharding_initialization_mongod_test.cpp  | 110
5 files changed, 146 insertions(+), 1 deletion(-)
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 2992ec9d30f..10783f244cd 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -1833,6 +1833,7 @@ env.CppUnitTest(
         'replica_set_aware_service_test.cpp',
     ],
     LIBDEPS=[
+        '$BUILD_DIR/mongo/db/catalog/database_holder',
         '$BUILD_DIR/mongo/db/service_context_test_fixture',
         'repl_coordinator_impl',
         'repl_coordinator_test_fixture',
diff --git a/src/mongo/db/repl/replica_set_aware_service_test.cpp b/src/mongo/db/repl/replica_set_aware_service_test.cpp
index 95bb0eb19e9..0104035b16e 100644
--- a/src/mongo/db/repl/replica_set_aware_service_test.cpp
+++ b/src/mongo/db/repl/replica_set_aware_service_test.cpp
@@ -31,6 +31,7 @@
 
 #include <memory>
 
+#include "mongo/db/catalog/database_holder_mock.h"
 #include "mongo/db/repl/replica_set_aware_service.h"
 #include "mongo/db/repl/replication_coordinator_mock.h"
 #include "mongo/db/service_context_test_fixture.h"
@@ -193,6 +194,8 @@ public:
         replCoord->setMyLastAppliedOpTimeAndWallTime(
             repl::OpTimeAndWallTime(repl::OpTime(Timestamp(1, 1), _term), Date_t()));
         repl::ReplicationCoordinator::set(serviceContext, std::move(replCoord));
+
+        DatabaseHolder::set(getServiceContext(), std::make_unique<DatabaseHolderMock>());
     }
 
 protected:
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index 9d57fd74933..410ef901ebf 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -76,6 +76,9 @@ namespace {
 
 const auto getInstance = ServiceContext::declareDecoration<ShardingInitializationMongoD>();
 
+const ReplicaSetAwareServiceRegistry::Registerer<ShardingInitializationMongoD> _registryRegisterer(
+    "ShardingInitializationMongoDRegistry");
+
 auto makeEgressHooksList(ServiceContext* service) {
     auto unshardedHookList = std::make_unique<rpc::EgressMetadataHookList>();
     unshardedHookList->addHook(std::make_unique<rpc::VectorClockMetadataHook>(service));
@@ -485,6 +488,21 @@ void ShardingInitializationMongoD::updateShardIdentityConfigString(
     }
 }
 
+void ShardingInitializationMongoD::onInitialDataAvailable(OperationContext* opCtx,
+                                                          bool isMajorityDataAvailable) {
+    // This function may take the global lock.
+    auto shardingInitialized = initializeShardingAwarenessIfNeeded(opCtx);
+    if (shardingInitialized) {
+        auto status = waitForShardRegistryReload(opCtx);
+        if (!status.isOK()) {
+            LOGV2(6460100,
+                  "Error loading shard registry at startup {error}",
+                  "Error loading shard registry at startup",
+                  "error"_attr = redact(status));
+        }
+    }
+}
+
 void initializeGlobalShardingStateForMongoD(OperationContext* opCtx,
                                             const ShardId& shardId,
                                             const ConnectionString& configCS) {
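
The new onInitialDataAvailable body above retries sharding-awareness initialization and then reloads the shard registry, only logging if the reload fails. Because the hook can fire after the ShardIdentity OpObserver has already initialized the ShardingState during logical initial sync (the tests further down exercise exactly that), initializeShardingAwarenessIfNeeded has to tolerate repeated calls. A minimal sketch of that idempotency contract follows; Initializer, _haveShardIdentity, and _doExpensiveInit are hypothetical names, not MongoDB's implementation:

    #include <mutex>

    class Initializer {
    public:
        // Returns true if sharding is initialized after the call, whether this
        // call did the work or a previous one (OpObserver, earlier hook) did.
        bool initializeIfNeeded() {
            std::lock_guard<std::mutex> lk(_mutex);  // one init/teardown at a time
            if (_initialized)
                return true;   // second call: a no-op, not an error
            if (!_haveShardIdentity)
                return false;  // not sharding-aware yet (e.g. before initial sync)
            _doExpensiveInit();
            _initialized = true;
            return true;
        }

    private:
        void _doExpensiveInit() {}
        std::mutex _mutex;  // plays the role of _initSynchronizationMutex in the diff
        bool _initialized = false;
        bool _haveShardIdentity = true;
    };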
diff --git a/src/mongo/db/s/sharding_initialization_mongod.h b/src/mongo/db/s/sharding_initialization_mongod.h
index 5aa6d2c8d01..eeb4a323f7b 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.h
+++ b/src/mongo/db/s/sharding_initialization_mongod.h
@@ -33,6 +33,7 @@
 
 #include "mongo/base/string_data.h"
 #include "mongo/client/replica_set_change_notifier.h"
+#include "mongo/db/repl/replica_set_aware_service.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/db/s/type_shard_identity.h"
 #include "mongo/s/sharding_initialization.h"
@@ -45,7 +46,7 @@ namespace mongo {
  * services, attaches them to the same service context to which it itself is attached and puts the
  * ShardingState in the initialized state.
  */
-class ShardingInitializationMongoD {
+class ShardingInitializationMongoD : public ReplicaSetAwareService<ShardingInitializationMongoD> {
     ShardingInitializationMongoD(const ShardingInitializationMongoD&) = delete;
     ShardingInitializationMongoD& operator=(const ShardingInitializationMongoD&) = delete;
 
@@ -73,6 +74,7 @@ public:
      *
      * If it returns false, this means the node is not yet sharding aware.
      *
+     * NOTE: this function might be called more than once.
      * NOTE: this function briefly takes the global lock to determine primary/secondary state.
      */
    bool initializeShardingAwarenessIfNeeded(OperationContext* opCtx);
@@ -108,6 +110,17 @@ private:
    void _initializeShardingEnvironmentOnShardServer(OperationContext* opCtx,
                                                     const ShardIdentity& shardIdentity);
 
+    // Virtual methods coming from the ReplicaSetAwareService
+    void onStartup(OperationContext* opCtx) override final {}
+    void onInitialDataAvailable(OperationContext* opCtx,
+                                bool isMajorityDataAvailable) override final;
+    void onShutdown() override final {}
+    void onStepUpBegin(OperationContext* opCtx, long long term) override final {}
+    void onStepUpComplete(OperationContext* opCtx, long long term) override final {}
+    void onStepDown() override final {}
+    void onBecomeArbiter() override final {}
+
+
     // This mutex ensures that only one thread at a time executes the sharding
     // initialization/teardown sequence
     Mutex _initSynchronizationMutex =
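
In the header above, the class now derives from ReplicaSetAwareService<ShardingInitializationMongoD> and stubs out every lifecycle hook except onInitialDataAvailable with an empty `override final` body: the service explicitly opts out of those events, and `final` keeps subclasses from silently re-enabling them. A standalone illustration of that idiom, with hypothetical Hooks/MyService names:

    // Standalone sketch of the "empty final override" idiom used above.
    struct Hooks {
        virtual ~Hooks() = default;
        virtual void onStepDown() = 0;
        virtual void onBecomeArbiter() = 0;
    };

    struct MyService : Hooks {
        void onStepDown() override final {}       // intentionally a no-op
        void onBecomeArbiter() override final {}  // intentionally a no-op
    };

    // struct Derived : MyService {
    //     void onStepDown() override {}  // error: onStepDown is final in MyService
    // };

    int main() {
        MyService s;
        s.onStepDown();  // compiles and does nothing, as intended
    }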
diff --git a/src/mongo/db/s/sharding_initialization_mongod_test.cpp b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
index ab2bffb2ff7..a0c1890c980 100644
--- a/src/mongo/db/s/sharding_initialization_mongod_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
@@ -237,6 +237,56 @@ TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingShardIdentit
     ASSERT_EQ("config/a:1,b:2", shardRegistry()->getConfigServerConnectionString().toString());
 }
 
+TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithNonMatchingShardIdentityFails) {
+    // Must hold a lock to call initializeFromShardIdentity.
+    Lock::GlobalWrite lk(operationContext());
+
+    auto clusterID = OID::gen();
+    ShardIdentityType shardIdentity;
+    shardIdentity.setConfigsvrConnectionString(
+        ConnectionString(ConnectionString::ConnectionType::kReplicaSet, "a:1,b:2", "config"));
+    shardIdentity.setShardName(kShardName);
+    shardIdentity.setClusterId(clusterID);
+
+    shardingInitialization()->initializeFromShardIdentity(operationContext(), shardIdentity);
+
+    shardingInitialization()->setGlobalInitMethodForTest(
+        [](OperationContext* opCtx, const ShardIdentity& shardIdentity) {
+            FAIL("Should not be invoked!");
+        });
+
+    // Running the initialization again with a different shardName must result in an error
+    {
+        ShardIdentityType shardIdentity2 = shardIdentity;
+        shardIdentity2.setShardName("AnotherShardName");
+        ASSERT_THROWS_CODE(shardingInitialization()->initializeFromShardIdentity(operationContext(),
+                                                                                 shardIdentity2),
+                           AssertionException,
+                           40371);
+    }
+
+    // Running the initialization again with a different clusterId must result in an error
+    {
+        ShardIdentityType shardIdentity2 = shardIdentity;
+        shardIdentity2.setClusterId(OID::gen());
+        ASSERT_THROWS_CODE(shardingInitialization()->initializeFromShardIdentity(operationContext(),
+                                                                                 shardIdentity2),
+                           AssertionException,
+                           40372);
+    }
+
+    // Running the initialization again with a different replica set name must result in an error
+    {
+        ShardIdentityType shardIdentity2 = shardIdentity;
+        shardIdentity2.setConfigsvrConnectionString(
+            ConnectionString(ConnectionString::ConnectionType::kReplicaSet, "a:1,b:2", "config2"));
+        ASSERT_THROWS_CODE(shardingInitialization()->initializeFromShardIdentity(operationContext(),
+                                                                                 shardIdentity2),
+                           AssertionException,
+                           40374);
+    }
+}
+
 TEST_F(ShardingInitializationMongoDTest, InitializeAgainWithMatchingReplSetNameSucceeds) {
     // Must hold a lock to call initializeFromShardIdentity.
     Lock::GlobalWrite lk(operationContext());
@@ -316,6 +366,8 @@ TEST_F(ShardingInitializationMongoDTest,
     }();
 
     ASSERT(shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
+    // A second call with the same parameters shouldn't trigger any error.
+    ASSERT(shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
 }
 
 /**
@@ -471,6 +523,64 @@ TEST_F(ShardingInitializationMongoDTest,
     }
 
     ASSERT(shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
+    // A second call with the same parameters shouldn't trigger any error.
+    ASSERT(shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
+}
+
+TEST_F(
+    ShardingInitializationMongoDTest,
+    InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndShardServerAsIfLogicalInitialSync) {
+    // No valid ShardIdentity yet, since we will get it through initial sync.
+    ASSERT(!shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
+
+    BSONObj validShardIdentity = [&] {
+        ShardIdentityType shardIdentity;
+        shardIdentity.setConfigsvrConnectionString(
+            ConnectionString(ConnectionString::ConnectionType::kReplicaSet, "a:1,b:2", "config"));
+        shardIdentity.setShardName(kShardName);
+        shardIdentity.setClusterId(OID::gen());
+        ASSERT_OK(shardIdentity.validate());
+        return shardIdentity.toShardIdentityDocument();
+    }();
+
+    // An OpObserver will react to this insertion and initialize the ShardingState.
+    _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
+                            validShardIdentity);
+    ASSERT(shardingState()->enabled());
+
+    // This call represents the one done by the onInitialDataAvailable hook. It should be a no-op.
+    ASSERT(shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
+}
+
+TEST_F(ShardingInitializationMongoDTest,
+       InitializeShardingAwarenessIfNeededNotQueryableBackupModeAndShardServerAsIfFCBIS) {
+    // No valid ShardIdentity yet, since we will get it through initial sync.
+    ASSERT(!shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
+
+    // Insert the shardIdentity doc to disk while pretending that we are in "standalone" mode,
+    // otherwise the OpObserver for inserts would prevent the insert from occurring, because the
+    // shardIdentity doc is invalid.
+    {
+        ScopedSetStandaloneMode standalone(getServiceContext());
+
+        BSONObj validShardIdentity = [&] {
+            ShardIdentityType shardIdentity;
+            shardIdentity.setConfigsvrConnectionString(ConnectionString(
+                ConnectionString::ConnectionType::kReplicaSet, "a:1,b:2", "config"));
+            shardIdentity.setShardName(kShardName);
+            shardIdentity.setClusterId(OID::gen());
+            ASSERT_OK(shardIdentity.validate());
+            return shardIdentity.toShardIdentityDocument();
+        }();
+
+        _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
+                                validShardIdentity);
+    }
+
+    ASSERT(!shardingState()->enabled());
+
+    // This call represents the one done by the onInitialDataAvailable hook. It should be a no-op.
+    ASSERT(shardingInitialization()->initializeShardingAwarenessIfNeeded(operationContext()));
 }
 
 /**
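
Taken together, the new tests pin down the two ways a shard can obtain its ShardIdentity document before the hook runs: via logical initial sync, where the OpObserver initializes the ShardingState as the document is replicated, and via file-copy based initial sync (FCBIS), where the document appears on disk with no OpObserver involved. Below is a toy model of the expected outcomes under that reading of the tests; ToyNode and its members are hypothetical names that only encode the behavior, not MongoDB's code:

    #include <cassert>

    // Toy model of the two initial-sync paths the tests above exercise.
    struct ToyNode {
        bool hasShardIdentityDoc = false;
        bool shardingStateEnabled = false;

        // Models initializeShardingAwarenessIfNeeded: idempotent, returns
        // whether the node is sharding-aware after the call.
        bool initializeIfNeeded() {
            if (hasShardIdentityDoc)
                shardingStateEnabled = true;
            return shardingStateEnabled;
        }
    };

    int main() {
        // Logical initial sync: the OpObserver enables sharding on insert,
        // so the onInitialDataAvailable-driven call is a no-op.
        ToyNode logical;
        assert(!logical.initializeIfNeeded());  // no doc yet
        logical.hasShardIdentityDoc = true;
        logical.shardingStateEnabled = true;    // OpObserver reacted to the insert
        assert(logical.initializeIfNeeded());   // second call succeeds, no error

        // FCBIS: the doc lands on disk without the OpObserver running, so the
        // hook-driven call performs the initialization itself.
        ToyNode fcbis;
        assert(!fcbis.initializeIfNeeded());    // no doc yet
        fcbis.hasShardIdentityDoc = true;       // file-copied, no OpObserver
        assert(!fcbis.shardingStateEnabled);
        assert(fcbis.initializeIfNeeded());     // hook initializes now
    }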