summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2020-06-12 08:42:14 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-06-15 11:31:00 +0000
commite1d7052125a3ca7767a81c4737628ba37077d17b (patch)
treec330644ae88efef8a088cfdeb7b4344afb233992
parenta357a44eb10fe00e3fd4378b539ef36fa9fb3ccd (diff)
downloadmongo-e1d7052125a3ca7767a81c4737628ba37077d17b.tar.gz
SERVER-48775 Make OpObserverShardingImpl part of sharding_runtime_d
-rw-r--r--src/mongo/SConscript2
-rw-r--r--src/mongo/db/SConscript1
-rw-r--r--src/mongo/db/catalog/SConscript10
-rw-r--r--src/mongo/db/catalog/database_test.cpp14
-rw-r--r--src/mongo/db/index_builds_coordinator.cpp4
-rw-r--r--src/mongo/db/mongod_main.cpp2
-rw-r--r--src/mongo/db/repl/SConscript2
-rw-r--r--src/mongo/db/repl/repl_set_config_checks_test.cpp105
-rw-r--r--src/mongo/db/s/SConscript16
-rw-r--r--src/mongo/db/s/balancer/migration_manager_test.cpp16
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp52
-rw-r--r--src/mongo/db/s/sharding_initialization_mongod_test.cpp10
-rw-r--r--src/mongo/db/s/vector_clock_config_server_test.cpp16
-rw-r--r--src/mongo/db/s/vector_clock_shard_server_test.cpp16
-rw-r--r--src/mongo/db/vector_clock_mongod_test.cpp17
-rw-r--r--src/mongo/dbtests/SConscript1
-rw-r--r--src/mongo/dbtests/querytests.cpp48
-rw-r--r--src/mongo/dbtests/repltests.cpp51
-rw-r--r--src/mongo/dbtests/storage_timestamp_tests.cpp12
-rw-r--r--src/mongo/s/SConscript1
-rw-r--r--src/mongo/watchdog/watchdog_mongod.cpp7
-rw-r--r--src/mongo/watchdog/watchdog_mongod.h4
22 files changed, 182 insertions, 225 deletions
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index eb8a3f5af60..41eaa509e15 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -451,7 +451,6 @@ env.Library(
'db/replica_set_aware_service',
'db/rw_concern_d',
'db/s/balancer',
- 'db/s/op_observer_sharding_impl',
'db/s/sessions_collection_config_server',
'db/s/sharding_commands_d',
'db/s/sharding_runtime_d',
@@ -555,7 +554,6 @@ env.Library(
'db/repl/serveronly_repl',
'db/repl/storage_interface_impl',
'db/repl/topology_coordinator',
- 'db/s/op_observer_sharding_impl',
'db/s/sessions_collection_config_server',
'db/s/sharding_runtime_d',
'db/serverinit',
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index be72fd9ff19..e6d73e37a5b 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -778,7 +778,6 @@ env.Library(
],
)
-
env.Library(
target="op_observer_impl",
source=[
diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript
index 3d45096707d..c94140d876f 100644
--- a/src/mongo/db/catalog/SConscript
+++ b/src/mongo/db/catalog/SConscript
@@ -487,6 +487,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/index_builds_coordinator_mongod',
'$BUILD_DIR/mongo/db/matcher/expressions',
'$BUILD_DIR/mongo/db/namespace_string',
+ '$BUILD_DIR/mongo/db/op_observer_impl',
'$BUILD_DIR/mongo/db/op_observer',
'$BUILD_DIR/mongo/db/query/datetime/date_time_support',
'$BUILD_DIR/mongo/db/query/query_test_service_context',
@@ -496,10 +497,9 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/db/repl/replmocks',
'$BUILD_DIR/mongo/db/repl/storage_interface_impl',
- '$BUILD_DIR/mongo/db/s/op_observer_sharding_impl',
- '$BUILD_DIR/mongo/db/service_context',
'$BUILD_DIR/mongo/db/service_context_d_test_fixture',
'$BUILD_DIR/mongo/db/service_context_test_fixture',
+ '$BUILD_DIR/mongo/db/service_context',
'$BUILD_DIR/mongo/db/storage/kv/kv_prefix',
'$BUILD_DIR/mongo/db/storage/wiredtiger/storage_wiredtiger',
'$BUILD_DIR/mongo/unittest/unittest',
@@ -508,20 +508,20 @@ env.CppUnitTest(
'catalog_control',
'catalog_helpers',
'catalog_test_fixture',
- 'collection',
- 'collection_catalog',
'collection_catalog_helper',
+ 'collection_catalog',
'collection_options',
'collection_validation',
+ 'collection',
'commit_quorum_options',
'database_holder',
'index_build_block',
'index_build_entry_idl',
'index_builds_manager',
'index_key_validate',
- 'validate_idl',
'multi_index_block',
'throttle_cursor',
+ 'validate_idl',
'validate_state',
],
)
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index 8a3baa12c34..70bb59651a3 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -31,7 +31,6 @@
#include <boost/optional/optional_io.hpp>
#include <memory>
-
#include <pcrecpp.h>
#include "mongo/bson/util/builder.h"
@@ -47,6 +46,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/op_observer.h"
+#include "mongo/db/op_observer_impl.h"
#include "mongo/db/op_observer_registry.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/drop_pending_collection_reaper.h"
@@ -56,20 +56,14 @@
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/storage_interface_mock.h"
-#include "mongo/db/s/op_observer_sharding_impl.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/storage/durable_catalog.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/scopeguard.h"
+namespace mongo {
namespace {
-using namespace mongo;
-
-ServiceContext::UniqueOperationContext makeOpCtx() {
- return cc().makeOperationContext();
-}
-
class DatabaseTest : public ServiceContextMongoDTest {
private:
void setUp() override;
@@ -106,7 +100,7 @@ void DatabaseTest::setUp() {
// repl::logOp(). repl::logOp() will also store the oplog entry's optime in ReplClientInfo.
OpObserverRegistry* opObserverRegistry =
dynamic_cast<OpObserverRegistry*>(service->getOpObserver());
- opObserverRegistry->addObserver(std::make_unique<OpObserverShardingImpl>());
+ opObserverRegistry->addObserver(std::make_unique<OpObserverImpl>());
_nss = NamespaceString("test.foo");
}
@@ -554,5 +548,5 @@ TEST_F(DatabaseTest, CreateCollectionProhibitsReplicatedCollectionsWithoutIdInde
});
}
-
} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp
index 94fc21025bc..9c6eb5c324a 100644
--- a/src/mongo/db/index_builds_coordinator.cpp
+++ b/src/mongo/db/index_builds_coordinator.cpp
@@ -394,7 +394,7 @@ StatusWith<std::pair<long long, long long>> IndexBuildsCoordinator::rebuildIndex
return status;
}
- auto& collectionCatalog = CollectionCatalog::get(getGlobalServiceContext());
+ auto& collectionCatalog = CollectionCatalog::get(opCtx->getServiceContext());
Collection* collection = collectionCatalog.lookupCollectionByNamespace(opCtx, nss);
// Complete the index build.
@@ -420,7 +420,7 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC
indexNames.push_back(name);
}
- auto& collectionCatalog = CollectionCatalog::get(getGlobalServiceContext());
+ auto& collectionCatalog = CollectionCatalog::get(opCtx->getServiceContext());
Collection* collection = collectionCatalog.lookupCollectionByNamespace(opCtx, nss);
auto indexCatalog = collection->getIndexCatalog();
{
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 4d093d7824a..c8d46c6eda1 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -430,7 +430,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
initializeSNMP();
- startWatchdog();
+ startWatchdog(serviceContext);
if (!storageGlobalParams.readOnly) {
boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index a9e4fca41ee..89538ec3139 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -602,7 +602,6 @@ env.Library(
'replmocks',
'storage_interface_impl',
'$BUILD_DIR/mongo/db/catalog/document_validation',
- '$BUILD_DIR/mongo/db/s/op_observer_sharding_impl',
'$BUILD_DIR/mongo/db/service_context_d_test_fixture',
'$BUILD_DIR/mongo/db/storage/wiredtiger/storage_wiredtiger',
],
@@ -1288,7 +1287,6 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/logical_time',
'$BUILD_DIR/mongo/db/op_observer',
'$BUILD_DIR/mongo/db/query/command_request_response',
- '$BUILD_DIR/mongo/db/s/op_observer_sharding_impl',
'$BUILD_DIR/mongo/db/service_context_d',
'$BUILD_DIR/mongo/db/service_context_d_test_fixture',
'$BUILD_DIR/mongo/db/service_context_test_fixture',
diff --git a/src/mongo/db/repl/repl_set_config_checks_test.cpp b/src/mongo/db/repl/repl_set_config_checks_test.cpp
index 6aa169249d4..39dd07b1eb4 100644
--- a/src/mongo/db/repl/repl_set_config_checks_test.cpp
+++ b/src/mongo/db/repl/repl_set_config_checks_test.cpp
@@ -63,7 +63,7 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_VersionMustBe1) {
<< "h1"))),
newReplSetId);
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForInitiate(&rses, config, getGlobalServiceContext()).getStatus());
+ validateConfigForInitiate(&rses, config, getServiceContext()).getStatus());
}
TEST_F(ServiceContextTest, ValidateConfigForInitiate_TermIsAlwaysInitialTerm) {
@@ -79,7 +79,7 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_TermIsAlwaysInitialTerm) {
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))),
newReplSetId);
- ASSERT_OK(validateConfigForInitiate(&rses, config, getGlobalServiceContext()).getStatus());
+ ASSERT_OK(validateConfigForInitiate(&rses, config, getServiceContext()).getStatus());
ASSERT_EQUALS(config.getConfigTerm(), OpTime::kInitialTerm);
}
@@ -104,17 +104,15 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_MustFindSelf) {
presentTwiceExternalState.addSelf(HostAndPort("h3"));
presentTwiceExternalState.addSelf(HostAndPort("h1"));
- ASSERT_EQUALS(
- ErrorCodes::NodeNotFound,
- validateConfigForInitiate(&notPresentExternalState, config, getGlobalServiceContext())
- .getStatus());
- ASSERT_EQUALS(
- ErrorCodes::InvalidReplicaSetConfig,
- validateConfigForInitiate(&presentTwiceExternalState, config, getGlobalServiceContext())
- .getStatus());
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound,
+ validateConfigForInitiate(&notPresentExternalState, config, getServiceContext())
+ .getStatus());
+ ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
+ validateConfigForInitiate(&presentTwiceExternalState, config, getServiceContext())
+ .getStatus());
ASSERT_EQUALS(1,
unittest::assertGet(validateConfigForInitiate(
- &presentOnceExternalState, config, getGlobalServiceContext())));
+ &presentOnceExternalState, config, getServiceContext())));
}
TEST_F(ServiceContextTest, ValidateConfigForInitiate_SelfMustBeElectable) {
@@ -135,10 +133,9 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_SelfMustBeElectable) {
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_EQUALS(
- ErrorCodes::NodeNotElectable,
- validateConfigForInitiate(&presentOnceExternalState, config, getGlobalServiceContext())
- .getStatus());
+ ASSERT_EQUALS(ErrorCodes::NodeNotElectable,
+ validateConfigForInitiate(&presentOnceExternalState, config, getServiceContext())
+ .getStatus());
}
TEST_F(ServiceContextTest, ValidateConfigForInitiate_WriteConcernMustBeSatisfiable) {
@@ -157,10 +154,9 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_WriteConcernMustBeSatisfiab
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_EQUALS(
- ErrorCodes::UnsatisfiableWriteConcern,
- validateConfigForInitiate(&presentOnceExternalState, config, getGlobalServiceContext())
- .getStatus());
+ ASSERT_EQUALS(ErrorCodes::UnsatisfiableWriteConcern,
+ validateConfigForInitiate(&presentOnceExternalState, config, getServiceContext())
+ .getStatus());
}
TEST_F(ServiceContextTest, ValidateConfigForInitiate_ArbiterPriorityMustBeZeroOrOne) {
@@ -209,15 +205,13 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_ArbiterPriorityMustBeZeroOr
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h1"));
- ASSERT_OK(
- validateConfigForInitiate(&presentOnceExternalState, zeroConfig, getGlobalServiceContext())
- .getStatus());
- ASSERT_OK(
- validateConfigForInitiate(&presentOnceExternalState, oneConfig, getGlobalServiceContext())
- .getStatus());
+ ASSERT_OK(validateConfigForInitiate(&presentOnceExternalState, zeroConfig, getServiceContext())
+ .getStatus());
+ ASSERT_OK(validateConfigForInitiate(&presentOnceExternalState, oneConfig, getServiceContext())
+ .getStatus());
ASSERT_EQUALS(
ErrorCodes::InvalidReplicaSetConfig,
- validateConfigForInitiate(&presentOnceExternalState, twoConfig, getGlobalServiceContext())
+ validateConfigForInitiate(&presentOnceExternalState, twoConfig, getServiceContext())
.getStatus());
}
@@ -260,14 +254,14 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_NewlyAddedFieldNotAllowed)
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h1"));
- auto status = validateConfigForInitiate(
- &presentOnceExternalState, firstNewlyAdded, getGlobalServiceContext())
- .getStatus();
+ auto status =
+ validateConfigForInitiate(&presentOnceExternalState, firstNewlyAdded, getServiceContext())
+ .getStatus();
ASSERT_EQUALS(status, ErrorCodes::InvalidReplicaSetConfig);
ASSERT_TRUE(status.reason().find("newly_added_h1") != std::string::npos);
- status = validateConfigForInitiate(
- &presentOnceExternalState, lastNewlyAdded, getGlobalServiceContext())
- .getStatus();
+ status =
+ validateConfigForInitiate(&presentOnceExternalState, lastNewlyAdded, getServiceContext())
+ .getStatus();
ASSERT_EQUALS(status, ErrorCodes::InvalidReplicaSetConfig);
ASSERT_TRUE(status.reason().find("newly_added_h3") != std::string::npos);
}
@@ -681,7 +675,7 @@ TEST_F(ServiceContextTest, ValidateConfigForInitiate_NewConfigInvalid) {
presentOnceExternalState.addSelf(HostAndPort("h2"));
ASSERT_EQUALS(
ErrorCodes::BadValue,
- validateConfigForInitiate(&presentOnceExternalState, newConfig, getGlobalServiceContext())
+ validateConfigForInitiate(&presentOnceExternalState, newConfig, getServiceContext())
.getStatus());
}
@@ -758,7 +752,7 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigInvalid) {
presentOnceExternalState.addSelf(HostAndPort("h2"));
ASSERT_EQUALS(
ErrorCodes::BadValue,
- validateConfigForStartUp(&presentOnceExternalState, newConfig, getGlobalServiceContext())
+ validateConfigForStartUp(&presentOnceExternalState, newConfig, getServiceContext())
.getStatus());
}
@@ -777,9 +771,8 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigValid) {
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_OK(
- validateConfigForStartUp(&presentOnceExternalState, newConfig, getGlobalServiceContext())
- .getStatus());
+ ASSERT_OK(validateConfigForStartUp(&presentOnceExternalState, newConfig, getServiceContext())
+ .getStatus());
}
TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigWriteConcernNotSatisfiable) {
@@ -797,9 +790,8 @@ TEST_F(ServiceContextTest, ValidateConfigForStartUp_NewConfigWriteConcernNotSati
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_OK(
- validateConfigForStartUp(&presentOnceExternalState, newConfig, getGlobalServiceContext())
- .getStatus());
+ ASSERT_OK(validateConfigForStartUp(&presentOnceExternalState, newConfig, getServiceContext())
+ .getStatus());
}
TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigInvalid) {
@@ -819,7 +811,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigInvalid)
presentOnceExternalState.addSelf(HostAndPort("h2"));
ASSERT_EQUALS(ErrorCodes::BadValue,
validateConfigForHeartbeatReconfig(
- &presentOnceExternalState, newConfig, getGlobalServiceContext())
+ &presentOnceExternalState, newConfig, getServiceContext())
.getStatus());
}
@@ -838,7 +830,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigValid) {
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
ASSERT_OK(validateConfigForHeartbeatReconfig(
- &presentOnceExternalState, newConfig, getGlobalServiceContext())
+ &presentOnceExternalState, newConfig, getServiceContext())
.getStatus());
}
@@ -859,7 +851,7 @@ TEST_F(ServiceContextTest, ValidateConfigForHeartbeatReconfig_NewConfigWriteConc
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
ASSERT_OK(validateConfigForHeartbeatReconfig(
- &presentOnceExternalState, newConfig, getGlobalServiceContext())
+ &presentOnceExternalState, newConfig, getServiceContext())
.getStatus());
}
@@ -1102,29 +1094,28 @@ TEST_F(ServiceContextTest, FindSelfInConfig) {
presentThriceExternalState.addSelf(HostAndPort("h1"));
// Test 'findSelfInConfig'.
- ASSERT_EQUALS(ErrorCodes::NodeNotFound,
- findSelfInConfig(&notPresentExternalState, newConfig, getGlobalServiceContext())
- .getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::NodeNotFound,
+ findSelfInConfig(&notPresentExternalState, newConfig, getServiceContext()).getStatus());
ASSERT_EQUALS(
ErrorCodes::InvalidReplicaSetConfig,
- findSelfInConfig(&presentThriceExternalState, newConfig, getGlobalServiceContext())
- .getStatus());
+ findSelfInConfig(&presentThriceExternalState, newConfig, getServiceContext()).getStatus());
ASSERT_EQUALS(1,
- unittest::assertGet(findSelfInConfig(
- &presentOnceExternalState, newConfig, getGlobalServiceContext())));
+ unittest::assertGet(
+ findSelfInConfig(&presentOnceExternalState, newConfig, getServiceContext())));
// The same rules apply to 'findSelfInConfigIfElectable'.
ASSERT_EQUALS(
ErrorCodes::NodeNotFound,
- findSelfInConfigIfElectable(&notPresentExternalState, newConfig, getGlobalServiceContext())
+ findSelfInConfigIfElectable(&notPresentExternalState, newConfig, getServiceContext())
+ .getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::InvalidReplicaSetConfig,
+ findSelfInConfigIfElectable(&presentThriceExternalState, newConfig, getServiceContext())
.getStatus());
- ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- findSelfInConfigIfElectable(
- &presentThriceExternalState, newConfig, getGlobalServiceContext())
- .getStatus());
ASSERT_EQUALS(1,
unittest::assertGet(findSelfInConfigIfElectable(
- &presentOnceExternalState, newConfig, getGlobalServiceContext())));
+ &presentOnceExternalState, newConfig, getServiceContext())));
// We must be electable in the new config.
newConfig = ReplSetConfig::parse(BSON("_id"
@@ -1140,7 +1131,7 @@ TEST_F(ServiceContextTest, FindSelfInConfig) {
ASSERT_EQUALS(
ErrorCodes::NodeNotElectable,
- findSelfInConfigIfElectable(&presentOnceExternalState, newConfig, getGlobalServiceContext())
+ findSelfInConfigIfElectable(&presentOnceExternalState, newConfig, getServiceContext())
.getStatus());
}
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index e6f46132f70..ba3e90c3932 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -62,6 +62,7 @@ env.Library(
'move_primary_source_manager.cpp',
'move_timing_helper.cpp',
'namespace_metadata_change_notifications.cpp',
+ 'op_observer_sharding_impl.cpp',
'periodic_balancer_config_refresher.cpp',
'periodic_sharded_index_consistency_checker.cpp',
'range_deletion_util.cpp',
@@ -82,8 +83,8 @@ env.Library(
'split_chunk.cpp',
'split_vector.cpp',
'start_chunk_clone_request.cpp',
- env.Idlc('range_deletion_task.idl')[0],
env.Idlc('migration_coordinator_document.idl')[0],
+ env.Idlc('range_deletion_task.idl')[0],
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/catalog/multi_index_block',
@@ -128,19 +129,6 @@ env.Library(
)
env.Library(
- target='op_observer_sharding_impl',
- source=[
- 'op_observer_sharding_impl.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/db/op_observer_impl',
- ],
- LIBDEPS_PRIVATE=[
- 'sharding_runtime_d',
- ],
-)
-
-env.Library(
target='transaction_coordinator',
source=[
'server_transaction_coordinators_metrics.cpp',
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index c0d4032034d..f9c647e80b3 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -137,7 +137,7 @@ TEST_F(MigrationManagerTest, OneCollectionTwoMigrations) {
MigrateInfo::chunksImbalance}};
auto future = launchAsync([this, migrationRequests] {
- ThreadClient tc("Test", getGlobalServiceContext());
+ ThreadClient tc("Test", getServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding a host to which to send the command.
@@ -211,7 +211,7 @@ TEST_F(MigrationManagerTest, TwoCollectionsTwoMigrationsEach) {
MigrateInfo::chunksImbalance}};
auto future = launchAsync([this, migrationRequests] {
- ThreadClient tc("Test", getGlobalServiceContext());
+ ThreadClient tc("Test", getServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding a host to which to send the command.
@@ -272,7 +272,7 @@ TEST_F(MigrationManagerTest, SourceShardNotFound) {
MigrateInfo::chunksImbalance}};
auto future = launchAsync([this, chunk1, chunk2, migrationRequests] {
- ThreadClient tc("Test", getGlobalServiceContext());
+ ThreadClient tc("Test", getServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
@@ -322,7 +322,7 @@ TEST_F(MigrationManagerTest, JumboChunkResponseBackwardsCompatibility) {
MigrateInfo::chunksImbalance}};
auto future = launchAsync([this, chunk1, migrationRequests] {
- ThreadClient tc("Test", getGlobalServiceContext());
+ ThreadClient tc("Test", getServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
@@ -361,7 +361,7 @@ TEST_F(MigrationManagerTest, InterruptMigration) {
setUpChunk(collName, kKeyPattern.globalMin(), kKeyPattern.globalMax(), kShardId0, version);
auto future = launchAsync([&] {
- ThreadClient tc("Test", getGlobalServiceContext());
+ ThreadClient tc("Test", getServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
@@ -463,7 +463,7 @@ TEST_F(MigrationManagerTest, RestartMigrationManager) {
_migrationManager->finishRecovery(operationContext(), 0, kDefaultSecondaryThrottle);
auto future = launchAsync([&] {
- ThreadClient tc("Test", getGlobalServiceContext());
+ ThreadClient tc("Test", getServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling a moveChunk command requires finding a host to which to send the command. Set
@@ -524,7 +524,7 @@ TEST_F(MigrationManagerTest, MigrationRecovery) {
_migrationManager->startRecoveryAndAcquireDistLocks(operationContext());
auto future = launchAsync([this] {
- ThreadClient tc("Test", getGlobalServiceContext());
+ ThreadClient tc("Test", getServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding hosts to which to send the commands.
@@ -635,7 +635,7 @@ TEST_F(MigrationManagerTest, RemoteCallErrorConversionToOperationFailed) {
MigrateInfo::chunksImbalance}};
auto future = launchAsync([&] {
- ThreadClient tc("Test", getGlobalServiceContext());
+ ThreadClient tc("Test", getServiceContext());
auto opCtx = cc().makeOperationContext();
// Scheduling the moveChunk commands requires finding a host to which to send the command.
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index 5b00b48005c..dc68d8da844 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -387,7 +387,7 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) {
operationContext()->setWriteConcern(ShardingCatalogClient::kMajorityWriteConcern);
auto future = launchAsync([this, expectedShardName] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName =
assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -470,7 +470,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
"TestDB2", ShardId(expectedShardName), false, databaseVersion::makeNew());
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName = assertGet(
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), nullptr, ConnectionString(shardTarget), 100));
@@ -560,7 +560,7 @@ TEST_F(AddShardTest, UnreachableHost) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto status =
ShardingCatalogManager::get(operationContext())
->addShard(
@@ -587,7 +587,7 @@ TEST_F(AddShardTest, AddMongosAsShard) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto status =
ShardingCatalogManager::get(operationContext())
->addShard(
@@ -614,7 +614,7 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) {
std::string expectedShardName = "Standalone";
auto future = launchAsync([this, expectedShardName, shardTarget] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto status =
ShardingCatalogManager::get(operationContext())
->addShard(
@@ -645,7 +645,7 @@ TEST_F(AddShardTest, AddStandaloneHostShardAsReplicaSet) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, expectedShardName, connString] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -673,7 +673,7 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, expectedShardName, connString] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -702,7 +702,7 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, expectedShardName, connString] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -733,7 +733,7 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
std::string expectedShardName = "StandaloneShard";
auto future = launchAsync([this, expectedShardName, connString] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -766,7 +766,7 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) {
std::string expectedShardName = "config";
auto future = launchAsync([this, expectedShardName, connString] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::BadValue, status);
@@ -810,7 +810,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
auto future = launchAsync([this, expectedShardName, connString] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto status = ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), &expectedShardName, connString, 100);
ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
@@ -856,7 +856,7 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
"shardDB", ShardId(expectedShardName), false, databaseVersion::makeNew());
auto future = launchAsync([this, &expectedShardName, &connString] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), nullptr, connString, 100));
ASSERT_EQUALS(expectedShardName, shardName);
@@ -920,7 +920,7 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
"shardDB", ShardId(expectedShardName), false, databaseVersion::makeNew());
auto future = launchAsync([this, &expectedShardName, &seedString] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), nullptr, seedString, 100));
ASSERT_EQUALS(expectedShardName, shardName);
@@ -995,7 +995,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
ON_BLOCK_EXIT([&] { failPoint->setMode(FailPoint::off); });
auto future = launchAsync([this, &expectedShardName, &shardTarget] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName = assertGet(
ShardingCatalogManager::get(operationContext())
->addShard(
@@ -1085,7 +1085,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with a different shard name should fail.
std::string differentName = "anotherShardName";
auto future1 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1100,7 +1100,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with a different maxSize should fail.
auto future2 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1115,7 +1115,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// can't change the sharded cluster's notion of the shard from standalone to replica set just
// by calling addShard.
auto future3 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1130,7 +1130,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with the same options should succeed.
auto future4 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
&existingShardName,
@@ -1146,7 +1146,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Adding the same standalone host with the same options (without explicitly specifying the
// shard name) should succeed.
auto future5 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
nullptr,
@@ -1189,7 +1189,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same connection string with a different shard name should fail.
std::string differentName = "anotherShardName";
auto future1 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
ASSERT_EQUALS(
ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
@@ -1203,7 +1203,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same connection string with a different maxSize should fail.
auto future2 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
ASSERT_EQUALS(
ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
@@ -1221,7 +1221,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// the sharded cluster's notion of the shard from replica set to standalone just by calling
// addShard.
auto future3 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1239,7 +1239,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// change the replica set name the sharded cluster knows for it just by calling addShard again.
std::string differentSetName = "differentSet";
auto future4 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
ASSERT_EQUALS(ErrorCodes::IllegalOperation,
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
@@ -1255,7 +1255,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same host with the same options should succeed.
auto future5 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
&existingShardName,
@@ -1268,7 +1268,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Adding the same host with the same options (without explicitly specifying the shard name)
// should succeed.
auto future6 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName = assertGet(
ShardingCatalogManager::get(operationContext())
->addShard(operationContext(), nullptr, connString, existingShard.getMaxSizeMB()));
@@ -1292,7 +1292,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
targeterFactory()->addTargeterToReturn(otherHostConnString, std::move(otherHostTargeter));
}
auto future7 = launchAsync([&] {
- ThreadClient tc(getGlobalServiceContext());
+ ThreadClient tc(getServiceContext());
auto shardName = assertGet(ShardingCatalogManager::get(operationContext())
->addShard(operationContext(),
nullptr,
diff --git a/src/mongo/db/s/sharding_initialization_mongod_test.cpp b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
index fd195ad0549..6af6ff00399 100644
--- a/src/mongo/db/s/sharding_initialization_mongod_test.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod_test.cpp
@@ -37,7 +37,6 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/s/collection_sharding_state_factory_shard.h"
#include "mongo/db/s/collection_sharding_state_factory_standalone.h"
-#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/db/s/op_observer_sharding_impl.h"
#include "mongo/db/s/shard_server_catalog_cache_loader.h"
#include "mongo/db/s/shard_server_op_observer.h"
@@ -155,15 +154,12 @@ public:
std::make_unique<CollectionShardingStateFactoryShard>(_serviceContext));
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- auto makeOpObserver = [&] {
+ _serviceContext->setOpObserver([&] {
auto opObserver = std::make_unique<OpObserverRegistry>();
- opObserver->addObserver(std::make_unique<OpObserverImpl>());
- opObserver->addObserver(std::make_unique<ConfigServerOpObserver>());
+ opObserver->addObserver(std::make_unique<OpObserverShardingImpl>());
opObserver->addObserver(std::make_unique<ShardServerOpObserver>());
return opObserver;
- };
-
- _serviceContext->setOpObserver(makeOpObserver());
+ }());
}
private:
diff --git a/src/mongo/db/s/vector_clock_config_server_test.cpp b/src/mongo/db/s/vector_clock_config_server_test.cpp
index e6b1a4d2bb3..59850cae623 100644
--- a/src/mongo/db/s/vector_clock_config_server_test.cpp
+++ b/src/mongo/db/s/vector_clock_config_server_test.cpp
@@ -91,7 +91,7 @@ private:
TEST_F(VectorClockConfigServerTest, TickClusterTime) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
const auto t0 = vc->getTime();
@@ -109,7 +109,7 @@ TEST_F(VectorClockConfigServerTest, TickClusterTime) {
}
TEST_F(VectorClockConfigServerTest, TickToClusterTime) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
const auto t0 = vc->getTime();
@@ -129,13 +129,13 @@ TEST_F(VectorClockConfigServerTest, TickToClusterTime) {
}
DEATH_TEST_F(VectorClockConfigServerTest, CannotTickConfigTime, "Hit a MONGO_UNREACHABLE") {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tick(VectorClock::Component::ConfigTime, 1);
}
TEST_F(VectorClockConfigServerTest, TickToConfigTime) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
const auto t0 = vc->getTime();
@@ -155,7 +155,7 @@ TEST_F(VectorClockConfigServerTest, TickToConfigTime) {
}
TEST_F(VectorClockConfigServerTest, GossipOutInternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
LogicalTimeValidator::get(getServiceContext())->enableKeyGenerator(operationContext(), true);
@@ -177,7 +177,7 @@ TEST_F(VectorClockConfigServerTest, GossipOutInternal) {
}
TEST_F(VectorClockConfigServerTest, GossipOutExternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
LogicalTimeValidator::get(getServiceContext())->enableKeyGenerator(operationContext(), true);
@@ -199,7 +199,7 @@ TEST_F(VectorClockConfigServerTest, GossipOutExternal) {
}
TEST_F(VectorClockConfigServerTest, GossipInInternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tick(VectorClock::Component::ClusterTime, 1);
@@ -237,7 +237,7 @@ TEST_F(VectorClockConfigServerTest, GossipInInternal) {
}
TEST_F(VectorClockConfigServerTest, GossipInExternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tick(VectorClock::Component::ClusterTime, 1);
diff --git a/src/mongo/db/s/vector_clock_shard_server_test.cpp b/src/mongo/db/s/vector_clock_shard_server_test.cpp
index 66bae0d67a0..a2c7577769c 100644
--- a/src/mongo/db/s/vector_clock_shard_server_test.cpp
+++ b/src/mongo/db/s/vector_clock_shard_server_test.cpp
@@ -80,7 +80,7 @@ private:
TEST_F(VectorClockShardServerTest, TickClusterTime) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
const auto t0 = vc->getTime();
@@ -98,7 +98,7 @@ TEST_F(VectorClockShardServerTest, TickClusterTime) {
}
TEST_F(VectorClockShardServerTest, TickToClusterTime) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
const auto t0 = vc->getTime();
@@ -118,19 +118,19 @@ TEST_F(VectorClockShardServerTest, TickToClusterTime) {
}
DEATH_TEST_F(VectorClockShardServerTest, CannotTickConfigTime, "Hit a MONGO_UNREACHABLE") {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tick(VectorClock::Component::ConfigTime, 1);
}
DEATH_TEST_F(VectorClockShardServerTest, CannotTickToConfigTime, "Hit a MONGO_UNREACHABLE") {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tickTo(VectorClock::Component::ConfigTime, LogicalTime());
}
TEST_F(VectorClockShardServerTest, GossipOutInternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
LogicalTimeValidator::get(getServiceContext())->enableKeyGenerator(operationContext(), true);
@@ -150,7 +150,7 @@ TEST_F(VectorClockShardServerTest, GossipOutInternal) {
}
TEST_F(VectorClockShardServerTest, GossipOutExternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
LogicalTimeValidator::get(getServiceContext())->enableKeyGenerator(operationContext(), true);
@@ -170,7 +170,7 @@ TEST_F(VectorClockShardServerTest, GossipOutExternal) {
}
TEST_F(VectorClockShardServerTest, GossipInInternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tick(VectorClock::Component::ClusterTime, 1);
@@ -214,7 +214,7 @@ TEST_F(VectorClockShardServerTest, GossipInInternal) {
}
TEST_F(VectorClockShardServerTest, GossipInExternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tick(VectorClock::Component::ClusterTime, 1);
diff --git a/src/mongo/db/vector_clock_mongod_test.cpp b/src/mongo/db/vector_clock_mongod_test.cpp
index cbb34b41ccd..a54b966b8f6 100644
--- a/src/mongo/db/vector_clock_mongod_test.cpp
+++ b/src/mongo/db/vector_clock_mongod_test.cpp
@@ -83,9 +83,8 @@ private:
std::shared_ptr<KeysCollectionManager> _keyManager;
};
-
TEST_F(VectorClockMongoDTest, TickClusterTime) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
const auto t0 = vc->getTime();
@@ -103,7 +102,7 @@ TEST_F(VectorClockMongoDTest, TickClusterTime) {
}
TEST_F(VectorClockMongoDTest, TickToClusterTime) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
const auto t0 = vc->getTime();
@@ -123,19 +122,19 @@ TEST_F(VectorClockMongoDTest, TickToClusterTime) {
}
DEATH_TEST_F(VectorClockMongoDTest, CannotTickConfigTime, "Hit a MONGO_UNREACHABLE") {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tick(VectorClock::Component::ConfigTime, 1);
}
DEATH_TEST_F(VectorClockMongoDTest, CannotTickToConfigTime, "Hit a MONGO_UNREACHABLE") {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tickTo(VectorClock::Component::ConfigTime, LogicalTime());
}
TEST_F(VectorClockMongoDTest, GossipOutInternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
LogicalTimeValidator::get(getServiceContext())->enableKeyGenerator(operationContext(), true);
@@ -155,7 +154,7 @@ TEST_F(VectorClockMongoDTest, GossipOutInternal) {
}
TEST_F(VectorClockMongoDTest, GossipOutExternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
LogicalTimeValidator::get(getServiceContext())->enableKeyGenerator(operationContext(), true);
@@ -175,7 +174,7 @@ TEST_F(VectorClockMongoDTest, GossipOutExternal) {
}
TEST_F(VectorClockMongoDTest, GossipInInternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tick(VectorClock::Component::ClusterTime, 1);
@@ -220,7 +219,7 @@ TEST_F(VectorClockMongoDTest, GossipInInternal) {
}
TEST_F(VectorClockMongoDTest, GossipInExternal) {
- auto sc = getGlobalServiceContext();
+ auto sc = getServiceContext();
auto vc = VectorClockMutable::get(sc);
vc->tick(VectorClock::Component::ClusterTime, 1);
diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript
index 02939bd9933..12870af5369 100644
--- a/src/mongo/dbtests/SConscript
+++ b/src/mongo/dbtests/SConscript
@@ -160,7 +160,6 @@ if not has_option('noshell') and usemozjs:
"$BUILD_DIR/mongo/db/repl/serveronly_repl",
"$BUILD_DIR/mongo/db/repl/storage_interface_impl",
"$BUILD_DIR/mongo/db/repl/timestamp_block",
- "$BUILD_DIR/mongo/db/s/op_observer_sharding_impl",
"$BUILD_DIR/mongo/db/server_options_core",
"$BUILD_DIR/mongo/db/sessions_collection_standalone",
"$BUILD_DIR/mongo/db/storage/biggie/storage_biggie",
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 786df7534a9..6920a1dd0c5 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -54,8 +54,8 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/timer.h"
+namespace mongo {
namespace {
-namespace QueryTests {
using std::endl;
using std::string;
@@ -333,7 +333,7 @@ public:
class GetMoreKillOp : public ClientBase {
public:
~GetMoreKillOp() {
- getGlobalServiceContext()->unsetKillAllOperations();
+ _opCtx.getServiceContext()->unsetKillAllOperations();
_client.dropCollection("unittests.querytests.GetMoreKillOp");
}
void run() {
@@ -355,7 +355,7 @@ public:
// Set the killop kill all flag, forcing the next get more to fail with a kill op
// exception.
- getGlobalServiceContext()->setKillAllOperations();
+ _opCtx.getServiceContext()->setKillAllOperations();
ASSERT_THROWS_CODE(([&] {
while (cursor->more()) {
cursor->next();
@@ -365,7 +365,7 @@ public:
ErrorCodes::InterruptedAtShutdown);
// Revert the killop kill all flag.
- getGlobalServiceContext()->unsetKillAllOperations();
+ _opCtx.getServiceContext()->unsetKillAllOperations();
}
};
@@ -377,7 +377,7 @@ public:
class GetMoreInvalidRequest : public ClientBase {
public:
~GetMoreInvalidRequest() {
- getGlobalServiceContext()->unsetKillAllOperations();
+ _opCtx.getServiceContext()->unsetKillAllOperations();
_client.dropCollection("unittests.querytests.GetMoreInvalidRequest");
}
void run() {
@@ -456,7 +456,7 @@ public:
}
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -491,7 +491,7 @@ public:
}
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -524,7 +524,7 @@ public:
}
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -556,7 +556,7 @@ public:
}
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -590,7 +590,7 @@ public:
}
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -646,7 +646,7 @@ public:
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -693,7 +693,7 @@ public:
}
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -706,7 +706,7 @@ public:
//
// To ensure we are working with a clean oplog (an oplog without entries), we resort
// to truncating the oplog instead.
- if (getGlobalServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
+ if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
BSONObj info;
_client.runCommand("local",
BSON("emptycapped"
@@ -749,7 +749,7 @@ public:
}
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -762,7 +762,7 @@ public:
//
// To ensure we are working with a clean oplog (an oplog without entries), we resort
// to truncating the oplog instead.
- if (getGlobalServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
+ if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
BSONObj info;
_client.runCommand("local",
BSON("emptycapped"
@@ -1382,7 +1382,7 @@ public:
}
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -1538,7 +1538,7 @@ public:
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -1555,7 +1555,7 @@ public:
//
// To ensure we are working with a clean oplog (an oplog without entries), we resort
// to truncating the oplog instead.
- if (getGlobalServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
+ if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
_client.runCommand("local",
BSON("emptycapped"
<< "oplog.querytests.findingstart"),
@@ -1605,7 +1605,7 @@ public:
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -1624,7 +1624,7 @@ public:
//
// To ensure we are working with a clean oplog (an oplog without entries), we resort
// to truncating the oplog instead.
- if (getGlobalServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
+ if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
_client.runCommand("local",
BSON("emptycapped"
<< "oplog.querytests.findingstart"),
@@ -1670,7 +1670,7 @@ public:
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -1698,7 +1698,7 @@ public:
//
// To ensure we are working with a clean oplog (an oplog without entries), we resort
// to truncating the oplog instead.
- if (getGlobalServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
+ if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) {
_client.runCommand("local",
BSON("emptycapped"
<< "oplog.querytests.findingstart"),
@@ -1908,7 +1908,7 @@ public:
Exhaust() : CollectionInternalBase("exhaust") {}
void run() {
// Skip the test if the storage engine doesn't support capped collections.
- if (!getGlobalServiceContext()->getStorageEngine()->supportsCappedCollections()) {
+ if (!_opCtx.getServiceContext()->getStorageEngine()->supportsCappedCollections()) {
return;
}
@@ -2066,5 +2066,5 @@ public:
OldStyleSuiteInitializer<All> myall;
-} // namespace QueryTests
} // namespace
+} // namespace mongo
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 218f819c68d..7c34b97bf2b 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -40,27 +40,21 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/json.h"
+#include "mongo/db/op_observer_impl.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_client_info.h"
-#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
-#include "mongo/db/s/op_observer_sharding_impl.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/logger/logger.h"
#include "mongo/logv2/log.h"
#include "mongo/transport/transport_layer_asio.h"
-using namespace mongo::repl;
-
+namespace mongo {
+namespace repl {
namespace ReplTests {
-using mongo::logger::globalLogDomain;
-using mongo::logv2::LogComponent;
-using mongo::logv2::LogSeverity;
-using std::endl;
using std::string;
-using std::stringstream;
using std::unique_ptr;
using std::vector;
@@ -105,32 +99,30 @@ public:
Base()
: _client(&_opCtx),
_defaultReplSettings(
- ReplicationCoordinator::get(getGlobalServiceContext())->getSettings()) {
+ ReplicationCoordinator::get(_opCtx.getServiceContext())->getSettings()) {
+ auto* const sc = _opCtx.getServiceContext();
+
transport::TransportLayerASIO::Options opts;
opts.mode = transport::TransportLayerASIO::Options::kEgress;
- auto sc = getGlobalServiceContext();
-
sc->setTransportLayer(std::make_unique<transport::TransportLayerASIO>(opts, nullptr));
ASSERT_OK(sc->getTransportLayer()->setup());
ASSERT_OK(sc->getTransportLayer()->start());
ReplSettings replSettings;
replSettings.setReplSetString("rs0/host1");
- ReplicationCoordinator::set(
- getGlobalServiceContext(),
- std::unique_ptr<repl::ReplicationCoordinator>(
- new repl::ReplicationCoordinatorMock(_opCtx.getServiceContext(), replSettings)));
- ASSERT_OK(ReplicationCoordinator::get(getGlobalServiceContext())
- ->setFollowerMode(MemberState::RS_PRIMARY));
+ ReplicationCoordinator::set(sc,
+ std::unique_ptr<repl::ReplicationCoordinator>(
+ new repl::ReplicationCoordinatorMock(sc, replSettings)));
+ ASSERT_OK(ReplicationCoordinator::get(sc)->setFollowerMode(MemberState::RS_PRIMARY));
// Since the Client object persists across tests, even though the global
// ReplicationCoordinator does not, we need to clear the last op associated with the client
// to avoid the invariant in ReplClientInfo::setLastOp that the optime only goes forward.
repl::ReplClientInfo::forClient(_opCtx.getClient()).clearLastOp_forTest();
- getGlobalServiceContext()->setOpObserver(std::make_unique<OpObserverShardingImpl>());
+ sc->setOpObserver(std::make_unique<OpObserverImpl>());
- setOplogCollectionName(getGlobalServiceContext());
+ setOplogCollectionName(sc);
createOplog(&_opCtx);
dbtests::WriteContextForTests ctx(&_opCtx, ns());
@@ -149,20 +141,21 @@ public:
// Start with a fresh oplog.
deleteAll(cllNS());
}
+
~Base() {
+ auto* const sc = _opCtx.getServiceContext();
try {
deleteAll(ns());
deleteAll(cllNS());
repl::ReplicationCoordinator::set(
- getGlobalServiceContext(),
- std::unique_ptr<repl::ReplicationCoordinator>(new repl::ReplicationCoordinatorMock(
- _opCtx.getServiceContext(), _defaultReplSettings)));
- repl::ReplicationCoordinator::get(getGlobalServiceContext())
+ sc,
+ std::unique_ptr<repl::ReplicationCoordinator>(
+ new repl::ReplicationCoordinatorMock(sc, _defaultReplSettings)));
+ repl::ReplicationCoordinator::get(sc)
->setFollowerMode(repl::MemberState::RS_PRIMARY)
.ignore();
- getGlobalServiceContext()->getTransportLayer()->shutdown();
-
+ sc->getTransportLayer()->shutdown();
} catch (...) {
FAIL("Exception while cleaning up test");
}
@@ -286,7 +279,7 @@ protected:
coll = db->createCollection(&_opCtx, nss());
}
- auto lastApplied = repl::ReplicationCoordinator::get(getGlobalServiceContext())
+ auto lastApplied = repl::ReplicationCoordinator::get(_opCtx.getServiceContext())
->getMyLastAppliedOpTime()
.getTimestamp();
// The oplog collection may already have some oplog entries for writes prior to this insert.
@@ -772,7 +765,7 @@ protected:
class MultiInc : public Recovering {
public:
string s() const {
- stringstream ss;
+ StringBuilder ss;
unique_ptr<DBClientCursor> cc =
_client.query(NamespaceString(ns()), Query().sort(BSON("_id" << 1)));
bool first = true;
@@ -1386,3 +1379,5 @@ public:
OldStyleSuiteInitializer<All> myall;
} // namespace ReplTests
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 19603ffdbad..d77b57d9dd6 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -53,6 +53,7 @@
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_builds_coordinator.h"
#include "mongo/db/multi_key_path_tracker.h"
+#include "mongo/db/op_observer_impl.h"
#include "mongo/db/op_observer_registry.h"
#include "mongo/db/repl/apply_ops.h"
#include "mongo/db/repl/drop_pending_collection_reaper.h"
@@ -74,7 +75,6 @@
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/repl/timestamp_block.h"
#include "mongo/db/s/collection_sharding_state_factory_shard.h"
-#include "mongo/db/s/op_observer_sharding_impl.h"
#include "mongo/db/service_context.h"
#include "mongo/db/session.h"
#include "mongo/db/session_catalog_mongod.h"
@@ -206,10 +206,10 @@ public:
repl::ReplClientInfo::forClient(_opCtx->getClient()).clearLastOp_forTest();
auto registry = std::make_unique<OpObserverRegistry>();
- registry->addObserver(std::make_unique<OpObserverShardingImpl>());
+ registry->addObserver(std::make_unique<OpObserverImpl>());
_opCtx->getServiceContext()->setOpObserver(std::move(registry));
- repl::setOplogCollectionName(getGlobalServiceContext());
+ repl::setOplogCollectionName(_opCtx->getServiceContext());
repl::createOplog(_opCtx);
_clock->tickTo(ClusterTime, LogicalTime(Timestamp(1, 0)));
@@ -466,9 +466,9 @@ public:
}
void setReplCoordAppliedOpTime(const repl::OpTime& opTime, Date_t wallTime = Date_t()) {
- repl::ReplicationCoordinator::get(getGlobalServiceContext())
+ repl::ReplicationCoordinator::get(_opCtx->getServiceContext())
->setMyLastAppliedOpTimeAndWallTime({opTime, wallTime});
- ASSERT_OK(repl::ReplicationCoordinator::get(getGlobalServiceContext())
+ ASSERT_OK(repl::ReplicationCoordinator::get(_opCtx->getServiceContext())
->updateTerm(_opCtx, opTime.getTerm()));
}
@@ -2048,7 +2048,7 @@ public:
// Index build drain will timestamp writes from the side table into the index with the
// lastApplied timestamp. This is because these writes are not associated with any specific
// oplog entry.
- ASSERT_EQ(repl::ReplicationCoordinator::get(getGlobalServiceContext())
+ ASSERT_EQ(repl::ReplicationCoordinator::get(_opCtx->getServiceContext())
->getMyLastAppliedOpTime()
.getTimestamp(),
firstInsert.asTimestamp());
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index c3ade44368e..a01d97515e6 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -227,7 +227,6 @@ env.Library(
LIBDEPS=[
'$BUILD_DIR/mongo/db/repl/drop_pending_collection_reaper',
'$BUILD_DIR/mongo/db/repl/replmocks',
- '$BUILD_DIR/mongo/db/s/op_observer_sharding_impl',
'$BUILD_DIR/mongo/db/s/sharding_runtime_d',
'$BUILD_DIR/mongo/db/service_context_d_test_fixture',
'sharding_test_fixture_common',
diff --git a/src/mongo/watchdog/watchdog_mongod.cpp b/src/mongo/watchdog/watchdog_mongod.cpp
index 77c87b087b5..2c7d96075c0 100644
--- a/src/mongo/watchdog/watchdog_mongod.cpp
+++ b/src/mongo/watchdog/watchdog_mongod.cpp
@@ -52,12 +52,11 @@
#include "mongo/watchdog/watchdog_register.h"
namespace mongo {
+namespace {
// Run the watchdog checks at a fixed interval regardless of user choice for monitoring period.
constexpr Seconds watchdogCheckPeriod = Seconds{10};
-namespace {
-
const auto getWatchdogMonitor =
ServiceContext::declareDecoration<std::unique_ptr<WatchdogMonitor>>();
@@ -129,7 +128,7 @@ public:
}
} watchdogServerStatusSection;
-void startWatchdog() {
+void startWatchdog(ServiceContext* service) {
// Check three paths if set
// 1. storage directory - optional for inmemory?
// 2. log path - optional
@@ -192,7 +191,7 @@ void startWatchdog() {
std::move(checks), watchdogCheckPeriod, period, watchdogTerminate);
// Install the new WatchdogMonitor
- auto& staticMonitor = getWatchdogMonitor(getGlobalServiceContext());
+ auto& staticMonitor = getWatchdogMonitor(service);
staticMonitor = std::move(monitor);
diff --git a/src/mongo/watchdog/watchdog_mongod.h b/src/mongo/watchdog/watchdog_mongod.h
index 06892de6543..bfa7f4b711d 100644
--- a/src/mongo/watchdog/watchdog_mongod.h
+++ b/src/mongo/watchdog/watchdog_mongod.h
@@ -33,10 +33,12 @@
namespace mongo {
+class ServiceContext;
+
/**
* Start the watchdog.
*/
-void startWatchdog();
+void startWatchdog(ServiceContext* service);
/**
* Callbacks used by the 'watchdogPeriodSeconds' set parameter.