author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2018-01-17 17:40:35 -0500
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2018-03-07 14:01:39 -0500
commit     083647f38662195653b87b6a79ae1183d269f910 (patch)
tree       1cc2a3b7a036232da1a3c1bc96bc5c39d9d90551 /src
parent     4d2ca242fb7b9a28d1123831db99664eb0db3e23 (diff)
download   mongo-083647f38662195653b87b6a79ae1183d269f910.tar.gz
SERVER-29908 Move OpObserver callbacks out of CollectionShardingState
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/SConscript | 31
-rw-r--r--  src/mongo/db/db.cpp | 9
-rw-r--r--  src/mongo/db/key_generator_update_test.cpp | 9
-rw-r--r--  src/mongo/db/keys_collection_manager_sharding_test.cpp | 9
-rw-r--r--  src/mongo/db/logical_time_validator_test.cpp | 10
-rw-r--r--  src/mongo/db/op_observer_impl.cpp | 11
-rw-r--r--  src/mongo/db/pipeline/SConscript | 20
-rw-r--r--  src/mongo/db/pipeline/document_source_merge_cursors_test.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/document_source_sample_test.cpp | 17
-rw-r--r--  src/mongo/db/s/SConscript | 3
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp | 1
-rw-r--r--  src/mongo/db/s/collection_range_deleter_test.cpp | 170
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp | 284
-rw-r--r--  src/mongo/db/s/collection_sharding_state.h | 71
-rw-r--r--  src/mongo/db/s/collection_sharding_state_test.cpp | 102
-rw-r--r--  src/mongo/db/s/config_server_op_observer.cpp | 78
-rw-r--r--  src/mongo/db/s/config_server_op_observer.h | 129
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp | 22
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp | 15
-rw-r--r--  src/mongo/db/s/session_catalog_migration_destination_test.cpp | 17
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.cpp | 348
-rw-r--r--  src/mongo/db/s/shard_server_op_observer.h | 129
-rw-r--r--  src/mongo/db/s/sharding_state_test.cpp | 218
-rw-r--r--  src/mongo/db/service_context_d_test_fixture.cpp | 2
-rw-r--r--  src/mongo/db/service_context_d_test_fixture.h | 5
-rw-r--r--  src/mongo/s/SConscript | 42
-rw-r--r--  src/mongo/s/balancer_configuration_test.cpp | 2
-rw-r--r--  src/mongo/s/catalog/SConscript | 5
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp | 380
-rw-r--r--  src/mongo/s/catalog/replset_dist_lock_manager_test.cpp | 158
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp | 6
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_log_change_test.cpp | 2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test.cpp | 2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp | 2
-rw-r--r--  src/mongo/s/catalog_cache_test_fixture.h | 2
-rw-r--r--  src/mongo/s/chunk_manager_index_bounds_test.cpp | 2
-rw-r--r--  src/mongo/s/client/SConscript | 4
-rw-r--r--  src/mongo/s/client/shard_connection_test.cpp | 44
-rw-r--r--  src/mongo/s/client/sharding_connection_hook.cpp | 3
-rw-r--r--  src/mongo/s/cluster_identity_loader_test.cpp | 10
-rw-r--r--  src/mongo/s/cluster_last_error_info_test.cpp | 2
-rw-r--r--  src/mongo/s/commands/SConscript | 1
-rw-r--r--  src/mongo/s/config_server_test_fixture.cpp | 4
-rw-r--r--  src/mongo/s/config_server_test_fixture.h | 2
-rw-r--r--  src/mongo/s/query/SConscript | 4
-rw-r--r--  src/mongo/s/query/async_results_merger_test.cpp | 2
-rw-r--r--  src/mongo/s/query/establish_cursors_test.cpp | 2
-rw-r--r--  src/mongo/s/shard_server_test_fixture.cpp | 15
-rw-r--r--  src/mongo/s/shard_server_test_fixture.h | 14
-rw-r--r--  src/mongo/s/sharding_mongod_test_fixture.cpp | 38
-rw-r--r--  src/mongo/s/sharding_mongod_test_fixture.h | 50
-rw-r--r--  src/mongo/s/sharding_router_test_fixture.cpp (renamed from src/mongo/s/sharding_test_fixture.cpp) | 55
-rw-r--r--  src/mongo/s/sharding_router_test_fixture.h (renamed from src/mongo/s/sharding_test_fixture.h) | 32
-rw-r--r--  src/mongo/s/sharding_test_fixture_common.cpp | 41
-rw-r--r--  src/mongo/s/sharding_test_fixture_common.h | 79
-rw-r--r--  src/mongo/s/write_ops/SConscript | 7
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec_test.cpp | 2
57 files changed, 1414 insertions, 1312 deletions
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 540b4ac0a0d..e3bbf70a02f 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -1519,8 +1519,6 @@ env.CppUnitTest(
],
LIBDEPS=[
'keys_collection_document',
- '$BUILD_DIR/mongo/s/coreshard',
- '$BUILD_DIR/mongo/s/client/sharding_client',
],
)
@@ -1562,10 +1560,9 @@ env.CppUnitTest(
'logical_time_validator_test.cpp',
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/s/config_server_test_fixture',
'keys_collection_manager',
'logical_time_validator',
- '$BUILD_DIR/mongo/s/config_server_test_fixture',
- '$BUILD_DIR/mongo/s/coreshard',
],
)
@@ -1605,32 +1602,10 @@ env.CppUnitTest(
)
env.CppUnitTest(
- target='keys_collection_cache_test',
- source=[
- 'keys_collection_cache_test.cpp',
- ],
- LIBDEPS=[
- 'keys_collection_manager',
- '$BUILD_DIR/mongo/s/config_server_test_fixture',
- '$BUILD_DIR/mongo/s/coreshard',
- ],
-)
-
-env.CppUnitTest(
- target='key_generator_update_test',
- source=[
- 'key_generator_update_test.cpp',
- ],
- LIBDEPS=[
- 'keys_collection_manager',
- '$BUILD_DIR/mongo/s/config_server_test_fixture',
- '$BUILD_DIR/mongo/s/coreshard',
- ],
-)
-
-env.CppUnitTest(
target='keys_collection_manager_sharding_test',
source=[
+ 'key_generator_update_test.cpp',
+ 'keys_collection_cache_test.cpp',
'keys_collection_manager_sharding_test.cpp',
],
LIBDEPS=[
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 8e27f6a2c26..683d2f3e347 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -108,6 +108,8 @@
#include "mongo/db/repl/topology_coordinator.h"
#include "mongo/db/s/balancer/balancer.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/db/s/config_server_op_observer.h"
+#include "mongo/db/s/shard_server_op_observer.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_initialization_mongod.h"
#include "mongo/db/s/sharding_state.h"
@@ -271,6 +273,13 @@ ExitCode _initAndListen(int listenPort) {
auto opObserverRegistry = stdx::make_unique<OpObserverRegistry>();
opObserverRegistry->addObserver(stdx::make_unique<OpObserverImpl>());
opObserverRegistry->addObserver(stdx::make_unique<UUIDCatalogObserver>());
+
+ if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ opObserverRegistry->addObserver(stdx::make_unique<ShardServerOpObserver>());
+ } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
+ opObserverRegistry->addObserver(stdx::make_unique<ConfigServerOpObserver>());
+ }
+
serviceContext->setOpObserver(std::move(opObserverRegistry));
DBDirectClientFactory::get(serviceContext).registerImplementation([](OperationContext* opCtx) {
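
The db.cpp hunk above is the heart of the change: instead of CollectionShardingState branching on the cluster role inside every callback, startup now installs a role-specific observer. A minimal sketch of the registry fan-out this relies on — only the addObserver/forward shape is taken from the diff; the trimmed signatures and everything else are illustrative assumptions, not the real mongo interfaces:

```cpp
#include <memory>
#include <vector>

struct OpObserver {
    virtual ~OpObserver() = default;
    virtual void onInserts() = 0;  // the real interface passes opCtx, nss, docs, ...
};

class OpObserverRegistry final : public OpObserver {
public:
    void addObserver(std::unique_ptr<OpObserver> observer) {
        _observers.push_back(std::move(observer));
    }

    void onInserts() override {
        for (auto& observer : _observers)
            observer->onInserts();  // fan out in registration order
    }

private:
    std::vector<std::unique_ptr<OpObserver>> _observers;
};

// Startup then mirrors the hunk above, e.g.:
//   registry->addObserver(std::make_unique<ShardServerOpObserver>());
```

Because the registry itself satisfies the observer interface, callers such as OpObserverImpl's call sites never need to know how many observers are installed or which roles they serve.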
diff --git a/src/mongo/db/key_generator_update_test.cpp b/src/mongo/db/key_generator_update_test.cpp
index ee33e8204bf..5c30b3214eb 100644
--- a/src/mongo/db/key_generator_update_test.cpp
+++ b/src/mongo/db/key_generator_update_test.cpp
@@ -37,10 +37,7 @@
#include "mongo/db/keys_collection_document.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/config_server_test_fixture.h"
-#include "mongo/s/grid.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
@@ -59,12 +56,6 @@ protected:
Grid::get(operationContext())->catalogClient());
}
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- invariant(distLockCatalog);
- return stdx::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
- }
-
KeysCollectionClient* catalogClient() const {
return _catalogClient.get();
}
diff --git a/src/mongo/db/keys_collection_manager_sharding_test.cpp b/src/mongo/db/keys_collection_manager_sharding_test.cpp
index 4653653c836..c97ca659dd6 100644
--- a/src/mongo/db/keys_collection_manager_sharding_test.cpp
+++ b/src/mongo/db/keys_collection_manager_sharding_test.cpp
@@ -37,10 +37,7 @@
#include "mongo/db/keys_collection_manager.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/config_server_test_fixture.h"
-#include "mongo/s/grid.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
@@ -76,12 +73,6 @@ protected:
ConfigServerTestFixture::tearDown();
}
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- invariant(distLockCatalog);
- return stdx::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
- }
-
private:
std::unique_ptr<KeysCollectionManager> _keyManager;
};
diff --git a/src/mongo/db/logical_time_validator_test.cpp b/src/mongo/db/logical_time_validator_test.cpp
index 90eddb5ec98..5db41df3d72 100644
--- a/src/mongo/db/logical_time_validator_test.cpp
+++ b/src/mongo/db/logical_time_validator_test.cpp
@@ -34,15 +34,11 @@
#include "mongo/db/logical_clock.h"
#include "mongo/db/logical_time.h"
#include "mongo/db/logical_time_validator.h"
-#include "mongo/db/operation_context.h"
#include "mongo/db/server_options.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/signed_logical_time.h"
#include "mongo/db/time_proof_service.h"
#include "mongo/platform/basic.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/config_server_test_fixture.h"
-#include "mongo/s/grid.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
@@ -79,12 +75,6 @@ protected:
ConfigServerTestFixture::tearDown();
}
- std::unique_ptr<DistLockManager> makeDistLockManager(
- std::unique_ptr<DistLockCatalog> distLockCatalog) override {
- invariant(distLockCatalog);
- return stdx::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
- }
-
/**
* Forces KeyManager to refresh cache and generate new keys.
*/
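
The three config-server test files above drop byte-identical makeDistLockManager overrides, presumably because ConfigServerTestFixture now supplies DistLockManagerMock as its default (that fixture's own diff is not shown here, only its 4-line diffstat entry). A sketch of the template-method shape involved, with stand-in types rather than the real classes:

```cpp
#include <memory>

struct DistLockManager {
    virtual ~DistLockManager() = default;
};
struct DistLockManagerMock : DistLockManager {};

class ConfigServerTestFixture {
public:
    virtual ~ConfigServerTestFixture() = default;

    void setUp() {
        _distLockManager = makeDistLockManager();  // template-method hook
    }

protected:
    // With a sensible default in the base fixture, subclasses that only ever
    // wanted the mock can delete their identical overrides, as this diff does.
    virtual std::unique_ptr<DistLockManager> makeDistLockManager() {
        return std::make_unique<DistLockManagerMock>();
    }

private:
    std::unique_ptr<DistLockManager> _distLockManager;
};
```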
diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp
index f5675135aad..c343ff63050 100644
--- a/src/mongo/db/op_observer_impl.cpp
+++ b/src/mongo/db/op_observer_impl.cpp
@@ -47,7 +47,6 @@
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/collection_sharding_state.h"
-#include "mongo/db/s/sharding_state.h"
#include "mongo/db/server_options.h"
#include "mongo/db/session_catalog.h"
#include "mongo/db/views/durable_view_catalog.h"
@@ -458,12 +457,7 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg
if (args.nss != NamespaceString::kSessionTransactionsTableNamespace) {
if (!args.fromMigrate) {
auto css = CollectionShardingState::get(opCtx, args.nss);
- css->onUpdateOp(opCtx,
- args.criteria,
- args.update,
- args.updatedDoc,
- opTime.writeOpTime,
- opTime.prePostImageOpTime);
+ css->onUpdateOp(opCtx, args.updatedDoc, opTime.writeOpTime, opTime.prePostImageOpTime);
}
}
@@ -728,9 +722,6 @@ repl::OpTime OpObserverImpl::onDropCollection(OperationContext* opCtx,
AuthorizationManager::get(opCtx->getServiceContext())
->logOp(opCtx, "c", cmdNss, cmdObj, nullptr);
- auto css = CollectionShardingState::get(opCtx, collectionName);
- css->onDropCollection(opCtx, collectionName);
-
// Evict namespace entry from the namespace/uuid cache if it exists.
NamespaceUUIDCache::get(opCtx).evictNamespace(collectionName);
diff --git a/src/mongo/db/pipeline/SConscript b/src/mongo/db/pipeline/SConscript
index fcbb40e3199..1019ee08d1c 100644
--- a/src/mongo/db/pipeline/SConscript
+++ b/src/mongo/db/pipeline/SConscript
@@ -210,6 +210,7 @@ env.CppUnitTest(
'document_source_lookup_change_post_image_test.cpp',
'document_source_lookup_test.cpp',
'document_source_match_test.cpp',
+ 'document_source_merge_cursors_test.cpp',
'document_source_mock_test.cpp',
'document_source_project_test.cpp',
'document_source_redact_test.cpp',
@@ -228,6 +229,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/repl/replmocks',
'$BUILD_DIR/mongo/db/service_context',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
'$BUILD_DIR/mongo/util/clock_source_mock',
'document_source_mock',
'document_value_test_util',
@@ -235,24 +237,6 @@ env.CppUnitTest(
],
)
-# This test depends on the sharding test fixture, which has global initializers that conflict with
-# the ones set in 'document_source_test', so is split into its own test.
-env.CppUnitTest(
- target='document_source_merge_cursors_test',
- source=[
- 'document_source_merge_cursors_test.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
- '$BUILD_DIR/mongo/db/query/query_request',
- '$BUILD_DIR/mongo/db/query/query_test_service_context',
- '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
- 'pipeline',
- 'document_value_test_util',
- ],
-)
-
env.CppUnitTest(
target='document_source_facet_test',
source='document_source_facet_test.cpp',
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp b/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
index cdaf6b40bc4..4809ac54b50 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors_test.cpp
@@ -46,7 +46,7 @@
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/pipeline/document_source_sample_test.cpp b/src/mongo/db/pipeline/document_source_sample_test.cpp
index 417a2ff5a34..c1c85924c50 100644
--- a/src/mongo/db/pipeline/document_source_sample_test.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_test.cpp
@@ -48,24 +48,9 @@
#include "mongo/util/tick_source_mock.h"
namespace mongo {
-
-std::unique_ptr<ServiceContextNoop> makeTestServiceContext() {
- auto service = stdx::make_unique<ServiceContextNoop>();
- service->setFastClockSource(stdx::make_unique<ClockSourceMock>());
- service->setTickSource(stdx::make_unique<TickSourceMock>());
- return service;
-}
-
namespace {
-using boost::intrusive_ptr;
-static const char* const ns = "unittests.document_source_sample_tests";
-
-// Stub to avoid including the server environment library.
-MONGO_INITIALIZER(SetGlobalEnvironment)(InitializerContext* context) {
- setGlobalServiceContext(makeTestServiceContext());
- return Status::OK();
-}
+using boost::intrusive_ptr;
class SampleBasics : public AggregationContextFixture {
public:
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 5206a25bfca..0c03223c985 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -25,10 +25,12 @@ env.Library(
target='sharding_runtime_d',
source=[
'chunk_splitter.cpp',
+ 'config_server_op_observer.cpp',
'implicit_create_collection.cpp',
'migration_destination_manager.cpp',
'session_catalog_migration_destination.cpp',
'shard_filtering_metadata_refresh.cpp',
+ 'shard_server_op_observer.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/db_raii',
@@ -158,7 +160,6 @@ env.CppUnitTest(
],
LIBDEPS=[
'$BUILD_DIR/mongo/s/config_server_test_fixture',
- '$BUILD_DIR/mongo/s/coreshard',
'$BUILD_DIR/mongo/util/version_impl',
'balancer',
]
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 4b6c79c343c..fcabe61f658 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -47,7 +47,6 @@
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/cluster_identity_loader.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_util.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/s/collection_range_deleter_test.cpp b/src/mongo/db/s/collection_range_deleter_test.cpp
index df1f28be264..beddabc66de 100644
--- a/src/mongo/db/s/collection_range_deleter_test.cpp
+++ b/src/mongo/db/s/collection_range_deleter_test.cpp
@@ -43,7 +43,7 @@
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/shard_server_test_fixture.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -54,46 +54,38 @@ using unittest::assertGet;
using Deletion = CollectionRangeDeleter::Deletion;
const NamespaceString kNss = NamespaceString("foo", "bar");
-const std::string kPattern = "_id";
-const BSONObj kKeyPattern = BSON(kPattern << 1);
-const std::string kShardName{"a"};
-const HostAndPort dummyHost("dummy", 123);
+const std::string kShardKey = "_id";
+const BSONObj kShardKeyPattern = BSON(kShardKey << 1);
const NamespaceString kAdminSysVer = NamespaceString("admin", "system.version");
-class CollectionRangeDeleterTest : public ShardingMongodTestFixture {
+class CollectionRangeDeleterTest : public ShardServerTestFixture {
protected:
void setUp() override {
- _epoch = OID::gen();
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- ShardingMongodTestFixture::setUp();
- replicationCoordinator()->alwaysAllowWrites(true);
- ASSERT_OK(initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost)));
-
- // RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter())
- // ->setConnectionStringReturnValue(kConfigConnStr);
+ ShardServerTestFixture::setUp();
- configTargeter()->setFindHostReturnValue(dummyHost);
+ // Make every test run with a separate epoch
+ _epoch = OID::gen();
- DBDirectClient(operationContext()).createCollection(kNss.ns());
- {
- AutoGetCollection autoColl(operationContext(), kNss, MODE_IX);
- auto collectionShardingState = CollectionShardingState::get(operationContext(), kNss);
- const KeyPattern skPattern(kKeyPattern);
- auto cm = ChunkManager::makeNew(
- kNss,
- UUID::gen(),
- kKeyPattern,
- nullptr,
- false,
- epoch(),
- {ChunkType(kNss,
- ChunkRange{skPattern.globalMin(), skPattern.globalMax()},
- ChunkVersion(1, 0, epoch()),
- ShardId("otherShard"))});
- collectionShardingState->refreshMetadata(
- operationContext(),
- stdx::make_unique<CollectionMetadata>(cm, ShardId("thisShard")));
- }
+ DBDirectClient client(operationContext());
+ client.createCollection(kNss.ns());
+
+ const KeyPattern keyPattern(kShardKeyPattern);
+ auto cm = ChunkManager::makeNew(
+ kNss,
+ UUID::gen(),
+ keyPattern,
+ nullptr,
+ false,
+ epoch(),
+ {ChunkType(kNss,
+ ChunkRange{keyPattern.globalMin(), keyPattern.globalMax()},
+ ChunkVersion(1, 0, epoch()),
+ ShardId("otherShard"))});
+
+ AutoGetCollection autoColl(operationContext(), kNss, MODE_IX);
+ auto const css = CollectionShardingState::get(operationContext(), kNss);
+ css->refreshMetadata(operationContext(),
+ stdx::make_unique<CollectionMetadata>(cm, ShardId("thisShard")));
}
void tearDown() override {
@@ -103,7 +95,7 @@ protected:
collectionShardingState->refreshMetadata(operationContext(), nullptr);
}
- ShardingMongodTestFixture::tearDown();
+ ShardServerTestFixture::tearDown();
}
boost::optional<Date_t> next(CollectionRangeDeleter& rangeDeleter, int maxToDelete) {
@@ -136,19 +128,20 @@ TEST_F(CollectionRangeDeleterTest, EmptyDatabase) {
// Tests the case that there is data, but it is not in a range to clean.
TEST_F(CollectionRangeDeleterTest, NoDataInGivenRangeToClean) {
CollectionRangeDeleter rangeDeleter;
- const BSONObj insertedDoc = BSON(kPattern << 25);
+ const BSONObj insertedDoc = BSON(kShardKey << 25);
DBDirectClient dbclient(operationContext());
dbclient.insert(kNss.toString(), insertedDoc);
- ASSERT_BSONOBJ_EQ(insertedDoc, dbclient.findOne(kNss.toString(), QUERY(kPattern << 25)));
+ ASSERT_BSONOBJ_EQ(insertedDoc, dbclient.findOne(kNss.toString(), QUERY(kShardKey << 25)));
std::list<Deletion> ranges;
- ranges.emplace_back(Deletion{ChunkRange{BSON(kPattern << 0), BSON(kPattern << 10)}, Date_t{}});
+ ranges.emplace_back(
+ Deletion{ChunkRange{BSON(kShardKey << 0), BSON(kShardKey << 10)}, Date_t{}});
auto when = rangeDeleter.add(std::move(ranges));
ASSERT(when && *when == Date_t{});
ASSERT_EQ(1u, rangeDeleter.size());
ASSERT_TRUE(next(rangeDeleter, 1));
ASSERT_EQ(0u, rangeDeleter.size());
- ASSERT_BSONOBJ_EQ(insertedDoc, dbclient.findOne(kNss.toString(), QUERY(kPattern << 25)));
+ ASSERT_BSONOBJ_EQ(insertedDoc, dbclient.findOne(kNss.toString(), QUERY(kShardKey << 25)));
ASSERT_FALSE(next(rangeDeleter, 1));
}
@@ -156,19 +149,19 @@ TEST_F(CollectionRangeDeleterTest, NoDataInGivenRangeToClean) {
// Tests the case that there is a single document within a range to clean.
TEST_F(CollectionRangeDeleterTest, OneDocumentInOneRangeToClean) {
CollectionRangeDeleter rangeDeleter;
- const BSONObj insertedDoc = BSON(kPattern << 5);
+ const BSONObj insertedDoc = BSON(kShardKey << 5);
DBDirectClient dbclient(operationContext());
- dbclient.insert(kNss.toString(), BSON(kPattern << 5));
- ASSERT_BSONOBJ_EQ(insertedDoc, dbclient.findOne(kNss.toString(), QUERY(kPattern << 5)));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 5));
+ ASSERT_BSONOBJ_EQ(insertedDoc, dbclient.findOne(kNss.toString(), QUERY(kShardKey << 5)));
std::list<Deletion> ranges;
- auto deletion = Deletion{ChunkRange(BSON(kPattern << 0), BSON(kPattern << 10)), Date_t{}};
+ auto deletion = Deletion{ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)), Date_t{}};
ranges.emplace_back(std::move(deletion));
auto when = rangeDeleter.add(std::move(ranges));
ASSERT(when && *when == Date_t{});
ASSERT_TRUE(ranges.empty()); // spliced elements out of it
- auto optNotifn = rangeDeleter.overlaps(ChunkRange(BSON(kPattern << 0), BSON(kPattern << 10)));
+ auto optNotifn = rangeDeleter.overlaps(ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)));
ASSERT(optNotifn);
auto notifn = *optNotifn;
ASSERT(!notifn.ready());
@@ -182,31 +175,31 @@ TEST_F(CollectionRangeDeleterTest, OneDocumentInOneRangeToClean) {
ASSERT_TRUE(rangeDeleter.isEmpty());
ASSERT(notifn.ready() && notifn.waitStatus(operationContext()).isOK());
- ASSERT_TRUE(dbclient.findOne(kNss.toString(), QUERY(kPattern << 5)).isEmpty());
+ ASSERT_TRUE(dbclient.findOne(kNss.toString(), QUERY(kShardKey << 5)).isEmpty());
ASSERT_FALSE(next(rangeDeleter, 1));
- ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kPattern << "startRangeDeletion")));
+ ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kShardKey << "startRangeDeletion")));
}
// Tests the case that there are multiple documents within a range to clean.
TEST_F(CollectionRangeDeleterTest, MultipleDocumentsInOneRangeToClean) {
CollectionRangeDeleter rangeDeleter;
DBDirectClient dbclient(operationContext());
- dbclient.insert(kNss.toString(), BSON(kPattern << 1));
- dbclient.insert(kNss.toString(), BSON(kPattern << 2));
- dbclient.insert(kNss.toString(), BSON(kPattern << 3));
- ASSERT_EQUALS(3ULL, dbclient.count(kNss.toString(), BSON(kPattern << LT << 5)));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 1));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 2));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 3));
+ ASSERT_EQUALS(3ULL, dbclient.count(kNss.toString(), BSON(kShardKey << LT << 5)));
std::list<Deletion> ranges;
- auto deletion = Deletion{ChunkRange(BSON(kPattern << 0), BSON(kPattern << 10)), Date_t{}};
+ auto deletion = Deletion{ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)), Date_t{}};
ranges.emplace_back(std::move(deletion));
auto when = rangeDeleter.add(std::move(ranges));
ASSERT(when && *when == Date_t{});
ASSERT_TRUE(next(rangeDeleter, 100));
ASSERT_TRUE(next(rangeDeleter, 100));
- ASSERT_EQUALS(0ULL, dbclient.count(kNss.toString(), BSON(kPattern << LT << 5)));
+ ASSERT_EQUALS(0ULL, dbclient.count(kNss.toString(), BSON(kShardKey << LT << 5)));
ASSERT_FALSE(next(rangeDeleter, 100));
- ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kPattern << "startRangeDeletion")));
+ ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kShardKey << "startRangeDeletion")));
}
// Tests the case that there are multiple documents within a range to clean, and the range deleter
@@ -214,70 +207,71 @@ TEST_F(CollectionRangeDeleterTest, MultipleDocumentsInOneRangeToClean) {
TEST_F(CollectionRangeDeleterTest, MultipleCleanupNextRangeCalls) {
CollectionRangeDeleter rangeDeleter;
DBDirectClient dbclient(operationContext());
- dbclient.insert(kNss.toString(), BSON(kPattern << 1));
- dbclient.insert(kNss.toString(), BSON(kPattern << 2));
- dbclient.insert(kNss.toString(), BSON(kPattern << 3));
- ASSERT_EQUALS(3ULL, dbclient.count(kNss.toString(), BSON(kPattern << LT << 5)));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 1));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 2));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 3));
+ ASSERT_EQUALS(3ULL, dbclient.count(kNss.toString(), BSON(kShardKey << LT << 5)));
std::list<Deletion> ranges;
- auto deletion = Deletion{ChunkRange(BSON(kPattern << 0), BSON(kPattern << 10)), Date_t{}};
+ auto deletion = Deletion{ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)), Date_t{}};
ranges.emplace_back(std::move(deletion));
auto when = rangeDeleter.add(std::move(ranges));
ASSERT(when && *when == Date_t{});
ASSERT_TRUE(next(rangeDeleter, 1));
- ASSERT_EQUALS(2ULL, dbclient.count(kNss.toString(), BSON(kPattern << LT << 5)));
+ ASSERT_EQUALS(2ULL, dbclient.count(kNss.toString(), BSON(kShardKey << LT << 5)));
ASSERT_TRUE(next(rangeDeleter, 1));
- ASSERT_EQUALS(1ULL, dbclient.count(kNss.toString(), BSON(kPattern << LT << 5)));
+ ASSERT_EQUALS(1ULL, dbclient.count(kNss.toString(), BSON(kShardKey << LT << 5)));
ASSERT_TRUE(next(rangeDeleter, 1));
ASSERT_TRUE(next(rangeDeleter, 1));
- ASSERT_EQUALS(0ULL, dbclient.count(kNss.toString(), BSON(kPattern << LT << 5)));
+ ASSERT_EQUALS(0ULL, dbclient.count(kNss.toString(), BSON(kShardKey << LT << 5)));
ASSERT_FALSE(next(rangeDeleter, 1));
- ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kPattern << "startRangeDeletion")));
+ ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kShardKey << "startRangeDeletion")));
}
// Tests the case that there are two ranges to clean, each containing multiple documents.
TEST_F(CollectionRangeDeleterTest, MultipleDocumentsInMultipleRangesToClean) {
CollectionRangeDeleter rangeDeleter;
DBDirectClient dbclient(operationContext());
- dbclient.insert(kNss.toString(), BSON(kPattern << 1));
- dbclient.insert(kNss.toString(), BSON(kPattern << 2));
- dbclient.insert(kNss.toString(), BSON(kPattern << 3));
- dbclient.insert(kNss.toString(), BSON(kPattern << 4));
- dbclient.insert(kNss.toString(), BSON(kPattern << 5));
- dbclient.insert(kNss.toString(), BSON(kPattern << 6));
- ASSERT_EQUALS(6ULL, dbclient.count(kNss.toString(), BSON(kPattern << LT << 10)));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 1));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 2));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 3));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 4));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 5));
+ dbclient.insert(kNss.toString(), BSON(kShardKey << 6));
+ ASSERT_EQUALS(6ULL, dbclient.count(kNss.toString(), BSON(kShardKey << LT << 10)));
std::list<Deletion> ranges;
auto later = Date_t::now();
- ranges.emplace_back(Deletion{ChunkRange{BSON(kPattern << 0), BSON(kPattern << 3)}, later});
+ ranges.emplace_back(Deletion{ChunkRange{BSON(kShardKey << 0), BSON(kShardKey << 3)}, later});
auto when = rangeDeleter.add(std::move(ranges));
ASSERT(when && *when == later);
ASSERT_TRUE(ranges.empty()); // not guaranteed by std, but failure would indicate a problem.
std::list<Deletion> ranges2;
- ranges2.emplace_back(Deletion{ChunkRange{BSON(kPattern << 4), BSON(kPattern << 7)}, later});
+ ranges2.emplace_back(Deletion{ChunkRange{BSON(kShardKey << 4), BSON(kShardKey << 7)}, later});
when = rangeDeleter.add(std::move(ranges2));
ASSERT(!when);
std::list<Deletion> ranges3;
- ranges3.emplace_back(Deletion{ChunkRange{BSON(kPattern << 3), BSON(kPattern << 4)}, Date_t{}});
+ ranges3.emplace_back(
+ Deletion{ChunkRange{BSON(kShardKey << 3), BSON(kShardKey << 4)}, Date_t{}});
when = rangeDeleter.add(std::move(ranges3));
ASSERT(when);
- auto optNotifn1 = rangeDeleter.overlaps(ChunkRange{BSON(kPattern << 0), BSON(kPattern << 3)});
+ auto optNotifn1 = rangeDeleter.overlaps(ChunkRange{BSON(kShardKey << 0), BSON(kShardKey << 3)});
ASSERT_TRUE(optNotifn1);
auto& notifn1 = *optNotifn1;
ASSERT_FALSE(notifn1.ready());
- auto optNotifn2 = rangeDeleter.overlaps(ChunkRange{BSON(kPattern << 4), BSON(kPattern << 7)});
+ auto optNotifn2 = rangeDeleter.overlaps(ChunkRange{BSON(kShardKey << 4), BSON(kShardKey << 7)});
ASSERT_TRUE(optNotifn2);
auto& notifn2 = *optNotifn2;
ASSERT_FALSE(notifn2.ready());
- auto optNotifn3 = rangeDeleter.overlaps(ChunkRange{BSON(kPattern << 3), BSON(kPattern << 4)});
+ auto optNotifn3 = rangeDeleter.overlaps(ChunkRange{BSON(kShardKey << 3), BSON(kShardKey << 4)});
ASSERT_TRUE(optNotifn3);
auto& notifn3 = *optNotifn3;
ASSERT_FALSE(notifn3.ready());
@@ -289,9 +283,9 @@ TEST_F(CollectionRangeDeleterTest, MultipleDocumentsInMultipleRangesToClean) {
ASSERT_FALSE(notifn1 != *optNotifn1);
// no op log entry yet
- ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kPattern << "startRangeDeletion")));
+ ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kShardKey << "startRangeDeletion")));
- ASSERT_EQUALS(6ULL, dbclient.count(kNss.ns(), BSON(kPattern << LT << 7)));
+ ASSERT_EQUALS(6ULL, dbclient.count(kNss.ns(), BSON(kShardKey << LT << 7)));
// catch range3, [3..4) only
auto next1 = next(rangeDeleter, 100);
@@ -299,11 +293,11 @@ TEST_F(CollectionRangeDeleterTest, MultipleDocumentsInMultipleRangesToClean) {
ASSERT_EQUALS(*next1, Date_t{});
// no op log entry for immediate deletions
- ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kPattern << "startRangeDeletion")));
+ ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kShardKey << "startRangeDeletion")));
// 3 gone
- ASSERT_EQUALS(5ULL, dbclient.count(kNss.ns(), BSON(kPattern << LT << 7)));
- ASSERT_EQUALS(2ULL, dbclient.count(kNss.ns(), BSON(kPattern << LT << 4)));
+ ASSERT_EQUALS(5ULL, dbclient.count(kNss.ns(), BSON(kShardKey << LT << 7)));
+ ASSERT_EQUALS(2ULL, dbclient.count(kNss.ns(), BSON(kShardKey << LT << 4)));
ASSERT_FALSE(notifn1.ready()); // no trigger yet
ASSERT_FALSE(notifn2.ready()); // no trigger yet
@@ -315,11 +309,11 @@ TEST_F(CollectionRangeDeleterTest, MultipleDocumentsInMultipleRangesToClean) {
ASSERT_EQUALS(*next2, Date_t{});
// still no op log entry, because not delayed
- ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kPattern << "startRangeDeletion")));
+ ASSERT_EQUALS(0ULL, dbclient.count(kAdminSysVer.ns(), BSON(kShardKey << "startRangeDeletion")));
// deleted 1, 5 left
- ASSERT_EQUALS(2ULL, dbclient.count(kNss.ns(), BSON(kPattern << LT << 4)));
- ASSERT_EQUALS(5ULL, dbclient.count(kNss.ns(), BSON(kPattern << LT << 10)));
+ ASSERT_EQUALS(2ULL, dbclient.count(kNss.ns(), BSON(kShardKey << LT << 4)));
+ ASSERT_EQUALS(5ULL, dbclient.count(kNss.ns(), BSON(kShardKey << LT << 10)));
ASSERT_FALSE(notifn1.ready()); // no trigger yet
ASSERT_FALSE(notifn2.ready()); // no trigger yet
@@ -336,9 +330,9 @@ TEST_F(CollectionRangeDeleterTest, MultipleDocumentsInMultipleRangesToClean) {
ASSERT_FALSE(notifn2.ready()); // no trigger yet
// deleted 3, 3 left
- ASSERT_EQUALS(3ULL, dbclient.count(kNss.ns(), BSON(kPattern << LT << 10)));
+ ASSERT_EQUALS(3ULL, dbclient.count(kNss.ns(), BSON(kShardKey << LT << 10)));
- ASSERT_EQUALS(1ULL, dbclient.count(kAdminSysVer.ns(), BSON(kPattern << "startRangeDeletion")));
+ ASSERT_EQUALS(1ULL, dbclient.count(kAdminSysVer.ns(), BSON(kShardKey << "startRangeDeletion")));
// clang-format off
ASSERT_BSONOBJ_EQ(
BSON("_id" << "startRangeDeletion" << "ns" << kNss.ns()
@@ -364,7 +358,7 @@ TEST_F(CollectionRangeDeleterTest, MultipleDocumentsInMultipleRangesToClean) {
// clang-format on
// still 3 left
- ASSERT_EQUALS(3ULL, dbclient.count(kNss.ns(), BSON(kPattern << LT << 10)));
+ ASSERT_EQUALS(3ULL, dbclient.count(kNss.ns(), BSON(kShardKey << LT << 10)));
// delete the remaining documents
auto next5 = next(rangeDeleter, 100);
@@ -382,7 +376,7 @@ TEST_F(CollectionRangeDeleterTest, MultipleDocumentsInMultipleRangesToClean) {
// clang-format on
// all docs gone
- ASSERT_EQUALS(0ULL, dbclient.count(kNss.ns(), BSON(kPattern << LT << 10)));
+ ASSERT_EQUALS(0ULL, dbclient.count(kNss.ns(), BSON(kShardKey << LT << 10)));
// discover there are no more, pop range 2
auto next6 = next(rangeDeleter, 100);
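
The refactored fixture above seeds metadata for a single chunk owned by another shard, so every locally inserted document is eligible for range deletion. For orientation, here is a toy model — explicitly not the real CollectionRangeDeleter — of the queue contract these assertions exercise: add() splices ranges in and reports a wake-up time, and each next()-style pass deletes at most maxToDelete matching documents, with one extra pass needed to discover that a range is drained:

```cpp
#include <list>
#include <optional>

struct Range { int min, max; };  // stand-in for ChunkRange over the shard key

class RangeDeleterModel {
public:
    // Returns a wake-up time when this call changes when the queue should next
    // be serviced (heavily simplified from the Date_t logic in the tests).
    std::optional<int> add(std::list<Range> ranges, int whenToDelete = 0) {
        const bool wasEmpty = _queue.empty();
        _queue.splice(_queue.end(), ranges);  // spliced out, as the tests assert
        return wasEmpty ? std::optional<int>(whenToDelete) : std::nullopt;
    }

    // One cleanup pass: deletes up to maxToDelete docs in the front range.
    bool next(std::list<int>& docs, int maxToDelete) {
        if (_queue.empty())
            return false;  // nothing scheduled
        const Range r = _queue.front();
        int deleted = 0;
        for (auto it = docs.begin(); it != docs.end() && deleted < maxToDelete;) {
            if (*it >= r.min && *it < r.max) {
                it = docs.erase(it);
                ++deleted;
            } else {
                ++it;
            }
        }
        if (deleted == 0)
            _queue.pop_front();  // an extra pass discovers the drained range
        return true;
    }

private:
    std::list<Range> _queue;
};
```

This reproduces the cadence asserted above: three documents with maxToDelete=1 yield four true passes before next() finally returns false.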
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 3c8d4d6e8d6..d19955f6a3c 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -37,29 +37,16 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/migration_chunk_cloner_source.h"
#include "mongo/db/s/migration_source_manager.h"
#include "mongo/db/s/operation_sharding_state.h"
-#include "mongo/db/s/shard_identity_rollback_notifier.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/s/type_shard_identity.h"
-#include "mongo/db/server_options.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/network_interface_factory.h"
#include "mongo/executor/network_interface_thread_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
-#include "mongo/s/balancer_configuration.h"
-#include "mongo/s/catalog/type_config_version.h"
-#include "mongo/s/catalog/type_shard.h"
-#include "mongo/s/catalog/type_shard_collection.h"
-#include "mongo/s/catalog_cache.h"
-#include "mongo/s/catalog_cache_loader.h"
-#include "mongo/s/cluster_identity_loader.h"
-#include "mongo/s/grid.h"
#include "mongo/s/stale_exception.h"
#include "mongo/util/log.h"
@@ -169,64 +156,6 @@ private:
const auto getCollectionShardingStateMap =
ServiceContext::declareDecoration<CollectionShardingStateMap>();
-/**
- * Used to perform shard identity initialization once it is certain that the document is committed.
- */
-class ShardIdentityLogOpHandler final : public RecoveryUnit::Change {
-public:
- ShardIdentityLogOpHandler(OperationContext* opCtx, ShardIdentityType shardIdentity)
- : _opCtx(opCtx), _shardIdentity(std::move(shardIdentity)) {}
-
- void commit() override {
- fassertNoTrace(
- 40071, ShardingState::get(_opCtx)->initializeFromShardIdentity(_opCtx, _shardIdentity));
- }
-
- void rollback() override {}
-
-private:
- OperationContext* _opCtx;
- const ShardIdentityType _shardIdentity;
-};
-
-/**
- * Used to notify the catalog cache loader of a new collection version and invalidate the in-memory
- * routing table cache once the oplog updates are committed and become visible.
- */
-class CollectionVersionLogOpHandler final : public RecoveryUnit::Change {
-public:
- CollectionVersionLogOpHandler(OperationContext* opCtx, const NamespaceString& nss)
- : _opCtx(opCtx), _nss(nss) {}
-
- void commit() override {
- invariant(_opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
-
- CatalogCacheLoader::get(_opCtx).notifyOfCollectionVersionUpdate(_nss);
-
- // This is a hack to get around CollectionShardingState::refreshMetadata() requiring the X
- // lock: markNotShardedAtStepdown() doesn't have a lock check. Temporary measure until
- // SERVER-31595 removes the X lock requirement.
- CollectionShardingState::get(_opCtx, _nss)->markNotShardedAtStepdown();
- }
-
- void rollback() override {}
-
-private:
- OperationContext* _opCtx;
- const NamespaceString _nss;
-};
-
-/**
- * Caller must hold the global lock in some mode other than MODE_NONE.
- */
-bool isStandaloneOrPrimary(OperationContext* opCtx) {
- dassert(opCtx->lockState()->isLocked());
- auto replCoord = repl::ReplicationCoordinator::get(opCtx);
- bool isReplSet = replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
- return !isReplSet || (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
- repl::MemberState::RS_PRIMARY);
-}
-
} // namespace
CollectionShardingState::CollectionShardingState(ServiceContext* sc, NamespaceString nss)
@@ -394,24 +323,6 @@ void CollectionShardingState::onInsertOp(OperationContext* opCtx,
const repl::OpTime& opTime) {
dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
- if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
- if (_nss == NamespaceString::kServerConfigurationNamespace) {
- if (auto idElem = insertedDoc["_id"]) {
- if (idElem.str() == ShardIdentityType::IdName) {
- auto shardIdentityDoc =
- uassertStatusOK(ShardIdentityType::fromBSON(insertedDoc));
- uassertStatusOK(shardIdentityDoc.validate());
- opCtx->recoveryUnit()->registerChange(
- new ShardIdentityLogOpHandler(opCtx, std::move(shardIdentityDoc)));
- }
- }
- }
-
- if (ShardingState::get(opCtx)->enabled()) {
- _incrementChunkOnInsertOrUpdate(opCtx, insertedDoc, insertedDoc.objsize());
- }
- }
-
checkShardVersionOrThrow(opCtx);
if (_sourceMgr) {
@@ -420,23 +331,11 @@ void CollectionShardingState::onInsertOp(OperationContext* opCtx,
}
void CollectionShardingState::onUpdateOp(OperationContext* opCtx,
- const BSONObj& query,
- const BSONObj& update,
const BSONObj& updatedDoc,
const repl::OpTime& opTime,
const repl::OpTime& prePostImageOpTime) {
dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
- if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
- if (_nss.ns() == NamespaceString::kShardConfigCollectionsCollectionName) {
- _onConfigCollectionsUpdateOp(opCtx, query, update, updatedDoc);
- }
-
- if (ShardingState::get(opCtx)->enabled()) {
- _incrementChunkOnInsertOrUpdate(opCtx, updatedDoc, update.objsize());
- }
- }
-
checkShardVersionOrThrow(opCtx);
if (_sourceMgr) {
@@ -455,41 +354,6 @@ void CollectionShardingState::onDeleteOp(OperationContext* opCtx,
const repl::OpTime& preImageOpTime) {
dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
- if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
- if (_nss.ns() == NamespaceString::kShardConfigCollectionsCollectionName) {
- _onConfigDeleteInvalidateCachedMetadataAndNotify(opCtx, deleteState.documentKey);
- }
-
- if (_nss == NamespaceString::kServerConfigurationNamespace) {
- if (auto idElem = deleteState.documentKey["_id"]) {
- auto idStr = idElem.str();
- if (idStr == ShardIdentityType::IdName) {
- if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
- uasserted(40070,
- "cannot delete shardIdentity document while in --shardsvr mode");
- } else {
- warning() << "Shard identity document rolled back. Will shut down after "
- "finishing rollback.";
- ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
- }
- }
- }
- }
- }
-
- if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- if (_nss == VersionType::ConfigNS) {
- if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
- uasserted(40302, "cannot delete config.version document while in --configsvr mode");
- } else {
- // Throw out any cached information related to the cluster ID.
- ShardingCatalogManager::get(opCtx)
- ->discardCachedConfigDatabaseInitializationState();
- ClusterIdentityLoader::get(opCtx)->discardCachedClusterId();
- }
- }
- }
-
checkShardVersionOrThrow(opCtx);
if (_sourceMgr && deleteState.isMigrating) {
@@ -497,100 +361,6 @@ void CollectionShardingState::onDeleteOp(OperationContext* opCtx,
}
}
-void CollectionShardingState::onDropCollection(OperationContext* opCtx,
- const NamespaceString& collectionName) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
-
- if (serverGlobalParams.clusterRole == ClusterRole::ShardServer &&
- _nss == NamespaceString::kServerConfigurationNamespace) {
- // Dropping system collections is not allowed for end users.
- invariant(!opCtx->writesAreReplicated());
- invariant(repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback());
-
- // Can't confirm whether there was a ShardIdentity document or not yet, so assume there was
- // one and shut down the process to clear the in-memory sharding state.
- warning() << "admin.system.version collection rolled back. Will shut down after "
- "finishing rollback";
- ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
- }
-
- if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
- if (_nss == VersionType::ConfigNS) {
- if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
- uasserted(40303, "cannot drop config.version document while in --configsvr mode");
- } else {
- // Throw out any cached information related to the cluster ID.
- ShardingCatalogManager::get(opCtx)
- ->discardCachedConfigDatabaseInitializationState();
- ClusterIdentityLoader::get(opCtx)->discardCachedClusterId();
- }
- }
- }
-}
-
-void CollectionShardingState::_onConfigCollectionsUpdateOp(OperationContext* opCtx,
- const BSONObj& query,
- const BSONObj& update,
- const BSONObj& updatedDoc) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
- invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
-
- // Notification of routing table changes are only needed on secondaries.
- if (isStandaloneOrPrimary(opCtx)) {
- return;
- }
-
- // Extract which user collection was updated.
- std::string updatedCollection;
- fassert(40477,
- bsonExtractStringField(query, ShardCollectionType::ns.name(), &updatedCollection));
-
- // Parse the '$set' update.
- BSONElement setElement;
- Status setStatus = bsonExtractTypedField(update, StringData("$set"), Object, &setElement);
- if (setStatus.isOK()) {
- BSONObj setField = setElement.Obj();
- const NamespaceString updatedNss(updatedCollection);
-
- // Need the WUOW to retain the lock for CollectionVersionLogOpHandler::commit().
- AutoGetCollection autoColl(opCtx, updatedNss, MODE_IX);
-
- if (setField.hasField(ShardCollectionType::lastRefreshedCollectionVersion.name())) {
- opCtx->recoveryUnit()->registerChange(
- new CollectionVersionLogOpHandler(opCtx, updatedNss));
- }
-
- if (setField.hasField(ShardCollectionType::enterCriticalSectionCounter.name())) {
- // This is a hack to get around CollectionShardingState::refreshMetadata() requiring the
- // X lock: markNotShardedAtStepdown() doesn't have a lock check. Temporary measure until
- // SERVER-31595 removes the X lock requirement.
- CollectionShardingState::get(opCtx, updatedNss)->markNotShardedAtStepdown();
- }
- }
-}
-
-void CollectionShardingState::_onConfigDeleteInvalidateCachedMetadataAndNotify(
- OperationContext* opCtx, const BSONObj& query) {
- dassert(opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
- invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
-
- // Notification of routing table changes are only needed on secondaries.
- if (isStandaloneOrPrimary(opCtx)) {
- return;
- }
-
- // Extract which collection entry is being deleted from the _id field.
- std::string deletedCollection;
- fassert(40479,
- bsonExtractStringField(query, ShardCollectionType::ns.name(), &deletedCollection));
- const NamespaceString deletedNss(deletedCollection);
-
- // Need the WUOW to retain the lock for CollectionVersionLogOpHandler::commit().
- AutoGetCollection autoColl(opCtx, deletedNss, MODE_IX);
-
- opCtx->recoveryUnit()->registerChange(new CollectionVersionLogOpHandler(opCtx, deletedNss));
-}
-
bool CollectionShardingState::_checkShardVersionOk(OperationContext* opCtx,
std::string* errmsg,
ChunkVersion* expectedShardVersion,
@@ -681,58 +451,4 @@ bool CollectionShardingState::_checkShardVersionOk(OperationContext* opCtx,
MONGO_UNREACHABLE;
}
-uint64_t CollectionShardingState::_incrementChunkOnInsertOrUpdate(OperationContext* opCtx,
- const BSONObj& document,
- long dataWritten) {
-
- // Here, get the collection metadata and check if it exists. If it doesn't exist, then the
- // collection is not sharded, and we can simply return -1.
- ScopedCollectionMetadata metadata = getMetadata();
- if (!metadata) {
- return -1;
- }
-
- std::shared_ptr<ChunkManager> cm = metadata->getChunkManager();
- const ShardKeyPattern& shardKeyPattern = cm->getShardKeyPattern();
-
- // Each inserted/updated document should contain the shard key. The only instance in which a
- // document could not contain a shard key is if the insert/update is performed through mongod
- // explicitly, as opposed to first routed through mongos.
- BSONObj shardKey = shardKeyPattern.extractShardKeyFromDoc(document);
- if (shardKey.woCompare(BSONObj()) == 0) {
- warning() << "inserting document " << document.toString() << " without shard key pattern "
- << shardKeyPattern << " into a sharded collection";
- return -1;
- }
-
- // Use the shard key to locate the chunk into which the document was updated, and increment the
- // number of bytes tracked for the chunk. Note that we can assume the simple collation, because
- // shard keys do not support non-simple collations.
- auto chunk = cm->findIntersectingChunkWithSimpleCollation(shardKey);
- chunk->addBytesWritten(dataWritten);
-
- // If the chunk becomes too large, then we call the ChunkSplitter to schedule a split. Then, we
- // reset the tracking for that chunk to 0.
- if (_shouldSplitChunk(opCtx, shardKeyPattern, *chunk)) {
- // TODO: call ChunkSplitter here
- chunk->clearBytesWritten();
- }
-
- return chunk->getBytesWritten();
-}
-
-bool CollectionShardingState::_shouldSplitChunk(OperationContext* opCtx,
- const ShardKeyPattern& shardKeyPattern,
- const Chunk& chunk) {
-
- const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
- invariant(balancerConfig);
-
- const KeyPattern keyPattern = shardKeyPattern.getKeyPattern();
- const bool minIsInf = (0 == keyPattern.globalMin().woCompare(chunk.getMin()));
- const bool maxIsInf = (0 == keyPattern.globalMax().woCompare(chunk.getMax()));
-
- return chunk.shouldSplit(balancerConfig->getMaxChunkSizeBytes(), minIsInf, maxIsInf);
-}
-
} // namespace mongo
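
The two deleted RecoveryUnit::Change subclasses above (ShardIdentityLogOpHandler and CollectionVersionLogOpHandler) move into the new op observers, but the idiom they share is worth spelling out, since the rewritten tests later in this diff depend on it: side effects that must only happen if the write really commits are deferred via registerChange(). A compilable sketch with simplified stand-in types (the real registerChange() takes ownership of a raw pointer, not a unique_ptr):

```cpp
#include <memory>
#include <vector>

class RecoveryUnit {
public:
    struct Change {
        virtual ~Change() = default;
        virtual void commit() = 0;
        virtual void rollback() = 0;
    };

    void registerChange(std::unique_ptr<Change> change) {
        _changes.push_back(std::move(change));
    }

    void commitUnitOfWork() {
        for (auto& change : _changes)
            change->commit();  // runs only once the write is committed and visible
        _changes.clear();
    }

    void abandonUnitOfWork() {
        for (auto it = _changes.rbegin(); it != _changes.rend(); ++it)
            (*it)->rollback();  // undo in reverse registration order
        _changes.clear();
    }

private:
    std::vector<std::unique_ptr<Change>> _changes;
};
```

This is why ShardIdentityLogOpHandler could fassert on initialization failure inside commit(): by that point the shard identity document is guaranteed durable, so there is no aborted write to confuse the in-memory sharding state.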
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 30ba8a295d0..14b403cd93f 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -32,21 +32,14 @@
#include <string>
#include "mongo/base/disallow_copying.h"
-#include "mongo/base/string_data.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/metadata_manager.h"
-#include "mongo/util/concurrency/notification.h"
namespace mongo {
-class BalancerConfiguration;
-class BSONObj;
-class BSONObjBuilder;
-struct ChunkVersion;
-class CollectionMetadata;
class MigrationSourceManager;
class OperationContext;
-class Timestamp;
/**
* Contains all sharding-related runtime state for a given collection. One such object is assigned
@@ -229,8 +222,6 @@ public:
const BSONObj& insertedDoc,
const repl::OpTime& opTime);
void onUpdateOp(OperationContext* opCtx,
- const BSONObj& query,
- const BSONObj& update,
const BSONObj& updatedDoc,
const repl::OpTime& opTime,
const repl::OpTime& prePostImageOpTime);
@@ -238,52 +229,9 @@ public:
const DeleteState& deleteState,
const repl::OpTime& opTime,
const repl::OpTime& preImageOpTime);
- void onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName);
private:
/**
- * This runs on updates to the shard's persisted cache of the config server's
- * config.collections collection.
- *
- * If an update occurs to the 'lastRefreshedCollectionVersion' field, registers a task on the
- * opCtx -- to run after writes from the oplog are committed and visible to reads -- to notify
- * the catalog cache loader of a new collection version and clear the routing table so the next
- * caller with routing information will provoke a routing table refresh. When
- * 'lastRefreshedCollectionVersion' is in 'update', it means that a chunk metadata refresh
- * finished being applied to the collection's locally persisted metadata store.
- *
- * If an update occurs to the 'enterCriticalSectionSignal' field, simply clear the routing table
- * immediately. This will provoke the next secondary caller to refresh through the primary,
- * blocking behind the critical section.
- *
- * query - BSON with an _id that identifies which collections entry is being updated.
- * update - the update being applied to the collections entry.
- * updatedDoc - the document identified by 'query' with the 'update' applied.
- *
- * This only runs on secondaries.
- * The global exclusive lock is expected to be held by the caller.
- */
- void _onConfigCollectionsUpdateOp(OperationContext* opCtx,
- const BSONObj& query,
- const BSONObj& update,
- const BSONObj& updatedDoc);
-
- /**
- * Invalidates the in-memory routing table cache when a collection is dropped, so the next
- * caller with routing information will provoke a routing table refresh and see the drop.
- *
- * Registers a task on the opCtx -- to run after writes from the oplog are committed and visible
- * to reads.
- *
- * query - BSON with an _id field that identifies which collections entry is being updated.
- *
- * This only runs on secondaries.
- * The global exclusive lock is expected to be held by the caller.
- */
- void _onConfigDeleteInvalidateCachedMetadataAndNotify(OperationContext* opCtx,
- const BSONObj& query);
-
- /**
* Checks whether the shard version of the operation matches that of the collection.
*
* opCtx - Operation context from which to retrieve the operation's expected version.
@@ -301,23 +249,6 @@ private:
ChunkVersion* expectedShardVersion,
ChunkVersion* actualShardVersion);
- /**
- * If the collection is sharded, finds the chunk that contains the specified document, and
- * increments the size tracked for that chunk by the specified amount of data written, in
- * bytes. Returns the number of total bytes on that chunk, after the data is written.
- */
- uint64_t _incrementChunkOnInsertOrUpdate(OperationContext* opCtx,
- const BSONObj& document,
- long dataWritten);
-
- /**
- * Returns true if the total number of bytes on the specified chunk nears the max size of
- * a shard.
- */
- bool _shouldSplitChunk(OperationContext* opCtx,
- const ShardKeyPattern& shardKeyPattern,
- const Chunk& chunk);
-
// Namespace this state belongs to.
const NamespaceString _nss;
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index ee083fec502..d50d0b10d86 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -28,6 +28,7 @@
#include "mongo/platform/basic.h"
#include "mongo/db/catalog_raii.h"
+#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
@@ -39,7 +40,7 @@ namespace {
const NamespaceString kTestNss("TestDB", "TestColl");
-class CollShardingStateTest : public ShardServerTestFixture {
+class CollectionShardingStateTest : public ShardServerTestFixture {
public:
void setUp() override {
ShardServerTestFixture::setUp();
@@ -62,106 +63,63 @@ private:
int _initCallCount = 0;
};
-TEST_F(CollShardingStateTest, GlobalInitGetsCalledAfterWriteCommits) {
- // Must hold a lock to call initializeFromShardIdentity, which is called by the op observer on
- // the shard identity document.
- Lock::GlobalWrite lock(operationContext());
-
- CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kServerConfigurationNamespace);
-
+TEST_F(CollectionShardingStateTest, GlobalInitGetsCalledAfterWriteCommits) {
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
shardIdentity.setShardName("a");
shardIdentity.setClusterId(OID::gen());
- WriteUnitOfWork wuow(operationContext());
- collShardingState.onInsertOp(operationContext(), shardIdentity.toBSON(), {});
-
- ASSERT_EQ(0, getInitCallCount());
-
- wuow.commit();
-
+ DBDirectClient client(operationContext());
+ client.insert("admin.system.version", shardIdentity.toBSON());
ASSERT_EQ(1, getInitCallCount());
}
-TEST_F(CollShardingStateTest, GlobalInitDoesntGetCalledIfWriteAborts) {
- // Must hold a lock to call initializeFromShardIdentity, which is called by the op observer on
- // the shard identity document.
- Lock::GlobalWrite lock(operationContext());
-
- CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kServerConfigurationNamespace);
-
+TEST_F(CollectionShardingStateTest, GlobalInitDoesntGetCalledIfWriteAborts) {
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
shardIdentity.setShardName("a");
shardIdentity.setClusterId(OID::gen());
+ // This part of the test ensures that the collection exists for the AutoGetCollection below to
+ // find and also validates that the initializer does not get called for non-sharding documents
+ DBDirectClient client(operationContext());
+ client.insert("admin.system.version", BSON("_id" << 1));
+ ASSERT_EQ(0, getInitCallCount());
+
{
- WriteUnitOfWork wuow(operationContext());
- collShardingState.onInsertOp(operationContext(), shardIdentity.toBSON(), {});
+ AutoGetCollection autoColl(
+ operationContext(), NamespaceString("admin.system.version"), MODE_IX);
+ WriteUnitOfWork wuow(operationContext());
+ ASSERT_OK(autoColl.getCollection()->insertDocument(
+ operationContext(), shardIdentity.toBSON(), {}, false));
ASSERT_EQ(0, getInitCallCount());
}
ASSERT_EQ(0, getInitCallCount());
}
-TEST_F(CollShardingStateTest, GlobalInitDoesntGetsCalledIfNSIsNotForShardIdentity) {
- // Must hold a lock to call initializeFromShardIdentity, which is called by the op observer on
- // the shard identity document.
- Lock::GlobalWrite lock(operationContext());
-
- CollectionShardingState collShardingState(getServiceContext(), NamespaceString("admin.user"));
-
+TEST_F(CollectionShardingStateTest, GlobalInitDoesntGetsCalledIfNSIsNotForShardIdentity) {
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
shardIdentity.setShardName("a");
shardIdentity.setClusterId(OID::gen());
- WriteUnitOfWork wuow(operationContext());
- collShardingState.onInsertOp(operationContext(), shardIdentity.toBSON(), {});
-
- ASSERT_EQ(0, getInitCallCount());
-
- wuow.commit();
-
+ DBDirectClient client(operationContext());
+ client.insert("admin.user", shardIdentity.toBSON());
ASSERT_EQ(0, getInitCallCount());
}
-TEST_F(CollShardingStateTest, OnInsertOpThrowWithIncompleteShardIdentityDocument) {
- // Must hold a lock to call CollectionShardingState::onInsertOp.
- Lock::GlobalWrite lock(operationContext());
-
- CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kServerConfigurationNamespace);
-
+TEST_F(CollectionShardingStateTest, OnInsertOpThrowWithIncompleteShardIdentityDocument) {
ShardIdentityType shardIdentity;
shardIdentity.setShardName("a");
- ASSERT_THROWS(collShardingState.onInsertOp(operationContext(), shardIdentity.toBSON(), {}),
- AssertionException);
-}
-
-TEST_F(CollShardingStateTest, GlobalInitDoesntGetsCalledIfShardIdentityDocWasNotInserted) {
- // Must hold a lock to call CollectionShardingState::onInsertOp.
- Lock::GlobalWrite lock(operationContext());
-
- CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kServerConfigurationNamespace);
-
- WriteUnitOfWork wuow(operationContext());
- collShardingState.onInsertOp(operationContext(), BSON("_id" << 1), {});
-
- ASSERT_EQ(0, getInitCallCount());
-
- wuow.commit();
-
- ASSERT_EQ(0, getInitCallCount());
+ DBDirectClient client(operationContext());
+ client.insert("admin.system.version", shardIdentity.toBSON());
+ ASSERT(!client.getLastError().empty());
}
/**
@@ -170,7 +128,7 @@ TEST_F(CollShardingStateTest, GlobalInitDoesntGetsCalledIfShardIdentityDocWasNot
* that DeleteState's constructor will extract from its `doc` argument into its member
* DeleteState::documentKey.
*/
-auto makeAMetadata(BSONObj const& keyPattern) -> std::unique_ptr<CollectionMetadata> {
+std::unique_ptr<CollectionMetadata> makeAMetadata(BSONObj const& keyPattern) {
const OID epoch = OID::gen();
auto range = ChunkRange(BSON("key" << MINKEY), BSON("key" << MAXKEY));
auto chunk = ChunkType(kTestNss, std::move(range), ChunkVersion(1, 0, epoch), ShardId("other"));
@@ -179,7 +137,9 @@ auto makeAMetadata(BSONObj const& keyPattern) -> std::unique_ptr<CollectionMetad
return stdx::make_unique<CollectionMetadata>(std::move(cm), ShardId("this"));
}
-TEST_F(CollShardingStateTest, MakeDeleteStateUnsharded) {
+using DeleteStateTest = ShardServerTestFixture;
+
+TEST_F(DeleteStateTest, MakeDeleteStateUnsharded) {
AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
auto* css = CollectionShardingState::get(operationContext(), kTestNss);
@@ -201,7 +161,7 @@ TEST_F(CollShardingStateTest, MakeDeleteStateUnsharded) {
ASSERT_FALSE(deleteState.isMigrating);
}
-TEST_F(CollShardingStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
+TEST_F(DeleteStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
auto* css = CollectionShardingState::get(operationContext(), kTestNss);
@@ -228,7 +188,7 @@ TEST_F(CollShardingStateTest, MakeDeleteStateShardedWithoutIdInShardKey) {
ASSERT_FALSE(deleteState.isMigrating);
}
-TEST_F(CollShardingStateTest, MakeDeleteStateShardedWithIdInShardKey) {
+TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdInShardKey) {
AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
auto* css = CollectionShardingState::get(operationContext(), kTestNss);
@@ -254,7 +214,7 @@ TEST_F(CollShardingStateTest, MakeDeleteStateShardedWithIdInShardKey) {
ASSERT_FALSE(deleteState.isMigrating);
}
-TEST_F(CollShardingStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
+TEST_F(DeleteStateTest, MakeDeleteStateShardedWithIdHashInShardKey) {
AutoGetCollection autoColl(operationContext(), kTestNss, MODE_IX);
auto* css = CollectionShardingState::get(operationContext(), kTestNss);
diff --git a/src/mongo/db/s/config_server_op_observer.cpp b/src/mongo/db/s/config_server_op_observer.cpp
new file mode 100644
index 00000000000..6b52c94ef37
--- /dev/null
+++ b/src/mongo/db/s/config_server_op_observer.cpp
@@ -0,0 +1,78 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/s/config_server_op_observer.h"
+
+#include "mongo/db/s/config/sharding_catalog_manager.h"
+#include "mongo/s/catalog/type_config_version.h"
+#include "mongo/s/cluster_identity_loader.h"
+
+namespace mongo {
+
+ConfigServerOpObserver::ConfigServerOpObserver() = default;
+
+ConfigServerOpObserver::~ConfigServerOpObserver() = default;
+
+void ConfigServerOpObserver::onDelete(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ StmtId stmtId,
+ bool fromMigrate,
+ const boost::optional<BSONObj>& deletedDoc) {
+    if (nss == VersionType::ConfigNS) {
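+        // The config.version document is only allowed to disappear as part of rollback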
+ if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
+ uasserted(40302, "cannot delete config.version document while in --configsvr mode");
+ } else {
+ // Throw out any cached information related to the cluster ID.
+ ShardingCatalogManager::get(opCtx)->discardCachedConfigDatabaseInitializationState();
+ ClusterIdentityLoader::get(opCtx)->discardCachedClusterId();
+ }
+ }
+}
+
+repl::OpTime ConfigServerOpObserver::onDropCollection(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ OptionalCollectionUUID uuid) {
+ if (collectionName == VersionType::ConfigNS) {
+ if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
+            uasserted(40303, "cannot drop config.version collection while in --configsvr mode");
+ } else {
+ // Throw out any cached information related to the cluster ID.
+ ShardingCatalogManager::get(opCtx)->discardCachedConfigDatabaseInitializationState();
+ ClusterIdentityLoader::get(opCtx)->discardCachedClusterId();
+ }
+ }
+
+ return {};
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/s/config_server_op_observer.h b/src/mongo/db/s/config_server_op_observer.h
new file mode 100644
index 00000000000..6bea48f4a11
--- /dev/null
+++ b/src/mongo/db/s/config_server_op_observer.h
@@ -0,0 +1,129 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/disallow_copying.h"
+#include "mongo/db/op_observer.h"
+
+namespace mongo {
+
+/**
+ * OpObserver which is installed on the op observer chain when the server is running as a config
+ * server (--configsvr).
+ */
+class ConfigServerOpObserver final : public OpObserver {
+ MONGO_DISALLOW_COPYING(ConfigServerOpObserver);
+
+public:
+ ConfigServerOpObserver();
+ ~ConfigServerOpObserver();
+
+ void onCreateIndex(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ BSONObj indexDoc,
+ bool fromMigrate) override {}
+
+ void onInserts(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ std::vector<InsertStatement>::const_iterator begin,
+ std::vector<InsertStatement>::const_iterator end,
+ bool fromMigrate) override {}
+
+ void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override {}
+
+ void aboutToDelete(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& doc) override {}
+
+ void onDelete(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ StmtId stmtId,
+ bool fromMigrate,
+ const boost::optional<BSONObj>& deletedDoc) override;
+
+ void onInternalOpMessage(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const boost::optional<UUID> uuid,
+ const BSONObj& msgObj,
+ const boost::optional<BSONObj> o2MsgObj) override {}
+
+ void onCreateCollection(OperationContext* opCtx,
+ Collection* coll,
+ const NamespaceString& collectionName,
+ const CollectionOptions& options,
+ const BSONObj& idIndex) override {}
+
+ void onCollMod(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ const BSONObj& collModCmd,
+ const CollectionOptions& oldCollOptions,
+ boost::optional<TTLCollModInfo> ttlInfo) override {}
+
+ void onDropDatabase(OperationContext* opCtx, const std::string& dbName) override {}
+
+ repl::OpTime onDropCollection(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ OptionalCollectionUUID uuid) override;
+
+ void onDropIndex(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ const std::string& indexName,
+ const BSONObj& indexInfo) override {}
+
+ repl::OpTime onRenameCollection(OperationContext* opCtx,
+ const NamespaceString& fromCollection,
+ const NamespaceString& toCollection,
+ OptionalCollectionUUID uuid,
+ bool dropTarget,
+ OptionalCollectionUUID dropTargetUUID,
+ bool stayTemp) override {
+ return repl::OpTime();
+ }
+
+ void onApplyOps(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& applyOpCmd) override {}
+
+ void onEmptyCapped(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ OptionalCollectionUUID uuid) override {}
+
+ void onTransactionCommit(OperationContext* opCtx) override {}
+
+ void onTransactionAbort(OperationContext* opCtx) override {}
+
+    void onReplicationRollback(OperationContext* opCtx,
+                               const RollbackObserverInfo& rbInfo) override {}
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 9c8e670dc2b..de7b0adce82 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -31,7 +31,6 @@
#include <boost/optional.hpp>
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/dbdirectclient.h"
@@ -42,11 +41,10 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/executor/task_executor.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/shard_server_test_fixture.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
@@ -59,29 +57,17 @@ using unittest::assertGet;
const NamespaceString kNss("TestDB", "TestColl");
const std::string kPattern = "key";
-const BSONObj kShardKeyPatternBSON{BSON(kPattern << 1)};
-const KeyPattern kShardKeyPattern{kShardKeyPatternBSON};
+const KeyPattern kShardKeyPattern(BSON(kPattern << 1));
const std::string kThisShard{"thisShard"};
const std::string kOtherShard{"otherShard"};
-const HostAndPort dummyHost("dummy", 123);
-class MetadataManagerTest : public ShardingMongodTestFixture {
+class MetadataManagerTest : public ShardServerTestFixture {
protected:
void setUp() override {
- ShardingMongodTestFixture::setUp();
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost))
- .transitional_ignore();
-
- configTargeter()->setFindHostReturnValue(dummyHost);
-
+ ShardServerTestFixture::setUp();
_manager = std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
}
- std::shared_ptr<RemoteCommandTargeterMock> configTargeter() const {
- return RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
- }
-
static std::unique_ptr<CollectionMetadata> makeEmptyMetadata() {
const OID epoch = OID::gen();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index 34d91019d5e..49aec18b0fd 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -32,12 +32,11 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/shard_server_test_fixture.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -64,23 +63,17 @@ const ConnectionString kRecipientConnStr =
HostAndPort("RecipientHost2:1234"),
HostAndPort("RecipientHost3:1234")});
-class MigrationChunkClonerSourceLegacyTest : public ShardingMongodTestFixture {
+class MigrationChunkClonerSourceLegacyTest : public ShardServerTestFixture {
protected:
void setUp() override {
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- ShardingMongodTestFixture::setUp();
+ ShardServerTestFixture::setUp();
// TODO: SERVER-26919 set the flag on the mock repl coordinator just for the window where it
// actually needs to bypass the op observer.
replicationCoordinator()->alwaysAllowWrites(true);
- ASSERT_OK(initializeGlobalShardingStateForMongodForTest(kConfigConnStr));
-
_client.emplace(operationContext());
- RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter())
- ->setConnectionStringReturnValue(kConfigConnStr);
-
{
auto donorShard = assertGet(
shardRegistry()->getShard(operationContext(), kDonorConnStr.getSetName()));
@@ -103,7 +96,7 @@ protected:
void tearDown() override {
_client.reset();
- ShardingMongodTestFixture::tearDown();
+ ShardServerTestFixture::tearDown();
}
/**
diff --git a/src/mongo/db/s/session_catalog_migration_destination_test.cpp b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
index 698f0464029..f1a2b422305 100644
--- a/src/mongo/db/s/session_catalog_migration_destination_test.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
@@ -49,13 +49,12 @@
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/shard_server_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
-
namespace {
using executor::RemoteCommandRequest;
@@ -113,16 +112,12 @@ repl::OplogEntry extractInnerOplog(const repl::OplogEntry& oplog) {
return oplogStatus.getValue();
}
-class SessionCatalogMigrationDestinationTest : public ShardingMongodTestFixture {
+class SessionCatalogMigrationDestinationTest : public ShardServerTestFixture {
public:
void setUp() override {
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- ShardingMongodTestFixture::setUp();
-
- ASSERT_OK(initializeGlobalShardingStateForMongodForTest(kConfigConnStr));
+ ShardServerTestFixture::setUp();
- RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter())
- ->setConnectionStringReturnValue(kConfigConnStr);
+ _migrationId = MigrationSessionId::generate("donor", "recipient");
{
auto donorShard = assertGet(
@@ -133,8 +128,6 @@ public:
->setFindHostReturnValue(kDonorConnStr.getServers()[0]);
}
- _migrationId = MigrationSessionId::generate("donor", "recipient");
-
SessionCatalog::create(getServiceContext());
SessionCatalog::get(getServiceContext())->onStepUp(operationContext());
LogicalSessionCache::set(getServiceContext(), stdx::make_unique<LogicalSessionCacheNoop>());
@@ -142,7 +135,7 @@ public:
void tearDown() override {
SessionCatalog::reset_forTest(getServiceContext());
- ShardingMongodTestFixture::tearDown();
+ ShardServerTestFixture::tearDown();
}
void returnOplog(const std::vector<OplogEntry>& oplogList) {
diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp
new file mode 100644
index 00000000000..6c0d678d85f
--- /dev/null
+++ b/src/mongo/db/s/shard_server_op_observer.cpp
@@ -0,0 +1,348 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/s/shard_server_op_observer.h"
+
+#include "mongo/bson/util/bson_extract.h"
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/db/s/shard_identity_rollback_notifier.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/s/type_shard_identity.h"
+#include "mongo/s/balancer_configuration.h"
+#include "mongo/s/catalog/type_shard_collection.h"
+#include "mongo/s/catalog_cache_loader.h"
+#include "mongo/s/grid.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+namespace {
+
+using DeleteState = CollectionShardingState::DeleteState;
+
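+// Stashes the DeleteState computed by aboutToDelete() on the operation context, so that the
+// subsequent onDelete() call for the same document can retrieve it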
+const OperationContext::Decoration<DeleteState> getDeleteState =
+ OperationContext::declareDecoration<DeleteState>();
+
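+// Returns true when this node is either not a member of a replica set or is the current primary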
+bool isStandaloneOrPrimary(OperationContext* opCtx) {
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
+ const bool isReplSet =
+ replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
+ return !isReplSet || (repl::ReplicationCoordinator::get(opCtx)->getMemberState() ==
+ repl::MemberState::RS_PRIMARY);
+}
+
+/**
+ * Used to notify the catalog cache loader of a new collection version and invalidate the in-memory
+ * routing table cache once the oplog updates are committed and become visible.
+ */
+class CollectionVersionLogOpHandler final : public RecoveryUnit::Change {
+public:
+ CollectionVersionLogOpHandler(OperationContext* opCtx, const NamespaceString& nss)
+ : _opCtx(opCtx), _nss(nss) {}
+
+ void commit() override {
+ invariant(_opCtx->lockState()->isCollectionLockedForMode(_nss.ns(), MODE_IX));
+
+ CatalogCacheLoader::get(_opCtx).notifyOfCollectionVersionUpdate(_nss);
+
+ // This is a hack to get around CollectionShardingState::refreshMetadata() requiring the X
+ // lock: markNotShardedAtStepdown() doesn't have a lock check. Temporary measure until
+ // SERVER-31595 removes the X lock requirement.
+ CollectionShardingState::get(_opCtx, _nss)->markNotShardedAtStepdown();
+ }
+
+ void rollback() override {}
+
+private:
+ OperationContext* _opCtx;
+ const NamespaceString _nss;
+};
+
+/**
+ * Used to perform shard identity initialization once it is certain that the document is committed.
+ */
+class ShardIdentityLogOpHandler final : public RecoveryUnit::Change {
+public:
+ ShardIdentityLogOpHandler(OperationContext* opCtx, ShardIdentityType shardIdentity)
+ : _opCtx(opCtx), _shardIdentity(std::move(shardIdentity)) {}
+
+ void commit() override {
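+        // Sharding initialization from a committed shard identity document must succeed; failure
+        // here is fatal and terminates the server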
+ fassertNoTrace(
+ 40071, ShardingState::get(_opCtx)->initializeFromShardIdentity(_opCtx, _shardIdentity));
+ }
+
+ void rollback() override {}
+
+private:
+ OperationContext* _opCtx;
+ const ShardIdentityType _shardIdentity;
+};
+
+/**
+ * Invalidates the in-memory routing table cache when a collection is dropped, so the next caller
+ * that needs routing information will provoke a routing table refresh and see the drop.
+ *
+ * The query parameter must contain an _id field that identifies which config.collections entry is
+ * being deleted.
+ *
+ * This only runs on secondaries.
+ * The global exclusive lock is expected to be held by the caller.
+ */
+void onConfigDeleteInvalidateCachedMetadataAndNotify(OperationContext* opCtx,
+ const BSONObj& query) {
+    // Notifications of routing table changes are only needed on secondaries
+ if (isStandaloneOrPrimary(opCtx)) {
+ return;
+ }
+
+ // Extract which collection entry is being deleted from the _id field.
+ std::string deletedCollection;
+ fassert(40479,
+ bsonExtractStringField(query, ShardCollectionType::ns.name(), &deletedCollection));
+ const NamespaceString deletedNss(deletedCollection);
+
+ // Need the WUOW to retain the lock for CollectionVersionLogOpHandler::commit().
+ AutoGetCollection autoColl(opCtx, deletedNss, MODE_IX);
+
+ opCtx->recoveryUnit()->registerChange(new CollectionVersionLogOpHandler(opCtx, deletedNss));
+}
+
+/**
+ * Returns true if the total number of bytes written to the specified chunk warrants a split, based
+ * on the configured maximum chunk size.
+ */
+bool shouldSplitChunk(OperationContext* opCtx,
+ const ShardKeyPattern& shardKeyPattern,
+ const Chunk& chunk) {
+ const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
+ invariant(balancerConfig);
+
+ const auto& keyPattern = shardKeyPattern.getKeyPattern();
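+    // Determine whether the chunk's bounds coincide with the global minimum or maximum of the
+    // shard key space, since the split heuristic treats such extreme chunks specially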
+ const bool minIsInf = (0 == keyPattern.globalMin().woCompare(chunk.getMin()));
+ const bool maxIsInf = (0 == keyPattern.globalMax().woCompare(chunk.getMax()));
+
+ return chunk.shouldSplit(balancerConfig->getMaxChunkSizeBytes(), minIsInf, maxIsInf);
+}
+
+/**
+ * Finds the chunk that contains the specified document and increments the size tracked for that
+ * chunk by the specified amount of data written, in bytes. Must only be called for sharded
+ * collections, since it requires the collection's chunk manager.
+ */
+void incrementChunkOnInsertOrUpdate(OperationContext* opCtx,
+ const ChunkManager& chunkManager,
+ const BSONObj& document,
+ long dataWritten) {
+ const auto& shardKeyPattern = chunkManager.getShardKeyPattern();
+
+    // Each inserted/updated document should contain the shard key. The only case in which a
+    // document could lack the shard key is if the insert/update was performed directly against
+    // mongod, as opposed to being routed through mongos.
+ BSONObj shardKey = shardKeyPattern.extractShardKeyFromDoc(document);
+ if (shardKey.woCompare(BSONObj()) == 0) {
+ warning() << "inserting document " << document.toString() << " without shard key pattern "
+ << shardKeyPattern << " into a sharded collection";
+ return;
+ }
+
+ // Use the shard key to locate the chunk into which the document was updated, and increment the
+ // number of bytes tracked for the chunk.
+ //
+ // Note that we can assume the simple collation, because shard keys do not support non-simple
+ // collations.
+ auto chunk = chunkManager.findIntersectingChunkWithSimpleCollation(shardKey);
+ chunk->addBytesWritten(dataWritten);
+
+    // If the chunk becomes too large, the ChunkSplitter should be called to schedule a split, and
+    // the tracking for that chunk reset to 0.
+ if (shouldSplitChunk(opCtx, shardKeyPattern, *chunk)) {
+ // TODO: call ChunkSplitter here
+ chunk->clearBytesWritten();
+ }
+}
+
+} // namespace
+
+ShardServerOpObserver::ShardServerOpObserver() = default;
+
+ShardServerOpObserver::~ShardServerOpObserver() = default;
+
+void ShardServerOpObserver::onInserts(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ std::vector<InsertStatement>::const_iterator begin,
+ std::vector<InsertStatement>::const_iterator end,
+ bool fromMigrate) {
+ auto const css = CollectionShardingState::get(opCtx, nss);
+ const auto metadata = css->getMetadata();
+
+ for (auto it = begin; it != end; ++it) {
+ const auto& insertedDoc = it->doc;
+
+ if (nss == NamespaceString::kServerConfigurationNamespace) {
+ if (auto idElem = insertedDoc["_id"]) {
+ if (idElem.str() == ShardIdentityType::IdName) {
+ auto shardIdentityDoc =
+ uassertStatusOK(ShardIdentityType::fromBSON(insertedDoc));
+ uassertStatusOK(shardIdentityDoc.validate());
+ opCtx->recoveryUnit()->registerChange(
+ new ShardIdentityLogOpHandler(opCtx, std::move(shardIdentityDoc)));
+ }
+ }
+ }
+
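+        // For sharded collections, attribute the inserted bytes to the chunk that owns the
+        // document so that chunk growth can be tracked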
+ if (metadata) {
+ incrementChunkOnInsertOrUpdate(
+ opCtx, *metadata->getChunkManager(), insertedDoc, insertedDoc.objsize());
+ }
+ }
+}
+
+void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) {
+ auto const css = CollectionShardingState::get(opCtx, args.nss);
+ const auto metadata = css->getMetadata();
+
+ if (args.nss.ns() == NamespaceString::kShardConfigCollectionsCollectionName) {
+        // Notifications of routing table changes are only needed on secondaries
+ if (isStandaloneOrPrimary(opCtx)) {
+ return;
+ }
+
+ // This logic runs on updates to the shard's persisted cache of the config server's
+ // config.collections collection.
+ //
+        // If an update occurs to the 'lastRefreshedCollectionVersion' field, it notifies the
+        // catalog
+ // cache loader of a new collection version and clears the routing table so the next caller
+ // with routing information will provoke a routing table refresh.
+ //
+ // When 'lastRefreshedCollectionVersion' is in 'update', it means that a chunk metadata
+ // refresh has finished being applied to the collection's locally persisted metadata store.
+ //
+        // If an update occurs to the 'enterCriticalSectionCounter' field, simply clear the routing
+ // table immediately. This will provoke the next secondary caller to refresh through the
+ // primary, blocking behind the critical section.
+
+ // Extract which user collection was updated
+ const auto updatedNss([&] {
+ std::string coll;
+ fassertStatusOK(
+ 40477,
+ bsonExtractStringField(args.criteria, ShardCollectionType::ns.name(), &coll));
+ return NamespaceString(coll);
+ }());
+
+ // Parse the '$set' update
+ BSONElement setElement;
+ Status setStatus =
+ bsonExtractTypedField(args.update, StringData("$set"), Object, &setElement);
+ if (setStatus.isOK()) {
+ BSONObj setField = setElement.Obj();
+
+ // Need the WUOW to retain the lock for CollectionVersionLogOpHandler::commit()
+ AutoGetCollection autoColl(opCtx, updatedNss, MODE_IX);
+
+ if (setField.hasField(ShardCollectionType::lastRefreshedCollectionVersion.name())) {
+ opCtx->recoveryUnit()->registerChange(
+ new CollectionVersionLogOpHandler(opCtx, updatedNss));
+ }
+
+ if (setField.hasField(ShardCollectionType::enterCriticalSectionCounter.name())) {
+ // This is a hack to get around CollectionShardingState::refreshMetadata() requiring
+ // the X lock: markNotShardedAtStepdown() doesn't have a lock check. Temporary
+ // measure until SERVER-31595 removes the X lock requirement.
+ CollectionShardingState::get(opCtx, updatedNss)->markNotShardedAtStepdown();
+ }
+ }
+ }
+
+ if (metadata) {
+ incrementChunkOnInsertOrUpdate(
+ opCtx, *metadata->getChunkManager(), args.updatedDoc, args.updatedDoc.objsize());
+ }
+}
+
+void ShardServerOpObserver::aboutToDelete(OperationContext* opCtx,
+ NamespaceString const& nss,
+ BSONObj const& doc) {
+ auto& deleteState = getDeleteState(opCtx);
+ auto* css = CollectionShardingState::get(opCtx, nss.ns());
+ deleteState = css->makeDeleteState(doc);
+}
+
+void ShardServerOpObserver::onDelete(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ StmtId stmtId,
+ bool fromMigrate,
+ const boost::optional<BSONObj>& deletedDoc) {
+ auto& deleteState = getDeleteState(opCtx);
+
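+    // A delete from the shard's persisted cache of the config server's config.collections
+    // collection invalidates the cached routing information for the dropped collection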
+ if (nss.ns() == NamespaceString::kShardConfigCollectionsCollectionName) {
+ onConfigDeleteInvalidateCachedMetadataAndNotify(opCtx, deleteState.documentKey);
+ }
+
+ if (nss == NamespaceString::kServerConfigurationNamespace) {
+ if (auto idElem = deleteState.documentKey["_id"]) {
+ auto idStr = idElem.str();
+ if (idStr == ShardIdentityType::IdName) {
+ if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) {
+ uasserted(40070,
+ "cannot delete shardIdentity document while in --shardsvr mode");
+ } else {
+ warning() << "Shard identity document rolled back. Will shut down after "
+ "finishing rollback.";
+ ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
+ }
+ }
+ }
+ }
+}
+
+repl::OpTime ShardServerOpObserver::onDropCollection(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ OptionalCollectionUUID uuid) {
+ if (collectionName == NamespaceString::kServerConfigurationNamespace) {
+ // Dropping system collections is not allowed for end users
+ invariant(!opCtx->writesAreReplicated());
+ invariant(repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback());
+
+        // It cannot yet be confirmed whether a ShardIdentity document was present, so assume there
+        // was one and shut down the process to clear the in-memory sharding state
+ warning() << "admin.system.version collection rolled back. Will shut down after finishing "
+ "rollback";
+
+ ShardIdentityRollbackNotifier::get(opCtx)->recordThatRollbackHappened();
+ }
+
+ return {};
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/s/shard_server_op_observer.h b/src/mongo/db/s/shard_server_op_observer.h
new file mode 100644
index 00000000000..d15a7266668
--- /dev/null
+++ b/src/mongo/db/s/shard_server_op_observer.h
@@ -0,0 +1,129 @@
+/**
+ * Copyright (C) 2018 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/disallow_copying.h"
+#include "mongo/db/op_observer.h"
+
+namespace mongo {
+
+/**
+ * OpObserver which is installed on the op observer chain when the server is running as a shard
+ * server (--shardsvr).
+ */
+class ShardServerOpObserver final : public OpObserver {
+ MONGO_DISALLOW_COPYING(ShardServerOpObserver);
+
+public:
+ ShardServerOpObserver();
+ ~ShardServerOpObserver();
+
+ void onCreateIndex(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ BSONObj indexDoc,
+ bool fromMigrate) override {}
+
+ void onInserts(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ std::vector<InsertStatement>::const_iterator begin,
+ std::vector<InsertStatement>::const_iterator end,
+ bool fromMigrate) override;
+
+ void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override;
+
+ void aboutToDelete(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const BSONObj& doc) override;
+
+ void onDelete(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ StmtId stmtId,
+ bool fromMigrate,
+ const boost::optional<BSONObj>& deletedDoc) override;
+
+ void onInternalOpMessage(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const boost::optional<UUID> uuid,
+ const BSONObj& msgObj,
+ const boost::optional<BSONObj> o2MsgObj) override {}
+
+ void onCreateCollection(OperationContext* opCtx,
+ Collection* coll,
+ const NamespaceString& collectionName,
+ const CollectionOptions& options,
+ const BSONObj& idIndex) override {}
+
+ void onCollMod(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ const BSONObj& collModCmd,
+ const CollectionOptions& oldCollOptions,
+ boost::optional<TTLCollModInfo> ttlInfo) override {}
+
+ void onDropDatabase(OperationContext* opCtx, const std::string& dbName) override {}
+
+ repl::OpTime onDropCollection(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ OptionalCollectionUUID uuid) override;
+
+ void onDropIndex(OperationContext* opCtx,
+ const NamespaceString& nss,
+ OptionalCollectionUUID uuid,
+ const std::string& indexName,
+ const BSONObj& indexInfo) override {}
+
+ repl::OpTime onRenameCollection(OperationContext* opCtx,
+ const NamespaceString& fromCollection,
+ const NamespaceString& toCollection,
+ OptionalCollectionUUID uuid,
+ bool dropTarget,
+ OptionalCollectionUUID dropTargetUUID,
+ bool stayTemp) override {
+ return repl::OpTime();
+ }
+
+ void onApplyOps(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& applyOpCmd) override {}
+
+ void onEmptyCapped(OperationContext* opCtx,
+ const NamespaceString& collectionName,
+ OptionalCollectionUUID uuid) override {}
+
+ void onTransactionCommit(OperationContext* opCtx) override {}
+
+ void onTransactionAbort(OperationContext* opCtx) override {}
+
+    void onReplicationRollback(OperationContext* opCtx,
+                               const RollbackObserverInfo& rbInfo) override {}
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index 370a792d716..1b29fe5ceae 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -29,48 +29,37 @@
#include "mongo/platform/basic.h"
#include "mongo/client/remote_command_targeter_mock.h"
-#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/namespace_string.h"
+#include "mongo/db/op_observer_impl.h"
+#include "mongo/db/op_observer_registry.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
+#include "mongo/db/s/config_server_op_observer.h"
#include "mongo/db/s/shard_server_catalog_cache_loader.h"
+#include "mongo/db/s/shard_server_op_observer.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/server_options.h"
-#include "mongo/db/service_context_noop.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
-#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config_server_catalog_cache_loader.h"
#include "mongo/s/sharding_mongod_test_fixture.h"
namespace mongo {
+namespace {
using executor::RemoteCommandRequest;
-namespace {
+const std::string kShardName("a");
class ShardingStateTest : public ShardingMongodTestFixture {
-public:
- ShardingState* shardingState() {
- return &_shardingState;
- }
-
- std::string shardName() const {
- return _shardName.toString();
- }
-
protected:
// Used to write to set up local collections before exercising server logic.
std::unique_ptr<DBDirectClient> _dbDirectClient;
void setUp() override {
- _shardName = ShardId("a");
-
serverGlobalParams.clusterRole = ClusterRole::None;
ShardingMongodTestFixture::setUp();
@@ -88,8 +77,9 @@ protected:
if (!status.isOK()) {
return status;
}
- // Set the ConnectionString return value on the mock targeter so that later calls to
- // the targeter's getConnString() return the appropriate value.
+
+ // Set the ConnectionString return value on the mock targeter so that later calls to the
+ // targeter's getConnString() return the appropriate value
auto configTargeter =
RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
configTargeter->setConnectionStringReturnValue(configConnStr);
@@ -104,12 +94,9 @@ protected:
void tearDown() override {
_dbDirectClient.reset();
- // Some test cases modify the readOnly value, but the teardown calls below depend on
- // readOnly being false, so we reset the value here rather than in setUp().
+ // Restore the defaults before calling tearDown
storageGlobalParams.readOnly = false;
-
- // ShardingState initialize can modify ReplicaSetMonitor state.
- ReplicaSetMonitor::cleanup();
+ serverGlobalParams.overrideShardIdentity = BSONObj();
CatalogCacheLoader::clearForTests(getServiceContext());
@@ -127,27 +114,55 @@ protected:
return stdx::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
}
- std::unique_ptr<CatalogCache> makeCatalogCache() override {
- return stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(getServiceContext()));
+ ShardingState* shardingState() {
+ return &_shardingState;
}
private:
ShardingState _shardingState;
- ShardId _shardName;
+};
+
+/**
+ * Emulates the server being started as a standalone node for the duration of the scope in which
+ * it is used.
+ */
+class ScopedSetStandaloneMode {
+public:
+ ScopedSetStandaloneMode(ServiceContext* serviceContext) : _serviceContext(serviceContext) {
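+        // Clear the cluster role and uninstall all op observers so writes bypass sharding logic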
+ serverGlobalParams.clusterRole = ClusterRole::None;
+ _serviceContext->setOpObserver(stdx::make_unique<OpObserverRegistry>());
+ }
+
+ ~ScopedSetStandaloneMode() {
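+        // Restore the shard server role and reinstall the full op observer chain on scope exit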
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+ auto makeOpObserver = [&] {
+ auto opObserver = stdx::make_unique<OpObserverRegistry>();
+ opObserver->addObserver(stdx::make_unique<OpObserverImpl>());
+ opObserver->addObserver(stdx::make_unique<ConfigServerOpObserver>());
+ opObserver->addObserver(stdx::make_unique<ShardServerOpObserver>());
+ return opObserver;
+ };
+
+ _serviceContext->setOpObserver(makeOpObserver());
+ }
+
+private:
+ ServiceContext* const _serviceContext;
};
TEST_F(ShardingStateTest, ValidShardIdentitySucceeds) {
+ // Must hold a lock to call initializeFromShardIdentity.
Lock::GlobalWrite lk(operationContext());
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
+ shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(OID::gen());
ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity));
ASSERT_TRUE(shardingState()->enabled());
- ASSERT_EQ(shardName(), shardingState()->getShardName());
+ ASSERT_EQ(kShardName, shardingState()->getShardName());
ASSERT_EQ("config/a:1,b:2", shardRegistry()->getConfigServerConnectionString().toString());
}
@@ -158,7 +173,7 @@ TEST_F(ShardingStateTest, InitWhilePreviouslyInErrorStateWillStayInErrorState) {
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
+ shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(OID::gen());
shardingState()->setGlobalInitMethodForTest(
@@ -196,7 +211,7 @@ TEST_F(ShardingStateTest, InitializeAgainWithMatchingShardIdentitySucceeds) {
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
+ shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(clusterID);
ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity));
@@ -204,7 +219,7 @@ TEST_F(ShardingStateTest, InitializeAgainWithMatchingShardIdentitySucceeds) {
ShardIdentityType shardIdentity2;
shardIdentity2.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity2.setShardName(shardName());
+ shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
shardingState()->setGlobalInitMethodForTest(
@@ -215,7 +230,7 @@ TEST_F(ShardingStateTest, InitializeAgainWithMatchingShardIdentitySucceeds) {
ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity2));
ASSERT_TRUE(shardingState()->enabled());
- ASSERT_EQ(shardName(), shardingState()->getShardName());
+ ASSERT_EQ(kShardName, shardingState()->getShardName());
ASSERT_EQ("config/a:1,b:2", shardRegistry()->getConfigServerConnectionString().toString());
}
@@ -227,7 +242,7 @@ TEST_F(ShardingStateTest, InitializeAgainWithSameReplSetNameSucceeds) {
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
+ shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(clusterID);
ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity));
@@ -235,7 +250,7 @@ TEST_F(ShardingStateTest, InitializeAgainWithSameReplSetNameSucceeds) {
ShardIdentityType shardIdentity2;
shardIdentity2.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "b:2,c:3", "config"));
- shardIdentity2.setShardName(shardName());
+ shardIdentity2.setShardName(kShardName);
shardIdentity2.setClusterId(clusterID);
shardingState()->setGlobalInitMethodForTest(
@@ -246,21 +261,18 @@ TEST_F(ShardingStateTest, InitializeAgainWithSameReplSetNameSucceeds) {
ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity2));
ASSERT_TRUE(shardingState()->enabled());
- ASSERT_EQ(shardName(), shardingState()->getShardName());
+ ASSERT_EQ(kShardName, shardingState()->getShardName());
ASSERT_EQ("config/a:1,b:2", shardRegistry()->getConfigServerConnectionString().toString());
}
-// The below tests check for compatible startup parameters for --shardsvr, --overrideShardIdentity,
-// and queryableBackup (readOnly) mode.
+// The tests below check for different combinations of the compatible startup parameters for
+// --shardsvr, --overrideShardIdentity, and queryableBackup (readOnly) mode
// readOnly and --shardsvr
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededReadOnlyAndShardServerAndNoOverrideShardIdentity) {
storageGlobalParams.readOnly = true;
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- serverGlobalParams.overrideShardIdentity = BSONObj();
-
auto swShardingInitialized =
shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
ASSERT_EQUALS(ErrorCodes::InvalidOptions, swShardingInitialized.getStatus().code());
@@ -269,7 +281,6 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededReadOnlyAndShardServerAndInvalidOverrideShardIdentity) {
storageGlobalParams.readOnly = true;
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
serverGlobalParams.overrideShardIdentity = BSON("_id"
<< "shardIdentity"
<< "configsvrConnectionString"
@@ -287,7 +298,7 @@ TEST_F(ShardingStateTest,
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
+ shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(OID::gen());
ASSERT_OK(shardIdentity.validate());
serverGlobalParams.overrideShardIdentity = shardIdentity.toBSON();
@@ -304,7 +315,6 @@ TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededReadOnlyAndNotShardServerAndNoOverrideShardIdentity) {
storageGlobalParams.readOnly = true;
serverGlobalParams.clusterRole = ClusterRole::None;
- serverGlobalParams.overrideShardIdentity = BSONObj();
auto swShardingInitialized =
shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
@@ -317,11 +327,11 @@ TEST_F(
InitializeShardingAwarenessIfNeededReadOnlyAndNotShardServerAndInvalidOverrideShardIdentity) {
storageGlobalParams.readOnly = true;
serverGlobalParams.clusterRole = ClusterRole::None;
+
serverGlobalParams.overrideShardIdentity = BSON("_id"
<< "shardIdentity"
<< "configsvrConnectionString"
<< "invalid");
-
auto swShardingInitialized =
shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
ASSERT_EQUALS(ErrorCodes::InvalidOptions, swShardingInitialized.getStatus().code());
@@ -335,7 +345,7 @@ TEST_F(ShardingStateTest,
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
+ shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(OID::gen());
ASSERT_OK(shardIdentity.validate());
serverGlobalParams.overrideShardIdentity = shardIdentity.toBSON();
@@ -349,7 +359,6 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededNotReadOnlyAndInvalidOverrideShardIdentity) {
- storageGlobalParams.readOnly = false;
serverGlobalParams.overrideShardIdentity = BSON("_id"
<< "shardIdentity"
<< "configsvrConnectionString"
@@ -370,12 +379,10 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededNotReadOnlyAndValidOverrideShardIdentity) {
- storageGlobalParams.readOnly = false;
-
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
+ shardIdentity.setShardName(kShardName);
shardIdentity.setClusterId(OID::gen());
ASSERT_OK(shardIdentity.validate());
serverGlobalParams.overrideShardIdentity = shardIdentity.toBSON();
@@ -397,9 +404,6 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndNoShardIdentity) {
- storageGlobalParams.readOnly = false;
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- serverGlobalParams.overrideShardIdentity = BSONObj();
auto swShardingInitialized =
shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
ASSERT_OK(swShardingInitialized);
@@ -408,23 +412,19 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndInvalidShardIdentity) {
-
- ASSERT_OK(replicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY));
-
- // Insert the shardIdentity doc to disk before setting the clusterRole, since if the clusterRole
- // is ShardServer, the OpObserver for inserts will prevent the insert from occurring, since the
- // shardIdentity doc is invalid.
- serverGlobalParams.clusterRole = ClusterRole::None;
- BSONObj invalidShardIdentity = BSON("_id"
- << "shardIdentity"
- << "configsvrConnectionString"
- << "invalid");
- _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
- invalidShardIdentity);
-
- storageGlobalParams.readOnly = false;
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- serverGlobalParams.overrideShardIdentity = BSONObj();
+ // Insert the shardIdentity doc to disk while pretending that we are in "standalone" mode,
+    // otherwise the OpObserver for inserts would prevent the insert from occurring because the
+    // shardIdentity doc is invalid.
+ {
+ ScopedSetStandaloneMode standalone(getServiceContext());
+
+ BSONObj invalidShardIdentity = BSON("_id"
+ << "shardIdentity"
+ << "configsvrConnectionString"
+ << "invalid");
+ _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
+ invalidShardIdentity);
+ }
auto swShardingInitialized =
shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
@@ -434,28 +434,25 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndValidShardIdentity) {
-
- ASSERT_OK(replicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY));
-
- // Insert the shardIdentity doc to disk before setting the clusterRole, since if the clusterRole
- // is ShardServer, the OpObserver for inserts will trigger sharding initialization from the
- // inserted doc.
- serverGlobalParams.clusterRole = ClusterRole::None;
-
- ShardIdentityType shardIdentity;
- shardIdentity.setConfigsvrConnString(
- ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
- shardIdentity.setClusterId(OID::gen());
- ASSERT_OK(shardIdentity.validate());
- BSONObj validShardIdentity = shardIdentity.toBSON();
-
- _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
- validShardIdentity);
-
- storageGlobalParams.readOnly = false;
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- serverGlobalParams.overrideShardIdentity = BSONObj();
+    // Insert the shardIdentity doc to disk while pretending that we are in "standalone" mode,
+    // otherwise the OpObserver for inserts would trigger sharding initialization from the
+    // inserted doc.
+ {
+ ScopedSetStandaloneMode standalone(getServiceContext());
+
+ BSONObj validShardIdentity = [&] {
+ ShardIdentityType shardIdentity;
+ shardIdentity.setConfigsvrConnString(
+ ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
+ shardIdentity.setShardName(kShardName);
+ shardIdentity.setClusterId(OID::gen());
+ ASSERT_OK(shardIdentity.validate());
+ return shardIdentity.toBSON();
+ }();
+
+ _dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
+ validShardIdentity);
+ }
auto swShardingInitialized =
shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
@@ -467,9 +464,8 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndNoShardIdentity) {
- storageGlobalParams.readOnly = false;
- serverGlobalParams.clusterRole = ClusterRole::None;
- serverGlobalParams.overrideShardIdentity = BSONObj();
+ ScopedSetStandaloneMode standalone(getServiceContext());
+
auto swShardingInitialized =
shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
ASSERT_OK(swShardingInitialized);
@@ -478,18 +474,16 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndInvalidShardIdentity) {
+ ScopedSetStandaloneMode standalone(getServiceContext());
+
_dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
BSON("_id"
<< "shardIdentity"
<< "configsvrConnectionString"
<< "invalid"));
- storageGlobalParams.readOnly = false;
- serverGlobalParams.clusterRole = ClusterRole::None;
- serverGlobalParams.overrideShardIdentity = BSONObj();
-
- // The shardIdentity doc on disk, even if invalid, is ignored if ClusterRole is None.
- // This is to allow fixing the shardIdentity doc by starting without --shardsvr.
+ // The shardIdentity doc on disk, even if invalid, is ignored if the ClusterRole is None. This
+ // is to allow fixing the shardIdentity doc by starting without --shardsvr.
auto swShardingInitialized =
shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
ASSERT_OK(swShardingInitialized);
@@ -498,17 +492,17 @@ TEST_F(ShardingStateTest,
TEST_F(ShardingStateTest,
InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndValidShardIdentity) {
- storageGlobalParams.readOnly = false;
- serverGlobalParams.clusterRole = ClusterRole::None;
- serverGlobalParams.overrideShardIdentity = BSONObj();
-
- ShardIdentityType shardIdentity;
- shardIdentity.setConfigsvrConnString(
- ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
- shardIdentity.setClusterId(OID::gen());
- ASSERT_OK(shardIdentity.validate());
- BSONObj validShardIdentity = shardIdentity.toBSON();
+ ScopedSetStandaloneMode standalone(getServiceContext());
+
+ BSONObj validShardIdentity = [&] {
+ ShardIdentityType shardIdentity;
+ shardIdentity.setConfigsvrConnString(
+ ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
+ shardIdentity.setShardName(kShardName);
+ shardIdentity.setClusterId(OID::gen());
+ ASSERT_OK(shardIdentity.validate());
+ return shardIdentity.toBSON();
+ }();
_dbDirectClient->insert(NamespaceString::kServerConfigurationNamespace.toString(),
validShardIdentity);
diff --git a/src/mongo/db/service_context_d_test_fixture.cpp b/src/mongo/db/service_context_d_test_fixture.cpp
index cc1401b6517..4f69e5eb7b9 100644
--- a/src/mongo/db/service_context_d_test_fixture.cpp
+++ b/src/mongo/db/service_context_d_test_fixture.cpp
@@ -41,8 +41,6 @@
#include "mongo/db/logical_clock.h"
#include "mongo/db/op_observer_noop.h"
#include "mongo/db/op_observer_registry.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/service_context_d.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/service_context_d_test_fixture.h b/src/mongo/db/service_context_d_test_fixture.h
index 402a1837724..35932b64814 100644
--- a/src/mongo/db/service_context_d_test_fixture.h
+++ b/src/mongo/db/service_context_d_test_fixture.h
@@ -28,13 +28,12 @@
#pragma once
+#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
-class ServiceContext;
-class OperationContext;
-
/**
* Test fixture class for tests that use either the "ephemeralForTest" or "devnull" storage engines.
*/
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 7339848dac1..49ccc44ba4d 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -148,18 +148,28 @@ env.Library(
)
env.Library(
- target='sharding_test_fixture',
+ target='sharding_test_fixture_common',
source=[
- 'sharding_test_fixture.cpp',
+ 'sharding_test_fixture_common.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/client/remote_command_targeter_mock',
- '$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
+ '$BUILD_DIR/mongo/executor/network_interface_mock',
+ '$BUILD_DIR/mongo/executor/network_test_env',
+ '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
+ '$BUILD_DIR/mongo/unittest/unittest',
+ ],
+)
+
+env.Library(
+ target='sharding_router_test_fixture',
+ source=[
+ 'sharding_router_test_fixture.cpp',
+ ],
+ LIBDEPS=[
'$BUILD_DIR/mongo/db/query/collation/collator_factory_mock',
'$BUILD_DIR/mongo/db/service_context_noop_init',
- '$BUILD_DIR/mongo/executor/network_test_env',
'$BUILD_DIR/mongo/executor/task_executor_pool',
- '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
'$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl',
'$BUILD_DIR/mongo/s/coreshard',
@@ -167,6 +177,7 @@ env.Library(
'$BUILD_DIR/mongo/util/clock_source_mock',
'sharding_egress_metadata_hook_for_mongos',
'sharding_task_executor',
+ 'sharding_test_fixture_common',
],
)
@@ -176,20 +187,11 @@ env.Library(
'sharding_mongod_test_fixture.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/client/remote_command_targeter_mock',
- '$BUILD_DIR/mongo/db/namespace_string',
'$BUILD_DIR/mongo/db/op_observer_d',
'$BUILD_DIR/mongo/db/repl/drop_pending_collection_reaper',
'$BUILD_DIR/mongo/db/repl/replmocks',
'$BUILD_DIR/mongo/db/service_context_d_test_fixture',
- '$BUILD_DIR/mongo/executor/network_test_env',
- '$BUILD_DIR/mongo/executor/task_executor_pool',
- '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/rpc/metadata',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
- '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl',
- '$BUILD_DIR/mongo/s/coreshard',
- '$BUILD_DIR/mongo/util/clock_source_mock',
+ 'sharding_test_fixture_common',
],
)
@@ -223,7 +225,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'cluster_last_error_info',
- 'sharding_test_fixture',
+ 'sharding_router_test_fixture',
],
)
@@ -324,8 +326,6 @@ env.Library(
'sharding_egress_metadata_hook.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/db/audit',
- '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface',
'$BUILD_DIR/mongo/util/concurrency/thread_pool',
'grid',
],
@@ -342,8 +342,8 @@ env.CppUnitTest(
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/query/query_test_service_context',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
'coreshard',
+ 'sharding_router_test_fixture',
]
)
@@ -385,7 +385,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'coreshard',
- 'sharding_test_fixture',
+ 'sharding_router_test_fixture',
]
)
@@ -396,7 +396,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'coreshard',
- 'sharding_test_fixture',
+ 'sharding_router_test_fixture',
]
)
diff --git a/src/mongo/s/balancer_configuration_test.cpp b/src/mongo/s/balancer_configuration_test.cpp
index da21e716f18..9e12adc6e7c 100644
--- a/src/mongo/s/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer_configuration_test.cpp
@@ -40,7 +40,7 @@
#include "mongo/rpc/metadata/tracking_metadata.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/s/catalog/SConscript b/src/mongo/s/catalog/SConscript
index 9960139fb73..e0b1f9b936a 100644
--- a/src/mongo/s/catalog/SConscript
+++ b/src/mongo/s/catalog/SConscript
@@ -46,7 +46,6 @@ env.Library(
'$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_interface',
'$BUILD_DIR/mongo/s/catalog/dist_lock_manager',
'$BUILD_DIR/mongo/s/client/sharding_client',
- '$BUILD_DIR/mongo/s/coreshard',
'$BUILD_DIR/mongo/util/fail_point'
],
)
@@ -129,7 +128,7 @@ env.CppUnitTest(
LIBDEPS=[
'$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_mock',
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_mock',
- '$BUILD_DIR/mongo/s/sharding_mongod_test_fixture',
+ '$BUILD_DIR/mongo/s/shard_server_test_fixture',
'dist_lock_catalog_impl',
'replset_dist_lock_manager',
]
@@ -143,6 +142,6 @@ env.CppUnitTest(
'sharding_catalog_write_retry_test.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
]
)
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
index e584944658d..fd66f088346 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl_test.cpp
@@ -30,20 +30,13 @@
#include <utility>
-#include "mongo/base/status.h"
-#include "mongo/base/status_with.h"
#include "mongo/bson/json.h"
-#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/query/find_and_modify_request.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/network_test_env.h"
-#include "mongo/executor/task_executor_pool.h"
-#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/s/catalog/dist_lock_catalog_impl.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
@@ -51,50 +44,31 @@
#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/client/shard_remote.h"
#include "mongo/s/grid.h"
-#include "mongo/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/shard_server_test_fixture.h"
#include "mongo/s/write_ops/batched_command_request.h"
-#include "mongo/stdx/future.h"
#include "mongo/stdx/memory.h"
-#include "mongo/stdx/thread.h"
#include "mongo/util/time_support.h"
namespace mongo {
+namespace {
-using std::vector;
using executor::NetworkInterfaceMock;
using executor::NetworkTestEnv;
using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
using repl::ReadConcernArgs;
-namespace {
-
const HostAndPort dummyHost("dummy", 123);
/**
- * Sets up the mocked out objects for testing the replica-set backed catalog manager.
+ * Sets up the mocked-out objects for testing the replica-set backed catalog manager.
+ *
+ * NOTE: Even though the dist lock manager only runs on the config server, this test uses
+ * ShardServerTestFixture and emulates the network for legacy reasons.
*/
-class DistLockCatalogFixture : public ShardingMongodTestFixture {
-public:
- std::shared_ptr<RemoteCommandTargeterMock> configTargeter() {
- return RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
- }
-
+class DistLockCatalogTest : public ShardServerTestFixture {
protected:
- void setUp() override {
- ShardingMongodTestFixture::setUp();
-
- // Initialize sharding components as a shard server.
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- uassertStatusOK(initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost)));
-
- // Set the findHost() return value on the mock targeter so that later calls to the
- // targeter's findHost() return the appropriate value.
- configTargeter()->setFindHostReturnValue(dummyHost);
- }
-
std::unique_ptr<DistLockCatalog> makeDistLockCatalog() override {
return stdx::make_unique<DistLockCatalogImpl>();
}
@@ -108,6 +82,20 @@ protected:
std::unique_ptr<DistLockManager> distLockManager) override {
return stdx::make_unique<ShardingCatalogClientMock>(std::move(distLockManager));
}
+
+ std::shared_ptr<RemoteCommandTargeterMock> configTargeter() {
+ return RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
+ }
+
+ auto launchOnSeparateThread(std::function<void(OperationContext*)> func) {
+ auto const serviceContext = getServiceContext();
+ return launchAsync([serviceContext, func] {
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto opCtx = Client::getCurrent()->makeOperationContext();
+ func(opCtx.get());
+ });
+ }
};
void checkReadConcern(const BSONObj& findCmd) {
@@ -116,10 +104,10 @@ void checkReadConcern(const BSONObj& findCmd) {
ASSERT(repl::ReadConcernLevel::kMajorityReadConcern == readConcernArgs.getLevel());
}
-TEST_F(DistLockCatalogFixture, BasicPing) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicPing) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t ping(dateFromISOString("2014-03-11T09:17:18.098Z").getValue());
- auto status = distLockCatalog()->ping(operationContext(), "abcd", ping);
+ auto status = distLockCatalog()->ping(opCtx, "abcd", ping);
ASSERT_OK(status);
});
@@ -154,13 +142,13 @@ TEST_F(DistLockCatalogFixture, BasicPing) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingTargetError) {
+TEST_F(DistLockCatalogTest, PingTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, PingRunCmdError) {
+TEST_F(DistLockCatalogTest, PingRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
@@ -168,9 +156,9 @@ TEST_F(DistLockCatalogFixture, PingRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, PingCommandError) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -186,9 +174,9 @@ TEST_F(DistLockCatalogFixture, PingCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingWriteError) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -204,9 +192,9 @@ TEST_F(DistLockCatalogFixture, PingWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingWriteConcernError) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -225,9 +213,9 @@ TEST_F(DistLockCatalogFixture, PingWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
});
@@ -247,9 +235,9 @@ TEST_F(DistLockCatalogFixture, PingUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, PingUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
- auto status = distLockCatalog()->ping(operationContext(), "abcd", Date_t::now());
+TEST_F(DistLockCatalogTest, PingUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
+ auto status = distLockCatalog()->ping(opCtx, "abcd", Date_t::now());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -261,14 +249,13 @@ TEST_F(DistLockCatalogFixture, PingUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockNoOp) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockNoOp) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
- auto resultStatus =
- distLockCatalog()
- ->grabLock(operationContext(), "test", myID, "me", "mongos", now, "because")
- .getStatus();
+ auto resultStatus = distLockCatalog()
+ ->grabLock(opCtx, "test", myID, "me", "mongos", now, "because")
+ .getStatus();
ASSERT_EQUALS(ErrorCodes::LockStateChangeFailed, resultStatus.code());
});
@@ -304,12 +291,12 @@ TEST_F(DistLockCatalogFixture, GrabLockNoOp) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWithNewDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWithNewDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
- auto resultStatus = distLockCatalog()->grabLock(
- operationContext(), "test", myID, "me", "mongos", now, "because");
+ auto resultStatus =
+ distLockCatalog()->grabLock(opCtx, "test", myID, "me", "mongos", now, "because");
ASSERT_OK(resultStatus.getStatus());
const auto& lockDoc = resultStatus.getValue();
@@ -368,12 +355,11 @@ TEST_F(DistLockCatalogFixture, GrabLockWithNewDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWithBadLockDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWithBadLockDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
- auto resultStatus = distLockCatalog()
- ->grabLock(operationContext(), "test", OID(), "", "", now, "")
- .getStatus();
+ auto resultStatus =
+ distLockCatalog()->grabLock(opCtx, "test", OID(), "", "", now, "").getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, resultStatus.code());
});
@@ -402,7 +388,7 @@ TEST_F(DistLockCatalogFixture, GrabLockWithBadLockDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockTargetError) {
+TEST_F(DistLockCatalogTest, GrabLockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
@@ -410,7 +396,7 @@ TEST_F(DistLockCatalogFixture, GrabLockTargetError) {
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, GrabLockRunCmdError) {
+TEST_F(DistLockCatalogTest, GrabLockRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()
@@ -420,10 +406,10 @@ TEST_F(DistLockCatalogFixture, GrabLockRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GrabLockCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ ->grabLock(opCtx, "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -440,10 +426,10 @@ TEST_F(DistLockCatalogFixture, GrabLockCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockDupKeyError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockDupKeyError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ ->grabLock(opCtx, "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::LockStateChangeFailed, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -460,10 +446,10 @@ TEST_F(DistLockCatalogFixture, GrabLockDupKeyError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWriteError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
- ->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
+ ->grabLock(opCtx, "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -480,8 +466,8 @@ TEST_F(DistLockCatalogFixture, GrabLockWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWriteConcernError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -503,8 +489,8 @@ TEST_F(DistLockCatalogFixture, GrabLockWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockWriteConcernErrorBadType) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockWriteConcernErrorBadType) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -524,8 +510,8 @@ TEST_F(DistLockCatalogFixture, GrabLockWriteConcernErrorBadType) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockResponseMissingValueField) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockResponseMissingValueField) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -542,8 +528,8 @@ TEST_F(DistLockCatalogFixture, GrabLockResponseMissingValueField) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -566,8 +552,8 @@ TEST_F(DistLockCatalogFixture, GrabLockUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GrabLockUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GrabLockUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()
->grabLock(operationContext(), "", OID::gen(), "", "", Date_t::now(), "")
.getStatus();
@@ -582,8 +568,8 @@ TEST_F(DistLockCatalogFixture, GrabLockUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockNoOp) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockNoOp) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
OID currentOwner("555f99712c99a78c5b083358");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
@@ -631,8 +617,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockNoOp) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockWithNewDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockWithNewDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID myID("555f80be366c194b13fb0372");
OID currentOwner("555f99712c99a78c5b083358");
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
@@ -700,8 +686,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockWithNewDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockWithBadLockDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockWithBadLockDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t now(dateFromISOString("2015-05-22T19:17:18.098Z").getValue());
auto resultStatus =
distLockCatalog()
@@ -735,7 +721,7 @@ TEST_F(DistLockCatalogFixture, OvertakeLockWithBadLockDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockTargetError) {
+TEST_F(DistLockCatalogTest, OvertakeLockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status =
distLockCatalog()
@@ -744,7 +730,7 @@ TEST_F(DistLockCatalogFixture, OvertakeLockTargetError) {
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockRunCmdError) {
+TEST_F(DistLockCatalogTest, OvertakeLockRunCmdError) {
shutdownExecutorPool();
auto status =
@@ -755,8 +741,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, OvertakeLockCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -776,8 +762,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockWriteError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -797,8 +783,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockWriteConcernError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -821,8 +807,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -846,8 +832,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, OvertakeLockUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, OvertakeLockUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()
->overtakeLock(operationContext(), "", OID(), OID(), "", "", Date_t::now(), "")
@@ -863,8 +849,8 @@ TEST_F(DistLockCatalogFixture, OvertakeLockUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicUnlock) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicUnlock) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()->unlock(operationContext(), OID("555f99712c99a78c5b083358"));
ASSERT_OK(status);
@@ -897,8 +883,8 @@ TEST_F(DistLockCatalogFixture, BasicUnlock) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicUnlockWithName) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicUnlockWithName) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(
operationContext(), OID("555f99712c99a78c5b083358"), "TestDB.TestColl");
ASSERT_OK(status);
@@ -931,8 +917,8 @@ TEST_F(DistLockCatalogFixture, BasicUnlockWithName) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockWithNoNewDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockWithNoNewDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status =
distLockCatalog()->unlock(operationContext(), OID("555f99712c99a78c5b083358"));
ASSERT_OK(status);
@@ -961,8 +947,8 @@ TEST_F(DistLockCatalogFixture, UnlockWithNoNewDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockWithNameWithNoNewDoc) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockWithNameWithNoNewDoc) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(
operationContext(), OID("555f99712c99a78c5b083358"), "TestDB.TestColl");
ASSERT_OK(status);
@@ -991,13 +977,13 @@ TEST_F(DistLockCatalogFixture, UnlockWithNameWithNoNewDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockTargetError) {
+TEST_F(DistLockCatalogTest, UnlockTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, UnlockRunCmdError) {
+TEST_F(DistLockCatalogTest, UnlockRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->unlock(operationContext(), OID());
@@ -1005,8 +991,8 @@ TEST_F(DistLockCatalogFixture, UnlockRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, UnlockCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1023,8 +1009,8 @@ TEST_F(DistLockCatalogFixture, UnlockCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockWriteError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1041,8 +1027,8 @@ TEST_F(DistLockCatalogFixture, UnlockWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockWriteConcernError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1074,8 +1060,8 @@ TEST_F(DistLockCatalogFixture, UnlockWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1096,8 +1082,8 @@ TEST_F(DistLockCatalogFixture, UnlockUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlock(operationContext(), OID());
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -1110,8 +1096,8 @@ TEST_F(DistLockCatalogFixture, UnlockUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicUnlockAll) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicUnlockAll) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlockAll(operationContext(), "processID");
ASSERT_OK(status);
});
@@ -1144,8 +1130,8 @@ TEST_F(DistLockCatalogFixture, BasicUnlockAll) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockAllWriteFailed) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockAllWriteFailed) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlockAll(operationContext(), "processID");
ASSERT_EQUALS(ErrorCodes::IllegalOperation, status);
});
@@ -1158,8 +1144,8 @@ TEST_F(DistLockCatalogFixture, UnlockAllWriteFailed) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, UnlockAllNetworkError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, UnlockAllNetworkError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->unlockAll(operationContext(), "processID");
ASSERT_EQUALS(ErrorCodes::NetworkTimeout, status);
});
@@ -1173,8 +1159,8 @@ TEST_F(DistLockCatalogFixture, UnlockAllNetworkError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicGetServerInfo) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicGetServerInfo) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t localTime(dateFromISOString("2015-05-26T13:06:27.293Z").getValue());
OID electionID("555fa85d4d8640862a0fc79b");
auto resultStatus = distLockCatalog()->getServerInfo(operationContext());
@@ -1202,13 +1188,13 @@ TEST_F(DistLockCatalogFixture, BasicGetServerInfo) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerTargetError) {
+TEST_F(DistLockCatalogTest, GetServerTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, GetServerRunCmdError) {
+TEST_F(DistLockCatalogTest, GetServerRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
@@ -1216,8 +1202,8 @@ TEST_F(DistLockCatalogFixture, GetServerRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GetServerCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1234,8 +1220,8 @@ TEST_F(DistLockCatalogFixture, GetServerCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerBadElectionId) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerBadElectionId) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1255,8 +1241,8 @@ TEST_F(DistLockCatalogFixture, GetServerBadElectionId) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerBadLocalTime) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerBadLocalTime) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1276,8 +1262,8 @@ TEST_F(DistLockCatalogFixture, GetServerBadLocalTime) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerNoGLEStats) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerNoGLEStats) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1293,8 +1279,8 @@ TEST_F(DistLockCatalogFixture, GetServerNoGLEStats) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerNoElectionId) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerNoElectionId) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::NotMaster, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1314,8 +1300,8 @@ TEST_F(DistLockCatalogFixture, GetServerNoElectionId) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerInvalidReplSubsectionShouldFail) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerInvalidReplSubsectionShouldFail) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1334,8 +1320,8 @@ TEST_F(DistLockCatalogFixture, GetServerInvalidReplSubsectionShouldFail) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetServerNoElectionIdButMasterShouldFail) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetServerNoElectionIdButMasterShouldFail) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getServerInfo(operationContext()).getStatus();
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_NOT_EQUALS(std::string::npos, status.reason().find("me:1234"));
@@ -1355,8 +1341,8 @@ TEST_F(DistLockCatalogFixture, GetServerNoElectionIdButMasterShouldFail) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicStopPing) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicStopPing) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "test");
ASSERT_OK(status);
});
@@ -1387,13 +1373,13 @@ TEST_F(DistLockCatalogFixture, BasicStopPing) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingTargetError) {
+TEST_F(DistLockCatalogTest, StopPingTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_NOT_OK(status);
}
-TEST_F(DistLockCatalogFixture, StopPingRunCmdError) {
+TEST_F(DistLockCatalogTest, StopPingRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->stopPing(operationContext(), "");
@@ -1401,8 +1387,8 @@ TEST_F(DistLockCatalogFixture, StopPingRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, StopPingCommandError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingCommandError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1419,8 +1405,8 @@ TEST_F(DistLockCatalogFixture, StopPingCommandError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingWriteError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingWriteError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::Unauthorized, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1437,8 +1423,8 @@ TEST_F(DistLockCatalogFixture, StopPingWriteError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingWriteConcernError) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingWriteConcernError) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1458,8 +1444,8 @@ TEST_F(DistLockCatalogFixture, StopPingWriteConcernError) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingUnsupportedWriteConcernResponse) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingUnsupportedWriteConcernResponse) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
ASSERT_FALSE(status.reason().empty());
@@ -1480,8 +1466,8 @@ TEST_F(DistLockCatalogFixture, StopPingUnsupportedWriteConcernResponse) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, StopPingUnsupportedResponseFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, StopPingUnsupportedResponseFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->stopPing(operationContext(), "");
ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, status.code());
});
@@ -1494,8 +1480,8 @@ TEST_F(DistLockCatalogFixture, StopPingUnsupportedResponseFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicGetPing) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicGetPing) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
Date_t ping(dateFromISOString("2015-05-26T13:06:27.293Z").getValue());
auto resultStatus = distLockCatalog()->getPing(operationContext(), "test");
ASSERT_OK(resultStatus.getStatus());
@@ -1531,13 +1517,13 @@ TEST_F(DistLockCatalogFixture, BasicGetPing) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetPingTargetError) {
+TEST_F(DistLockCatalogTest, GetPingTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->getPing(operationContext(), "").getStatus();
ASSERT_EQUALS(ErrorCodes::InternalError, status.code());
}
-TEST_F(DistLockCatalogFixture, GetPingRunCmdError) {
+TEST_F(DistLockCatalogTest, GetPingRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->getPing(operationContext(), "").getStatus();
@@ -1545,28 +1531,28 @@ TEST_F(DistLockCatalogFixture, GetPingRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GetPingNotFound) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetPingNotFound) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getPing(operationContext(), "").getStatus();
ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
return std::vector<BSONObj>();
});
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetPingUnsupportedFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetPingUnsupportedFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getPing(operationContext(), "test").getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
// return non-date type for ping.
BSONObj pingDoc(fromjson(R"({
_id: "test",
@@ -1582,8 +1568,8 @@ TEST_F(DistLockCatalogFixture, GetPingUnsupportedFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicGetLockByTS) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicGetLockByTS) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID ts("555f99712c99a78c5b083358");
auto resultStatus = distLockCatalog()->getLockByTS(operationContext(), ts);
ASSERT_OK(resultStatus.getStatus());
@@ -1593,7 +1579,7 @@ TEST_F(DistLockCatalogFixture, BasicGetLockByTS) {
ASSERT_EQUALS(ts, lockDoc.getLockID());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
ASSERT_EQUALS(dummyHost, request.target);
ASSERT_EQUALS("config", request.dbname);
@@ -1617,41 +1603,41 @@ TEST_F(DistLockCatalogFixture, BasicGetLockByTS) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetLockByTSTargetError) {
+TEST_F(DistLockCatalogTest, GetLockByTSTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::InternalError, status.code());
}
-TEST_F(DistLockCatalogFixture, GetLockByTSRunCmdError) {
+TEST_F(DistLockCatalogTest, GetLockByTSRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.code());
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GetLockByTSNotFound) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetLockByTSNotFound) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::LockNotFound, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
return std::vector<BSONObj>();
});
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetLockByTSUnsupportedFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetLockByTSUnsupportedFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getLockByTS(operationContext(), OID()).getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
// return invalid non-numeric type for state.
BSONObj lockDoc(fromjson(R"({
_id: "test",
@@ -1667,8 +1653,8 @@ TEST_F(DistLockCatalogFixture, GetLockByTSUnsupportedFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, BasicGetLockByName) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, BasicGetLockByName) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
OID ts("555f99712c99a78c5b083358");
auto resultStatus = distLockCatalog()->getLockByName(operationContext(), "abc");
ASSERT_OK(resultStatus.getStatus());
@@ -1704,13 +1690,13 @@ TEST_F(DistLockCatalogFixture, BasicGetLockByName) {
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetLockByNameTargetError) {
+TEST_F(DistLockCatalogTest, GetLockByNameTargetError) {
configTargeter()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"});
auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::InternalError, status.code());
}
-TEST_F(DistLockCatalogFixture, GetLockByNameRunCmdError) {
+TEST_F(DistLockCatalogTest, GetLockByNameRunCmdError) {
shutdownExecutorPool();
auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
@@ -1718,28 +1704,28 @@ TEST_F(DistLockCatalogFixture, GetLockByNameRunCmdError) {
ASSERT_FALSE(status.reason().empty());
}
-TEST_F(DistLockCatalogFixture, GetLockByNameNotFound) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetLockByNameNotFound) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::LockNotFound, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
return std::vector<BSONObj>();
});
future.timed_get(kFutureTimeout);
}
-TEST_F(DistLockCatalogFixture, GetLockByNameUnsupportedFormat) {
- auto future = launchAsync([this] {
+TEST_F(DistLockCatalogTest, GetLockByNameUnsupportedFormat) {
+ auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
auto status = distLockCatalog()->getLockByName(operationContext(), "x").getStatus();
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<std::vector<BSONObj>> {
// Return non-numeric type for state.
BSONObj lockDoc(fromjson(R"({
_id: "x",
@@ -1755,5 +1741,5 @@ TEST_F(DistLockCatalogFixture, GetLockByNameUnsupportedFormat) {
future.timed_get(kFutureTimeout);
}
-} // unnamed namespace
+} // namespace
} // namespace mongo
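
Every rewritten test above follows the same choreography: the new launchOnSeparateThread() helper runs the blocking catalog call on a worker thread with its own Client and OperationContext, while the main thread services the mocked network request and hosts the assertions that must not throw on the worker. A condensed, hypothetical test showing the shape (the reply document is illustrative only):

    // Hypothetical condensed example of the choreography used by the tests
    // above; not part of this commit.
    TEST_F(DistLockCatalogTest, ExampleChoreography) {
        // Worker thread: issues the catalog call, which blocks on the network.
        auto future = launchOnSeparateThread([this](OperationContext* opCtx) {
            ASSERT_OK(distLockCatalog()->ping(opCtx, "abcd", Date_t::now()));
        });

        // Main thread: answers the mocked request, unblocking the worker.
        onCommand([](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
            ASSERT_EQUALS("config", request.dbname);
            return fromjson(
                "{ok: 1, value: {_id: 'abcd', ping: {$date: '2014-03-11T09:17:18.098Z'}}}");
        });

        future.timed_get(kFutureTimeout);
    }
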
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
index 40bb685867f..9ec67b40930 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager_test.cpp
@@ -29,35 +29,19 @@
#include "mongo/platform/basic.h"
#include <boost/optional.hpp>
-#include <boost/optional/optional_io.hpp>
#include <map>
#include <string>
-#include <type_traits>
#include <vector>
-#include "mongo/base/status.h"
-#include "mongo/base/status_with.h"
#include "mongo/bson/json.h"
-#include "mongo/bson/util/builder.h"
-#include "mongo/client/remote_command_targeter.h"
-#include "mongo/client/remote_command_targeter_factory_mock.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/server_options.h"
-#include "mongo/db/service_context_noop.h"
-#include "mongo/executor/task_executor.h"
-#include "mongo/executor/task_executor_pool.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/dist_lock_catalog_mock.h"
#include "mongo/s/catalog/replset_dist_lock_manager.h"
#include "mongo/s/catalog/sharding_catalog_client_mock.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
-#include "mongo/s/client/shard_factory.h"
-#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/client/shard_remote.h"
#include "mongo/s/grid.h"
-#include "mongo/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/shard_server_test_fixture.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/mutex.h"
@@ -67,18 +51,13 @@
#include "mongo/util/time_support.h"
/**
- * Tests for ReplSetDistLockManager. Note that unlock and ping operations are executed on a
- * separate thread. And since this thread cannot capture the assertion exceptions, all the
- * assertion calls should be performed on the main thread.
+ * Tests for ReplSetDistLockManager. Note that unlock and ping operations are executed on a separate
+ * thread. Since that thread cannot capture assertion exceptions, all assertion calls should be
+ * performed on the main thread.
*/
-
namespace mongo {
namespace {
-using std::map;
-using std::string;
-using std::vector;
-
// Max duration to wait to satisfy the test invariant before joining with the main test thread.
const Seconds kJoinTimeout(30);
const Milliseconds kPingInterval(2);
@@ -88,49 +67,19 @@ const Seconds kLockExpiration(10);
 * Basic fixture for ReplSetDistLockManager that starts it up before the test begins
 * and shuts it down when the test finishes.
*/
-class ReplSetDistLockManagerFixture : public ShardingMongodTestFixture {
-public:
- /**
- * Returns the mocked catalog used by the lock manager being tested.
- */
- DistLockCatalogMock* getMockCatalog() {
- auto distLockCatalogMock = dynamic_cast<DistLockCatalogMock*>(distLockCatalog());
- invariant(distLockCatalogMock);
- return distLockCatalogMock;
- }
-
- /**
- * Get the process id that was initialized with the lock manager being tested.
- */
- string getProcessID() const {
- return _processID;
- }
-
+class ReplSetDistLockManagerFixture : public ShardServerTestFixture {
protected:
- virtual std::unique_ptr<TickSource> makeTickSource() {
- return stdx::make_unique<SystemTickSource>();
- }
-
void setUp() override {
- ShardingMongodTestFixture::setUp();
+ ShardServerTestFixture::setUp();
getServiceContext()->setTickSource(makeTickSource());
-
- // Initialize sharding components as a shard server.
- serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- ConnectionString configCS = ConnectionString::forReplicaSet(
- "configReplSet", std::vector<HostAndPort>{HostAndPort{"config"}});
- uassertStatusOK(initializeGlobalShardingStateForMongodForTest(configCS));
}
void tearDown() override {
// We don't care what shutDown passes to stopPing here.
getMockCatalog()->expectStopPing([](StringData) {}, Status::OK());
- ShardingMongodTestFixture::tearDown();
- }
- std::unique_ptr<DistLockCatalog> makeDistLockCatalog() override {
- return stdx::make_unique<DistLockCatalogMock>();
+ ShardServerTestFixture::tearDown();
}
std::unique_ptr<DistLockManager> makeDistLockManager(
@@ -144,20 +93,40 @@ protected:
}
std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
- std::unique_ptr<DistLockManager> distLockManager) {
+ std::unique_ptr<DistLockManager> distLockManager) override {
return stdx::make_unique<ShardingCatalogClientMock>(std::move(distLockManager));
}
- std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration() {
+ std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration() override {
return stdx::make_unique<BalancerConfiguration>();
}
+ virtual std::unique_ptr<TickSource> makeTickSource() {
+ return stdx::make_unique<SystemTickSource>();
+ }
+
+ /**
+ * Returns the mocked catalog used by the lock manager being tested.
+ */
+ DistLockCatalogMock* getMockCatalog() {
+ auto distLockCatalogMock = dynamic_cast<DistLockCatalogMock*>(distLockCatalog());
+ invariant(distLockCatalogMock);
+ return distLockCatalogMock;
+ }
+
+ /**
+ * Returns the process ID with which the lock manager being tested was initialized.
+ */
+ std::string getProcessID() const {
+ return _processID;
+ }
+
private:
- string _processID = "test";
+ std::string _processID = "test";
};
class RSDistLockMgrWithMockTickSource : public ReplSetDistLockManagerFixture {
-public:
+protected:
/**
 * Overrides how the fixture obtains the tick source to install, so that a mock tick source is used.
*/
@@ -169,29 +138,27 @@ public:
* Returns the mock tick source.
*/
TickSourceMock* getMockTickSource() {
- return dynamic_cast<TickSourceMock*>(getGlobalServiceContext()->getTickSource());
+ return dynamic_cast<TickSourceMock*>(getServiceContext()->getTickSource());
}
};
std::string mapToString(const std::map<OID, int>& map) {
StringBuilder str;
-
for (const auto& entry : map) {
str << "(" << entry.first.toString() << ": " << entry.second << ")";
}
return str.str();
-};
+}
std::string vectorToString(const std::vector<OID>& list) {
StringBuilder str;
-
for (const auto& entry : list) {
str << "(" << entry.toString() << ")";
}
return str.str();
-};
+}
/**
* Test scenario:
@@ -200,9 +167,9 @@ std::string vectorToString(const std::vector<OID>& list) {
 * 3. Check that the lock ids used in lock and unlock are the same.
*/
TEST_F(ReplSetDistLockManagerFixture, BasicLockLifeCycle) {
- string lockName("test");
+ std::string lockName("test");
Date_t now(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
LocksType retLockDoc;
retLockDoc.setName(lockName);
@@ -262,11 +229,11 @@ TEST_F(ReplSetDistLockManagerFixture, BasicLockLifeCycle) {
* 4. Check lock id used in lock and unlock are the same.
*/
TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
- string lockName("test");
- string me("me");
+ std::string lockName("test");
+ std::string me("me");
boost::optional<OID> lastTS;
Date_t lastTime(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
int retryAttempt = 0;
const int kMaxRetryAttempt = 3;
@@ -297,7 +264,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after the first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
ASSERT_EQUALS(getProcessID(), processId);
ASSERT_GREATER_THAN_OR_EQUALS(time, lastTime);
@@ -319,7 +286,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after the first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
ASSERT_TRUE(lockSessionID.isSet());
ASSERT_EQUALS(getProcessID(), processId);
@@ -385,7 +352,8 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
}
ASSERT_EQUALS(1, unlockCallCount);
- ASSERT_EQUALS(lastTS, unlockSessionIDPassed);
+ ASSERT(lastTS);
+ ASSERT_EQUALS(*lastTS, unlockSessionIDPassed);
}
/**
@@ -396,11 +364,11 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
 * 4. Make sure that unlock is called to clean up the last lock attempt that errored out.
*/
TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
- string lockName("test");
- string me("me");
+ std::string lockName("test");
+ std::string me("me");
boost::optional<OID> lastTS;
Date_t lastTime(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
int retryAttempt = 0;
const int kMaxRetryAttempt = 3;
@@ -416,7 +384,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after the first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
ASSERT_EQUALS(getProcessID(), processId);
ASSERT_GREATER_THAN_OR_EQUALS(time, lastTime);
@@ -438,7 +406,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after the first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
lastTS = lockSessionID;
ASSERT_TRUE(lockSessionID.isSet());
@@ -498,7 +466,8 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockFailsAfterRetry) {
ASSERT_FALSE(didTimeout);
ASSERT_EQUALS(1, unlockCallCount);
- ASSERT_EQUALS(lastTS, unlockSessionIDPassed);
+ ASSERT(lastTS);
+ ASSERT_EQUALS(*lastTS, unlockSessionIDPassed);
}
TEST_F(ReplSetDistLockManagerFixture, LockBusyNoRetry) {
@@ -526,11 +495,11 @@ TEST_F(ReplSetDistLockManagerFixture, LockBusyNoRetry) {
* 5. Implicitly check that unlock is not called (default setting of mock catalog).
*/
TEST_F(RSDistLockMgrWithMockTickSource, LockRetryTimeout) {
- string lockName("test");
- string me("me");
+ std::string lockName("test");
+ std::string me("me");
boost::optional<OID> lastTS;
Date_t lastTime(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
int retryAttempt = 0;
@@ -544,7 +513,7 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockRetryTimeout) {
ASSERT_EQUALS(lockName, lockID);
// Lock session ID should be the same after the first attempt.
if (lastTS) {
- ASSERT_EQUALS(lastTS, lockSessionID);
+ ASSERT_EQUALS(*lastTS, lockSessionID);
}
ASSERT_EQUALS(getProcessID(), processId);
ASSERT_GREATER_THAN_OR_EQUALS(time, lastTime);
@@ -578,10 +547,10 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockRetryTimeout) {
 * 4. Check that the lockSessionID used in all unlock calls is the same as the one used to grab the lock.
*/
TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
- string lockName("test");
- string me("me");
+ std::string lockName("test");
+ std::string me("me");
OID lastTS;
- string whyMsg("because");
+ std::string whyMsg("because");
getMockCatalog()->expectGrabLock(
[this, &lockName, &lastTS, &me, &whyMsg](StringData lockID,
@@ -654,7 +623,7 @@ TEST_F(ReplSetDistLockManagerFixture, MustUnlockOnLockError) {
TEST_F(ReplSetDistLockManagerFixture, LockPinging) {
stdx::mutex testMutex;
stdx::condition_variable ping3TimesCV;
- vector<string> processIDList;
+ std::vector<std::string> processIDList;
getMockCatalog()->expectPing(
[&testMutex, &ping3TimesCV, &processIDList](StringData processIDArg, Date_t ping) {
@@ -705,7 +674,7 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
stdx::mutex unlockMutex;
stdx::condition_variable unlockCV;
const unsigned int kUnlockErrorCount = 3;
- vector<OID> lockSessionIDPassed;
+ std::vector<OID> lockSessionIDPassed;
getMockCatalog()->expectUnLock(
[this, &unlockMutex, &unlockCV, &kUnlockErrorCount, &lockSessionIDPassed](
@@ -784,9 +753,8 @@ TEST_F(ReplSetDistLockManagerFixture, UnlockUntilNoError) {
TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
stdx::mutex testMutex;
stdx::condition_variable unlockCV;
-
- vector<OID> lockSessionIDPassed;
- map<OID, int> unlockIDMap; // id -> count
+ std::vector<OID> lockSessionIDPassed;
+ std::map<OID, int> unlockIDMap; // id -> count
/**
* Returns true if all values in the map are greater than 2.
@@ -2079,9 +2047,9 @@ TEST_F(RSDistLockMgrWithMockTickSource, CanOvertakeIfNoPingDocument) {
}
TEST_F(ReplSetDistLockManagerFixture, TryLockWithLocalWriteConcernBusy) {
- string lockName("test");
+ std::string lockName("test");
Date_t now(Date_t::now());
- string whyMsg("because");
+ std::string whyMsg("because");
LocksType retLockDoc;
retLockDoc.setName(lockName);
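The repeated `if (lastTS) { ASSERT_EQUALS(*lastTS, ...); }` changes above all apply the same guarded-dereference rule for boost::optional: compare the contained value, never the optional itself against a raw value. A minimal standalone sketch of the pattern (plain C++ with boost::optional and hypothetical names, not the test's actual types):

    #include <boost/optional.hpp>
    #include <cassert>

    // Guarded-dereference pattern from the assertions above: check that the
    // optional is engaged, then compare value to value, never optional to value.
    void checkSessionReuse(boost::optional<int>& lastId, int currentId) {
        if (lastId) {
            assert(*lastId == currentId);  // the retry must reuse the same ID
        }
        lastId = currentId;  // remember for the next attempt
    }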
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 9b26d5903bd..92a8ece2ff5 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -35,14 +35,10 @@
#include <iomanip>
#include <pcrecpp.h>
-#include "mongo/base/status.h"
-#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/read_preference.h"
#include "mongo/client/remote_command_targeter.h"
-#include "mongo/client/replica_set_monitor.h"
-#include "mongo/db/audit.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/namespace_string.h"
@@ -51,7 +47,6 @@
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/executor/network_interface.h"
-#include "mongo/executor/task_executor.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/s/catalog/config_server_version.h"
@@ -64,7 +59,6 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/client/shard.h"
-#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/set_shard_version_request.h"
#include "mongo/s/shard_key_pattern.h"
diff --git a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
index 9e7f354c9c0..429a11ec39b 100644
--- a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
@@ -38,7 +38,7 @@
#include "mongo/executor/task_executor.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/chrono.h"
#include "mongo/stdx/future.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 9f1c784a9f0..707e6500931 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -52,7 +52,7 @@
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/versioning.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/stdx/future.h"
diff --git a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
index be76d387b1a..e15217ade8e 100644
--- a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
@@ -51,7 +51,7 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/stdx/future.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/s/catalog_cache_test_fixture.h b/src/mongo/s/catalog_cache_test_fixture.h
index 439541424c5..244930a664d 100644
--- a/src/mongo/s/catalog_cache_test_fixture.h
+++ b/src/mongo/s/catalog_cache_test_fixture.h
@@ -31,7 +31,7 @@
#include <vector>
#include "mongo/db/namespace_string.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/s/chunk_manager_index_bounds_test.cpp b/src/mongo/s/chunk_manager_index_bounds_test.cpp
index 9ffc73d0a1c..31f8182db7a 100644
--- a/src/mongo/s/chunk_manager_index_bounds_test.cpp
+++ b/src/mongo/s/chunk_manager_index_bounds_test.cpp
@@ -37,7 +37,7 @@
#include "mongo/db/query/canonical_query.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/s/client/SConscript b/src/mongo/s/client/SConscript
index ff5fd98ba3b..325ccc23842 100644
--- a/src/mongo/s/client/SConscript
+++ b/src/mongo/s/client/SConscript
@@ -25,7 +25,7 @@ env.CppUnitTest(
'shard_remote_test.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
'sharding_client',
],
)
@@ -49,7 +49,7 @@ env.CppUnitTest(
],
LIBDEPS=[
'$BUILD_DIR/mongo/s/coreshard',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
]
)
diff --git a/src/mongo/s/client/shard_connection_test.cpp b/src/mongo/s/client/shard_connection_test.cpp
index dc734b82640..2b6ef24a4c9 100644
--- a/src/mongo/s/client/shard_connection_test.cpp
+++ b/src/mongo/s/client/shard_connection_test.cpp
@@ -27,11 +27,9 @@
#include "mongo/platform/basic.h"
-#include <cstdint>
#include <vector>
#include "mongo/db/client.h"
-#include "mongo/db/service_context.h"
#include "mongo/dbtests/mock/mock_conn_registry.h"
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/s/client/shard_connection.h"
@@ -47,20 +45,12 @@
namespace mongo {
namespace {
-using std::string;
-using std::vector;
+const std::string TARGET_HOST = "$dummy:27017";
-const string TARGET_HOST = "$dummy:27017";
-
-/**
- * Warning: cannot run in parallel
- */
-class ShardConnFixture : public mongo::unittest::Test {
+class ShardConnFixture : public unittest::Test {
public:
void setUp() {
- if (!haveClient()) {
- Client::initThread("ShardConnFixture", getGlobalServiceContext(), NULL);
- }
+ Client::initThreadIfNotAlready("ShardConnFixture");
_maxPoolSizePerHost = mongo::shardConnectionPool.getMaxPoolSize();
mongo::ConnectionString::setConnectionHook(
@@ -107,38 +97,32 @@ protected:
void checkNewConns(void (*checkFunc)(uint64_t, uint64_t),
uint64_t arg2,
size_t newConnsToCreate) {
- vector<ShardConnection*> newConnList;
+ std::vector<std::unique_ptr<ShardConnection>> newConnList;
for (size_t x = 0; x < newConnsToCreate; x++) {
- ShardConnection* newConn =
- new ShardConnection(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ auto newConn = std::make_unique<ShardConnection>(
+ ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
checkFunc(newConn->get()->getSockCreationMicroSec(), arg2);
- newConnList.push_back(newConn);
+ newConnList.emplace_back(std::move(newConn));
}
const uint64_t oldCreationTime = mongo::curTimeMicros64();
- for (vector<ShardConnection*>::iterator iter = newConnList.begin();
- iter != newConnList.end();
- ++iter) {
- (*iter)->done();
- delete *iter;
+ for (auto& conn : newConnList) {
+ conn->done();
}
newConnList.clear();
// Check that connections created after the purge were put back to the pool.
for (size_t x = 0; x < newConnsToCreate; x++) {
- ShardConnection* newConn =
- new ShardConnection(ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
+ auto newConn = std::make_unique<ShardConnection>(
+ ConnectionString(HostAndPort(TARGET_HOST)), "test.user");
ASSERT_LESS_THAN(newConn->get()->getSockCreationMicroSec(), oldCreationTime);
- newConnList.push_back(newConn);
+ newConnList.emplace_back(std::move(newConn));
}
- for (vector<ShardConnection*>::iterator iter = newConnList.begin();
- iter != newConnList.end();
- ++iter) {
- (*iter)->done();
- delete *iter;
+ for (auto& conn : newConnList) {
+ conn->done();
}
}
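The checkNewConns rewrite above replaces owning raw pointers and manual delete loops with std::vector<std::unique_ptr<...>>. The general shape, as a self-contained sketch with a hypothetical Conn type standing in for ShardConnection:

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct Conn {
        void done() {}  // stand-in for ShardConnection::done()
    };

    void drainConnections(std::size_t count) {
        std::vector<std::unique_ptr<Conn>> conns;
        for (std::size_t i = 0; i < count; ++i) {
            conns.emplace_back(std::make_unique<Conn>());
        }
        for (auto& conn : conns) {
            conn->done();  // return the connection; no manual delete on any path
        }
    }  // the vector's unique_ptrs destroy every Conn, even on early exit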
diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp
index 94e19f2ee4e..8306d0a00f3 100644
--- a/src/mongo/s/client/sharding_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_connection_hook.cpp
@@ -35,9 +35,6 @@
#include <string>
#include "mongo/bson/util/bson_extract.h"
-#include "mongo/db/audit.h"
-#include "mongo/db/auth/authorization_manager_global.h"
-#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/internal_user_auth.h"
#include "mongo/db/client.h"
#include "mongo/rpc/get_status_from_command_result.h"
diff --git a/src/mongo/s/cluster_identity_loader_test.cpp b/src/mongo/s/cluster_identity_loader_test.cpp
index 213fec88d7e..98817150d6b 100644
--- a/src/mongo/s/cluster_identity_loader_test.cpp
+++ b/src/mongo/s/cluster_identity_loader_test.cpp
@@ -35,6 +35,7 @@
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/db/query/query_request.h"
+#include "mongo/db/service_context_noop.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
@@ -44,7 +45,7 @@
#include "mongo/s/catalog/type_config_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_identity_loader.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/future.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -72,6 +73,13 @@ public:
configTargeter()->setFindHostReturnValue(configHost);
}
+ void tearDown() override {
+ ShardingTestFixture::tearDown();
+
+ // Reset the global service context so that the cluster identity gets cleared
+ setGlobalServiceContext(std::make_unique<ServiceContextNoop>());
+ }
+
void expectConfigVersionLoad(StatusWith<OID> result) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
diff --git a/src/mongo/s/cluster_last_error_info_test.cpp b/src/mongo/s/cluster_last_error_info_test.cpp
index b7a06a58ea1..b9903192581 100644
--- a/src/mongo/s/cluster_last_error_info_test.cpp
+++ b/src/mongo/s/cluster_last_error_info_test.cpp
@@ -36,7 +36,7 @@
#include "mongo/executor/network_interface_mock.h"
#include "mongo/rpc/metadata/sharding_metadata.h"
#include "mongo/s/cluster_last_error_info.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/future.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript
index 9dc7ae2124a..7d53bbe70eb 100644
--- a/src/mongo/s/commands/SConscript
+++ b/src/mongo/s/commands/SConscript
@@ -13,7 +13,6 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/commands',
- '$BUILD_DIR/mongo/s/client/sharding_client',
'$BUILD_DIR/mongo/db/commands/killcursors_common',
'$BUILD_DIR/mongo/db/commands/current_op_common',
'$BUILD_DIR/mongo/s/async_requests_sender',
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index 4df783c1055..6df09647b8f 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -162,10 +162,6 @@ std::unique_ptr<ShardingCatalogClient> ConfigServerTestFixture::makeShardingCata
return stdx::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
}
-std::unique_ptr<CatalogCache> ConfigServerTestFixture::makeCatalogCache() {
- return stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(getServiceContext()));
-}
-
std::unique_ptr<BalancerConfiguration> ConfigServerTestFixture::makeBalancerConfiguration() {
return stdx::make_unique<BalancerConfiguration>();
}
diff --git a/src/mongo/s/config_server_test_fixture.h b/src/mongo/s/config_server_test_fixture.h
index cfbe682aa5e..b2e2bed6a6d 100644
--- a/src/mongo/s/config_server_test_fixture.h
+++ b/src/mongo/s/config_server_test_fixture.h
@@ -163,8 +163,6 @@ protected:
std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
std::unique_ptr<DistLockManager> distLockManager) override;
- std::unique_ptr<CatalogCache> makeCatalogCache() override;
-
std::unique_ptr<ClusterCursorManager> makeClusterCursorManager() override;
std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration() override;
diff --git a/src/mongo/s/query/SConscript b/src/mongo/s/query/SConscript
index 0c90f644c5e..4ff2e432ae6 100644
--- a/src/mongo/s/query/SConscript
+++ b/src/mongo/s/query/SConscript
@@ -102,7 +102,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/db/service_context_noop_init',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
],
)
@@ -116,7 +116,7 @@ env.CppUnitTest(
'$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
'$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/db/service_context_noop_init',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
],
)
env.Library(
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index 9106a3f1941..4c0e32cba51 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -41,7 +41,7 @@
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/s/query/establish_cursors_test.cpp b/src/mongo/s/query/establish_cursors_test.cpp
index c7437b3eb7e..b36a531b2a2 100644
--- a/src/mongo/s/query/establish_cursors_test.cpp
+++ b/src/mongo/s/query/establish_cursors_test.cpp
@@ -35,7 +35,7 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/query/establish_cursors.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/s/shard_server_test_fixture.cpp b/src/mongo/s/shard_server_test_fixture.cpp
index e246e4b621a..36fca36f3a1 100644
--- a/src/mongo/s/shard_server_test_fixture.cpp
+++ b/src/mongo/s/shard_server_test_fixture.cpp
@@ -42,7 +42,6 @@
#include "mongo/stdx/memory.h"
namespace mongo {
-
namespace {
const HostAndPort kConfigHostAndPort("dummy", 123);
@@ -57,16 +56,6 @@ std::shared_ptr<RemoteCommandTargeterMock> ShardServerTestFixture::configTargete
return RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
}
-void ShardServerTestFixture::expectFindOnConfigSendErrorCode(ErrorCodes::Error code) {
- onCommand([&, code](const executor::RemoteCommandRequest& request) {
- ASSERT_EQ(request.target, kConfigHostAndPort);
- ASSERT_EQ(request.dbname, "config");
- BSONObjBuilder responseBuilder;
- CommandHelpers::appendCommandStatus(responseBuilder, Status(code, ""));
- return responseBuilder.obj();
- });
-}
-
void ShardServerTestFixture::setUp() {
ShardingMongodTestFixture::setUp();
@@ -109,8 +98,4 @@ std::unique_ptr<ShardingCatalogClient> ShardServerTestFixture::makeShardingCatal
return stdx::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
}
-std::unique_ptr<CatalogCache> ShardServerTestFixture::makeCatalogCache() {
- return stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(getServiceContext()));
-}
-
} // namespace mongo
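With expectFindOnConfigSendErrorCode removed from the fixture, a test that still needs this behavior can inline the same expectation. A sketch based on the removed body above (the error code here is chosen arbitrarily for illustration):

    onCommand([&](const executor::RemoteCommandRequest& request) {
        ASSERT_EQ(request.target, kConfigHostAndPort);
        ASSERT_EQ(request.dbname, "config");
        BSONObjBuilder responseBuilder;
        CommandHelpers::appendCommandStatus(responseBuilder,
                                            Status(ErrorCodes::HostUnreachable, ""));
        return responseBuilder.obj();
    });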
diff --git a/src/mongo/s/shard_server_test_fixture.h b/src/mongo/s/shard_server_test_fixture.h
index 6fe244964aa..4ca620f2792 100644
--- a/src/mongo/s/shard_server_test_fixture.h
+++ b/src/mongo/s/shard_server_test_fixture.h
@@ -36,9 +36,9 @@ namespace mongo {
class RemoteCommandTargeterMock;
/**
- * Test fixture for shard components, as opposed to config or mongos components.
- * Has a mock network and ephemeral storage engine provided by ShardingMongodTestFixture,
- * additionally sets up mock dist lock catalog and manager with a real catalog client.
+ * Test fixture for shard components, as opposed to config or mongos components. Provides a mock
+ * network and ephemeral storage engine via ShardingMongodTestFixture. Additionally sets up mock
+ * dist lock catalog and manager with a real catalog client.
*/
class ShardServerTestFixture : public ShardingMongodTestFixture {
public:
@@ -56,13 +56,7 @@ public:
*/
std::shared_ptr<RemoteCommandTargeterMock> configTargeterMock();
- void expectFindOnConfigSendErrorCode(ErrorCodes::Error code);
-
protected:
- /**
- * Sets up a ClusterRole::ShardServer replica set with a real catalog client and mock dist lock
- * catalog and manager.
- */
void setUp() override;
void tearDown() override;
@@ -83,8 +77,6 @@ protected:
*/
std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
std::unique_ptr<DistLockManager> distLockManager) override;
-
- std::unique_ptr<CatalogCache> makeCatalogCache() override;
};
} // namespace mongo
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index 83215c99999..cbf5099a2df 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -36,6 +36,7 @@
#include "mongo/base/status_with.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
+#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
@@ -49,11 +50,11 @@
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_consistency_markers_mock.h"
-#include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/repl/replication_process.h"
#include "mongo/db/repl/replication_recovery_mock.h"
#include "mongo/db/repl/storage_interface_mock.h"
+#include "mongo/db/s/config_server_op_observer.h"
+#include "mongo/db/s/shard_server_op_observer.h"
#include "mongo/db/service_context_noop.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor_pool.h"
@@ -90,16 +91,10 @@ using repl::ReplicationCoordinatorMock;
using repl::ReplSettings;
using unittest::assertGet;
-using std::string;
-using std::vector;
-using unittest::assertGet;
-
ShardingMongodTestFixture::ShardingMongodTestFixture() = default;
ShardingMongodTestFixture::~ShardingMongodTestFixture() = default;
-const Seconds ShardingMongodTestFixture::kFutureTimeout{5};
-
void ShardingMongodTestFixture::setUp() {
ServiceContextMongoDTest::setUp();
@@ -119,10 +114,9 @@ void ShardingMongodTestFixture::setUp() {
serversBob.append(BSON("host" << _servers[i].toString() << "_id" << static_cast<int>(i)));
}
repl::ReplSetConfig replSetConfig;
- replSetConfig
- .initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3 << "members"
- << serversBob.arr()))
- .transitional_ignore();
+ ASSERT_OK(replSetConfig.initialize(
+ BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3 << "members"
+ << serversBob.arr())));
replCoordPtr->setGetConfigReturnValue(replSetConfig);
repl::ReplicationCoordinator::set(service, std::move(replCoordPtr));
@@ -137,15 +131,16 @@ void ShardingMongodTestFixture::setUp() {
storagePtr.get(),
stdx::make_unique<repl::ReplicationConsistencyMarkersMock>(),
stdx::make_unique<repl::ReplicationRecoveryMock>()));
- repl::ReplicationProcess::get(_opCtx.get())
- ->initializeRollbackID(_opCtx.get())
- .transitional_ignore();
+
+ ASSERT_OK(repl::ReplicationProcess::get(_opCtx.get())->initializeRollbackID(_opCtx.get()));
repl::StorageInterface::set(service, std::move(storagePtr));
auto makeOpObserver = [&] {
auto opObserver = stdx::make_unique<OpObserverRegistry>();
opObserver->addObserver(stdx::make_unique<OpObserverImpl>());
+ opObserver->addObserver(stdx::make_unique<ConfigServerOpObserver>());
+ opObserver->addObserver(stdx::make_unique<ShardServerOpObserver>());
return opObserver;
};
service->setOpObserver(makeOpObserver());
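The registry built above fans every op-observer callback out to each registered observer, which is what lets the config- and shard-server observers coexist with OpObserverImpl. The composite idea, reduced to a self-contained sketch (illustrative interfaces, not the real OpObserver API):

    #include <memory>
    #include <vector>

    struct Observer {
        virtual ~Observer() = default;
        virtual void onInserts() = 0;
    };

    class ObserverRegistry : public Observer {
    public:
        void addObserver(std::unique_ptr<Observer> observer) {
            _observers.push_back(std::move(observer));
        }
        void onInserts() override {
            for (auto& observer : _observers)
                observer->onInserts();  // every observer sees every operation
        }

    private:
        std::vector<std::unique_ptr<Observer>> _observers;
    };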
@@ -250,10 +245,6 @@ std::unique_ptr<ShardingCatalogClient> ShardingMongodTestFixture::makeShardingCa
return nullptr;
}
-std::unique_ptr<CatalogCache> ShardingMongodTestFixture::makeCatalogCache() {
- return nullptr;
-}
-
std::unique_ptr<ClusterCursorManager> ShardingMongodTestFixture::makeClusterCursorManager() {
return nullptr;
}
@@ -283,7 +274,7 @@ Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
auto const grid = Grid::get(operationContext());
grid->init(makeShardingCatalogClient(std::move(distLockManagerPtr)),
- makeCatalogCache(),
+ stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(getServiceContext())),
makeShardRegistry(configConnStr),
makeClusterCursorManager(),
makeBalancerConfiguration(),
@@ -302,7 +293,7 @@ Status ShardingMongodTestFixture::initializeGlobalShardingStateForMongodForTest(
}
void ShardingMongodTestFixture::tearDown() {
- // Only shut down components that were actually initialized and not already shut down.
+ ReplicaSetMonitor::cleanup();
if (Grid::get(operationContext())->getExecutorPool() && !_executorPoolShutDown) {
Grid::get(operationContext())->getExecutorPool()->shutdownAndJoin();
@@ -351,11 +342,6 @@ void ShardingMongodTestFixture::shutdownExecutorPool() {
_executorPoolShutDown = true;
}
-executor::NetworkInterfaceMock* ShardingMongodTestFixture::network() const {
- invariant(_mockNetwork);
- return _mockNetwork;
-}
-
executor::TaskExecutor* ShardingMongodTestFixture::executor() const {
invariant(Grid::get(operationContext())->getExecutorPool());
return Grid::get(operationContext())->getExecutorPool()->getFixedExecutor();
diff --git a/src/mongo/s/sharding_mongod_test_fixture.h b/src/mongo/s/sharding_mongod_test_fixture.h
index aca1a3f22c0..25f549f0128 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.h
+++ b/src/mongo/s/sharding_mongod_test_fixture.h
@@ -28,13 +28,9 @@
#pragma once
-#include <utility>
-
-#include "mongo/db/service_context.h"
+#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
-#include "mongo/executor/network_test_env.h"
-#include "mongo/s/grid.h"
-#include "mongo/unittest/unittest.h"
+#include "mongo/s/sharding_test_fixture_common.h"
namespace mongo {
@@ -42,12 +38,9 @@ class CatalogCacheLoader;
class ConnectionString;
class DistLockCatalog;
class DistLockManager;
-class NamespaceString;
class RemoteCommandTargeterFactoryMock;
-class ShardRegistry;
namespace repl {
-class ReplicationCoordinatorMock;
class ReplSettings;
} // namespace repl
@@ -60,19 +53,12 @@ class ReplSettings;
* components (including a NetworkInterface/TaskExecutor subsystem backed by the NetworkTestEnv),
* but allows subclasses to replace any component with its real implementation, a mock, or nullptr.
*/
-class ShardingMongodTestFixture : public ServiceContextMongoDTest {
+class ShardingMongodTestFixture : public ServiceContextMongoDTest,
+ public ShardingTestFixtureCommon {
public:
ShardingMongodTestFixture();
~ShardingMongodTestFixture();
- static const Seconds kFutureTimeout;
-
- template <typename Lambda>
- executor::NetworkTestEnv::FutureHandle<typename std::result_of<Lambda()>::type> launchAsync(
- Lambda&& func) const {
- return _networkTestEnv->launchAsync(std::forward<Lambda>(func));
- }
-
/**
* Initializes sharding components according to the cluster role in
* serverGlobalParams.clusterRole and puts the components on the Grid, mimicking the
@@ -103,7 +89,6 @@ public:
// if they have been initialized.
executor::TaskExecutor* executor() const;
- executor::NetworkInterfaceMock* network() const;
repl::ReplicationCoordinatorMock* replicationCoordinator() const;
@@ -151,13 +136,6 @@ protected:
*/
void tearDown() override;
- // Allow subclasses to modify this node's hostname and port, set name, and replica set members.
-
- const HostAndPort _host{"node1:12345"};
- const std::string _setName = "mySet";
- const std::vector<HostAndPort> _servers{
- _host, HostAndPort("node2:12345"), HostAndPort("node3:12345")};
-
// Methods for creating and returning sharding components. Some of these methods have been
// implemented to return the real implementation of the component as the default, while others
// return a mock or nullptr. Subclasses can override any of these methods to create and
@@ -211,11 +189,6 @@ protected:
/**
* Base class returns nullptr.
*/
- virtual std::unique_ptr<CatalogCache> makeCatalogCache();
-
- /**
- * Base class returns nullptr.
- */
virtual std::unique_ptr<ClusterCursorManager> makeClusterCursorManager();
/**
@@ -224,16 +197,14 @@ protected:
virtual std::unique_ptr<BalancerConfiguration> makeBalancerConfiguration();
private:
+ const HostAndPort _host{"node1:12345"};
+ const std::string _setName = "mySet";
+ const std::vector<HostAndPort> _servers{
+ _host, HostAndPort("node2:12345"), HostAndPort("node3:12345")};
+
ServiceContext::UniqueClient _client;
ServiceContext::UniqueOperationContext _opCtx;
- // Since a NetworkInterface is a private member of a TaskExecutor, we store a raw pointer to the
- // fixed TaskExecutor's NetworkInterface here.
- // TODO(esha): Currently, some fine-grained synchronization of the network and task executor is
- // is outside of NetworkTestEnv's capabilities. If all control of the network is done through
- // _networkTestEnv, storing this raw pointer is not necessary.
- executor::NetworkInterfaceMock* _mockNetwork = nullptr;
-
// Since the RemoteCommandTargeterFactory is currently a private member of ShardFactory, we
// store a raw pointer to it here.
RemoteCommandTargeterFactoryMock* _targeterFactory = nullptr;
@@ -248,9 +219,6 @@ private:
repl::ReplicationCoordinatorMock* _replCoord = nullptr;
- // Allows for processing tasks through the NetworkInterfaceMock/ThreadPoolMock subsystem.
- std::unique_ptr<executor::NetworkTestEnv> _networkTestEnv;
-
// Records if a component has been shut down, so that it is only shut down once.
bool _executorPoolShutDown = false;
};
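After this change both the mongod-side and mongos-side fixtures inherit their network-test helpers from one shared base. A stubbed sketch of the resulting layout (the stubs are hypothetical stand-ins; the real classes live in the headers shown in this patch):

    struct ServiceContextMongoDTest {};     // stub for the mongod test-service base
    namespace unittest { struct Test {}; }  // stub for the plain unit-test base

    struct ShardingTestFixtureCommon {
        // launchAsync(), network() and kFutureTimeout live here in the real code
    };

    struct ShardingMongodTestFixture : ServiceContextMongoDTest,
                                       ShardingTestFixtureCommon {};

    struct ShardingTestFixture : unittest::Test,
                                 ShardingTestFixtureCommon {};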
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp
index b9495d65e7a..05dc4a44069 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_router_test_fixture.cpp
@@ -28,12 +28,11 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include <algorithm>
#include <vector>
-#include "mongo/base/status_with.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
@@ -69,7 +68,6 @@
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/stdx/memory.h"
#include "mongo/transport/mock_session.h"
-#include "mongo/transport/transport_layer.h"
#include "mongo/transport/transport_layer_mock.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/tick_source_mock.h"
@@ -83,49 +81,44 @@ using executor::RemoteCommandResponse;
using executor::ShardingTaskExecutor;
using unittest::assertGet;
-using std::string;
-using std::vector;
-using unittest::assertGet;
-
namespace {
+
std::unique_ptr<ShardingTaskExecutor> makeShardingTestExecutor(
std::unique_ptr<NetworkInterfaceMock> net) {
auto testExecutor = makeThreadPoolTestExecutor(std::move(net));
return stdx::make_unique<ShardingTaskExecutor>(std::move(testExecutor));
}
-}
+
+} // namespace
ShardingTestFixture::ShardingTestFixture() = default;
ShardingTestFixture::~ShardingTestFixture() = default;
-const Seconds ShardingTestFixture::kFutureTimeout{5};
-
void ShardingTestFixture::setUp() {
+ auto const service = serviceContext();
+
+ // Configure the service context
+ service->setFastClockSource(stdx::make_unique<ClockSourceMock>());
+ service->setPreciseClockSource(stdx::make_unique<ClockSourceMock>());
+ service->setTickSource(stdx::make_unique<TickSourceMock>());
+
{
- auto service = stdx::make_unique<ServiceContextNoop>();
- service->setFastClockSource(stdx::make_unique<ClockSourceMock>());
- service->setPreciseClockSource(stdx::make_unique<ClockSourceMock>());
- service->setTickSource(stdx::make_unique<TickSourceMock>());
auto tlMock = stdx::make_unique<transport::TransportLayerMock>();
_transportLayer = tlMock.get();
+ ASSERT_OK(_transportLayer->start());
service->setTransportLayer(std::move(tlMock));
- _transportLayer->start().transitional_ignore();
-
- // Set the newly created service context to be the current global context so that tests,
- // which invoke code still referencing getGlobalServiceContext will work properly.
- setGlobalServiceContext(std::move(service));
}
- CollatorFactoryInterface::set(serviceContext(), stdx::make_unique<CollatorFactoryMock>());
+ CollatorFactoryInterface::set(service, stdx::make_unique<CollatorFactoryMock>());
_transportSession = transport::MockSession::create(_transportLayer);
- _client = serviceContext()->makeClient("ShardingTestFixture", _transportSession);
+ _client = service->makeClient("ShardingTestFixture", _transportSession);
_opCtx = _client->makeOperationContext();
// Set up executor pool used for most operations.
auto fixedNet = stdx::make_unique<executor::NetworkInterfaceMock>();
fixedNet->setEgressMetadataHook(
- stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(serviceContext()));
+ stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(service));
_mockNetwork = fixedNet.get();
auto fixedExec = makeShardingTestExecutor(std::move(fixedNet));
_networkTestEnv = stdx::make_unique<NetworkTestEnv>(fixedExec.get(), _mockNetwork);
@@ -133,7 +126,7 @@ void ShardingTestFixture::setUp() {
auto netForPool = stdx::make_unique<executor::NetworkInterfaceMock>();
netForPool->setEgressMetadataHook(
- stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(serviceContext()));
+ stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(service));
auto _mockNetworkForPool = netForPool.get();
auto execForPool = makeShardingTestExecutor(std::move(netForPool));
_networkTestEnvForPool =
@@ -186,15 +179,15 @@ void ShardingTestFixture::setUp() {
auto shardRegistry(stdx::make_unique<ShardRegistry>(std::move(shardFactory), configCS));
executorPool->startup();
- CatalogCacheLoader::set(serviceContext(), stdx::make_unique<ConfigServerCatalogCacheLoader>());
+ CatalogCacheLoader::set(service, stdx::make_unique<ConfigServerCatalogCacheLoader>());
// For now, initialize the global grid object. All sharding objects will be accessible from there
// until we get rid of it.
Grid::get(operationContext())
->init(std::move(catalogClient),
- stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(serviceContext())),
+ stdx::make_unique<CatalogCache>(CatalogCacheLoader::get(service)),
std::move(shardRegistry),
- stdx::make_unique<ClusterCursorManager>(serviceContext()->getPreciseClockSource()),
+ stdx::make_unique<ClusterCursorManager>(service->getPreciseClockSource()),
stdx::make_unique<BalancerConfiguration>(),
std::move(executorPool),
_mockNetwork);
@@ -241,12 +234,6 @@ RemoteCommandTargeterMock* ShardingTestFixture::configTargeter() const {
return _configTargeter;
}
-executor::NetworkInterfaceMock* ShardingTestFixture::network() const {
- invariant(_mockNetwork);
-
- return _mockNetwork;
-}
-
executor::TaskExecutor* ShardingTestFixture::executor() const {
invariant(_executor);
@@ -316,7 +303,7 @@ void ShardingTestFixture::expectGetShards(const std::vector<ShardType>& shards)
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
- vector<BSONObj> shardsToReturn;
+ std::vector<BSONObj> shardsToReturn;
std::transform(shards.begin(),
shards.end(),
@@ -498,7 +485,7 @@ void ShardingTestFixture::expectCount(const HostAndPort& configHost,
const StatusWith<long long>& response) {
onCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- string cmdName = request.cmdObj.firstElement().fieldName();
+ const std::string cmdName(request.cmdObj.firstElement().fieldName());
ASSERT_EQUALS("count", cmdName);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQUALS(expectedNs.toString(), nss.toString());
diff --git a/src/mongo/s/sharding_test_fixture.h b/src/mongo/s/sharding_router_test_fixture.h
index 49b28d46efa..bc17b831764 100644
--- a/src/mongo/s/sharding_test_fixture.h
+++ b/src/mongo/s/sharding_router_test_fixture.h
@@ -28,12 +28,7 @@
#pragma once
-#include <utility>
-
-#include "mongo/db/service_context.h"
-#include "mongo/executor/network_test_env.h"
-#include "mongo/transport/session.h"
-#include "mongo/unittest/unittest.h"
+#include "mongo/s/sharding_test_fixture_common.h"
namespace mongo {
@@ -44,42 +39,25 @@ class ShardingCatalogClientImpl;
struct ChunkVersion;
class CollectionType;
class DistLockManagerMock;
-class NamespaceString;
-class ShardFactoryMock;
class RemoteCommandTargeterFactoryMock;
class RemoteCommandTargeterMock;
class ShardRegistry;
class ShardType;
-template <typename T>
-class StatusWith;
-
-namespace executor {
-class NetworkInterfaceMock;
-class TaskExecutor;
-} // namespace executor
namespace transport {
class TransportLayerMock;
-} // namepsace transport
+} // namespace transport
/**
* Sets up the mocked out objects for testing the replica-set backed catalog manager and catalog
* client.
*/
-class ShardingTestFixture : public mongo::unittest::Test {
+class ShardingTestFixture : public unittest::Test, public ShardingTestFixtureCommon {
public:
ShardingTestFixture();
~ShardingTestFixture();
protected:
- static const Seconds kFutureTimeout;
-
- template <typename Lambda>
- executor::NetworkTestEnv::FutureHandle<typename std::result_of<Lambda()>::type> launchAsync(
- Lambda&& func) const {
- return _networkTestEnv->launchAsync(std::forward<Lambda>(func));
- }
-
ShardingCatalogClient* catalogClient() const;
/**
@@ -93,8 +71,6 @@ protected:
RemoteCommandTargeterMock* configTargeter() const;
- executor::NetworkInterfaceMock* network() const;
-
executor::TaskExecutor* executor() const;
DistLockManagerMock* distLock() const;
@@ -226,9 +202,7 @@ private:
RemoteCommandTargeterMock* _configTargeter;
// For the Grid's fixed executor.
- executor::NetworkInterfaceMock* _mockNetwork;
executor::TaskExecutor* _executor;
- std::unique_ptr<executor::NetworkTestEnv> _networkTestEnv;
// For the Grid's arbitrary executor in its executorPool.
std::unique_ptr<executor::NetworkTestEnv> _networkTestEnvForPool;
diff --git a/src/mongo/s/sharding_test_fixture_common.cpp b/src/mongo/s/sharding_test_fixture_common.cpp
new file mode 100644
index 00000000000..ebff5f0aceb
--- /dev/null
+++ b/src/mongo/s/sharding_test_fixture_common.cpp
@@ -0,0 +1,41 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/s/sharding_test_fixture_common.h"
+
+namespace mongo {
+
+constexpr Seconds ShardingTestFixtureCommon::kFutureTimeout;
+
+ShardingTestFixtureCommon::ShardingTestFixtureCommon() = default;
+
+ShardingTestFixtureCommon::~ShardingTestFixtureCommon() = default;
+
+} // namespace mongo
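The lone `constexpr Seconds ShardingTestFixtureCommon::kFutureTimeout;` line above is the pre-C++17 out-of-line definition required once a constexpr static data member is odr-used (for example, bound to a reference). A minimal reproduction of the rule, independent of the MongoDB types:

    struct Fixture {
        static constexpr int kFutureTimeout{5};  // in-class declaration + initializer
    };
    constexpr int Fixture::kFutureTimeout;  // out-of-line definition, no initializer

    // An odr-use, such as binding a reference, is what makes the definition mandatory:
    const int& ref = Fixture::kFutureTimeout;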
diff --git a/src/mongo/s/sharding_test_fixture_common.h b/src/mongo/s/sharding_test_fixture_common.h
new file mode 100644
index 00000000000..88d9ebaa845
--- /dev/null
+++ b/src/mongo/s/sharding_test_fixture_common.h
@@ -0,0 +1,79 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
+#include "mongo/executor/network_test_env.h"
+#include "mongo/s/grid.h"
+#include "mongo/transport/session.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+
+namespace executor {
+class NetworkInterfaceMock;
+class TaskExecutor;
+} // namespace executor
+
+/**
+ * Contains common functionality and tools that apply to both mongos and mongod unit-tests.
+ */
+class ShardingTestFixtureCommon {
+public:
+ static constexpr Seconds kFutureTimeout{5};
+
+ ShardingTestFixtureCommon();
+ ~ShardingTestFixtureCommon();
+
+ template <typename Lambda>
+ executor::NetworkTestEnv::FutureHandle<typename std::result_of<Lambda()>::type> launchAsync(
+ Lambda&& func) const {
+ return _networkTestEnv->launchAsync(std::forward<Lambda>(func));
+ }
+
+ executor::NetworkInterfaceMock* network() const {
+ invariant(_mockNetwork);
+ return _mockNetwork;
+ }
+
+protected:
+ // Since a NetworkInterface is a private member of a TaskExecutor, we store a raw pointer to the
+ // fixed TaskExecutor's NetworkInterface here.
+ //
+ // TODO(Esha): Currently, some fine-grained synchronization of the network and task executor is
+ // outside of NetworkTestEnv's capabilities. If all control of the network is done through
+ // _networkTestEnv, storing this raw pointer is not necessary.
+ executor::NetworkInterfaceMock* _mockNetwork{nullptr};
+
+ // Allows for processing tasks through the NetworkInterfaceMock/ThreadPoolMock subsystem.
+ std::unique_ptr<executor::NetworkTestEnv> _networkTestEnv;
+};
+
+} // namespace mongo
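Typical use of the helpers this common base provides, as a hedged sketch: launchAsync, network() and kFutureTimeout come from the header above; the onCommand helper and the timed_get accessor follow the patterns visible elsewhere in this patch and are assumed here, and the fixture, test name, and helper function are hypothetical:

    TEST_F(MyShardingTest, RespondsToRemoteCommand) {  // hypothetical fixture/test
        auto future = launchAsync([&] {
            return runCodeThatIssuesARemoteCommand();  // hypothetical helper
        });

        // Answer the request the async code placed on the mocked network.
        onCommand([](const executor::RemoteCommandRequest& request) {
            return BSON("ok" << 1);
        });

        future.timed_get(kFutureTimeout);  // bounded wait on the async result
    }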
diff --git a/src/mongo/s/write_ops/SConscript b/src/mongo/s/write_ops/SConscript
index 89401a022d6..0bfc32d72c6 100644
--- a/src/mongo/s/write_ops/SConscript
+++ b/src/mongo/s/write_ops/SConscript
@@ -31,11 +31,8 @@ env.Library(
'write_op.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/client/connection_string',
'$BUILD_DIR/mongo/s/async_requests_sender',
- '$BUILD_DIR/mongo/s/client/sharding_client',
'$BUILD_DIR/mongo/s/commands/shared_cluster_commands',
- '$BUILD_DIR/mongo/s/coreshard',
'batch_write_types',
],
)
@@ -70,8 +67,8 @@ env.CppUnitTest(
'write_op_test.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/db/service_context',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
+ '$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
+ '$BUILD_DIR/mongo/s/sharding_router_test_fixture',
'cluster_write_op',
]
)
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 045785754a0..e39bc0bc930 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -33,7 +33,7 @@
#include "mongo/db/logical_session_id.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"