author     Sam Dunietz <sam.dunietz@10gen.com>  2016-07-28 16:36:01 -0400
committer  Sam Dunietz <sam.dunietz@10gen.com>  2016-07-28 16:36:01 -0400
commit     60cf7576bc66c1ca82d9ab25f0b8340c837d05ac
tree       42880e90dd6d701c4f05851ee0c7bbce2e69a22f
parent     11ceae3d75af61ec17a1612c11305a733927625b
download   mongo-60cf7576bc66c1ca82d9ab25f0b8340c837d05ac.tar.gz
Revert "SERVER-24367 Implement CollectionRangeDeleter task lifetime management"
This reverts commit 9a776eae4f669fdcfae94c41c0cbbea662d36c94.
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/client.cpp                             2
-rw-r--r--  src/mongo/db/repl/rs_rollback_test.cpp              2
-rw-r--r--  src/mongo/db/s/SConscript                           1
-rw-r--r--  src/mongo/db/s/collection_range_deleter.cpp        21
-rw-r--r--  src/mongo/db/s/collection_range_deleter.h           3
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp        7
-rw-r--r--  src/mongo/db/s/collection_sharding_state.h          2
-rw-r--r--  src/mongo/db/s/collection_sharding_state_test.cpp  22
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp                15
-rw-r--r--  src/mongo/db/s/metadata_manager.h                  10
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp          232
-rw-r--r--  src/mongo/db/s/sharding_state.cpp                  26
-rw-r--r--  src/mongo/db/s/sharding_state.h                    25
-rw-r--r--  src/mongo/db/service_context_d_test_fixture.cpp     7
-rw-r--r--  src/mongo/db/service_context_d_test_fixture.h       7
15 files changed, 131 insertions, 251 deletions
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 6776239f036..2a665268811 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -153,7 +153,7 @@ ClientBasic* ClientBasic::getCurrent() {
Client& cc() {
Client* c = currentClient.getMake()->get();
- invariant(c);
+ verify(c);
return *c;
}
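
Note: the hunk above reverts cc() from invariant(c) back to verify(c); both are assertion macros in this codebase. A rough sketch of the general pattern such macros follow (a hypothetical CHECK_INVARIANT, not either real definition): capture the failing expression and source location, then hand off to a failure handler.

    // Hypothetical macro illustrating the assertion pattern only.
    #include <cstdio>
    #include <cstdlib>

    #define CHECK_INVARIANT(expr)                                        \
        do {                                                             \
            if (!(expr)) {                                               \
                std::fprintf(stderr, "Invariant failure: %s at %s:%d\n", \
                             #expr, __FILE__, __LINE__);                 \
                std::abort(); /* process state is suspect; fail hard */  \
            }                                                            \
        } while (0)
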
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index f90546aeb94..096e3902bed 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -138,6 +138,7 @@ private:
void RSRollbackTest::setUp() {
ServiceContextMongoDTest::setUp();
+ Client::initThreadIfNotAlready();
_txn = cc().makeOperationContext();
_coordinator = new ReplicationCoordinatorRollbackMock();
@@ -157,7 +158,6 @@ void RSRollbackTest::tearDown() {
invariant(mongo::dbHolder().closeAll(_txn.get(), unused, false));
}
_txn.reset();
- ServiceContextMongoDTest::tearDown();
setGlobalReplicationCoordinator(nullptr);
}
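
Note: the two hunks above re-add Client::initThreadIfNotAlready() in setUp() and drop the chained base-class tearDown(). A minimal sketch of the fixture-chaining convention these hunks adjust (hypothetical fixture names): call the base first going in and last going out, so per-test state is built after, and destroyed before, the shared state it depends on.

    // Hypothetical fixtures; sketches the base-call ordering only.
    struct BaseFixture {
        virtual ~BaseFixture() = default;
        virtual void setUp() { /* create shared service state */ }
        virtual void tearDown() { /* destroy shared service state */ }
    };

    struct RollbackTestFixture : BaseFixture {
        void setUp() override {
            BaseFixture::setUp();     // base first on the way in
            // per-test state, e.g. make an operation context
        }
        void tearDown() override {
            // release per-test state
            BaseFixture::tearDown();  // base last on the way out
        }
    };
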
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 072bf231adf..67300543db0 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -167,7 +167,6 @@ env.CppUnitTest(
LIBDEPS=[
'$BUILD_DIR/mongo/client/remote_command_targeter_mock',
'$BUILD_DIR/mongo/db/serveronly',
- '$BUILD_DIR/mongo/db/service_context_d_test_fixture',
'$BUILD_DIR/mongo/executor/network_test_env',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
'$BUILD_DIR/mongo/s/catalog/sharding_catalog_mock',
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index ec9a16c209f..f08a1608a42 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -28,34 +28,17 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/client.h"
#include "mongo/db/s/collection_range_deleter.h"
-#include "mongo/db/s/sharding_state.h"
-#include "mongo/executor/task_executor.h"
-#include "mongo/util/scopeguard.h"
namespace mongo {
-using CallbackArgs = executor::TaskExecutor::CallbackArgs;
-
CollectionRangeDeleter::CollectionRangeDeleter(NamespaceString nss) : _nss(std::move(nss)) {}
void CollectionRangeDeleter::run() {
- Client::initThread(getThreadName().c_str());
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- auto txn = cc().makeOperationContext().get();
- bool hasNextRangeToClean = cleanupNextRange(txn);
-
- // If there are more ranges to run, we add <this> back onto the task executor to run again.
- if (hasNextRangeToClean) {
- auto executor = ShardingState::get(txn)->getRangeDeleterTaskExecutor();
- executor->scheduleWork([this](const CallbackArgs& cbArgs) { run(); });
- } else {
- delete this;
- }
+ // TODO: not implemented
}
-bool CollectionRangeDeleter::cleanupNextRange(OperationContext* txn) {
+bool CollectionRangeDeleter::cleanupNextRange() {
// TODO: not implemented
return false;
}
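
Note: the deleted run() above used a self-rescheduling, self-owning task: perform one unit of work, then either re-enqueue this on the executor or delete this when done. A compact sketch of that lifetime pattern under a toy single-threaded executor (standing in for executor::TaskExecutor; all names here are illustrative):

    #include <functional>
    #include <queue>

    // Toy single-threaded executor standing in for executor::TaskExecutor.
    struct Executor {
        std::queue<std::function<void()>> tasks;
        void scheduleWork(std::function<void()> fn) { tasks.push(std::move(fn)); }
        void drain() {
            while (!tasks.empty()) {
                auto fn = std::move(tasks.front());
                tasks.pop();
                fn();
            }
        }
    };

    // Heap-allocated task that owns itself, mirroring the reverted lifetime.
    class SelfSchedulingTask {
    public:
        explicit SelfSchedulingTask(int units) : _remaining(units) {}
        void run(Executor* executor) {
            bool hasMoreWork = (--_remaining > 0);  // one unit of work per call
            if (hasMoreWork) {
                executor->scheduleWork([this, executor] { run(executor); });
            } else {
                delete this;  // the final run() frees the task
            }
        }
    private:
        int _remaining;
    };

    // Usage: (new SelfSchedulingTask(3))->run(&executor); executor.drain();

The delete this at the end is what makes task lifetime delicate here; nothing in this sketch handles an executor shutting down with a reschedule still queued.
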
diff --git a/src/mongo/db/s/collection_range_deleter.h b/src/mongo/db/s/collection_range_deleter.h
index cf3599c17ae..c447bd750e2 100644
--- a/src/mongo/db/s/collection_range_deleter.h
+++ b/src/mongo/db/s/collection_range_deleter.h
@@ -29,7 +29,6 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/operation_context.h"
namespace mongo {
@@ -52,7 +51,7 @@ public:
*
* Returns true if there are more entries in rangesToClean, false if the set is empty.
*/
- bool cleanupNextRange(OperationContext* txn);
+ bool cleanupNextRange();
private:
NamespaceString _nss;
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 47e66ca914b..ba36ff9e546 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -44,7 +44,6 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/server_options.h"
-#include "mongo/db/service_context.h"
#include "mongo/s/catalog/sharding_catalog_manager.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/chunk_version.h"
@@ -130,8 +129,8 @@ private:
} // unnamed namespace
-CollectionShardingState::CollectionShardingState(ServiceContext* sc, NamespaceString nss)
- : _nss(std::move(nss)), _metadataManager{sc, _nss} {}
+CollectionShardingState::CollectionShardingState(NamespaceString nss)
+ : _nss(std::move(nss)), _metadataManager{} {}
CollectionShardingState::~CollectionShardingState() {
invariant(!_sourceMgr);
@@ -148,7 +147,7 @@ CollectionShardingState* CollectionShardingState::get(OperationContext* txn,
dassert(txn->lockState()->isCollectionLockedForMode(ns, MODE_IS));
ShardingState* const shardingState = ShardingState::get(txn);
- return shardingState->getNS(ns, txn);
+ return shardingState->getNS(ns);
}
ScopedCollectionMetadata CollectionShardingState::getMetadata() {
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 40ce2829e53..24ed2cb4ffb 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -59,7 +59,7 @@ public:
/**
* Instantiates a new per-collection sharding state as unsharded.
*/
- CollectionShardingState(ServiceContext* sc, NamespaceString nss);
+ CollectionShardingState(NamespaceString nss);
~CollectionShardingState();
/**
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index 8a7ca715141..818ae80af3c 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -78,14 +78,8 @@ public:
return _initCallCount;
}
- ServiceContext* getServiceContext() {
- return &_service;
- }
-
-protected:
- ServiceContextNoop _service;
-
private:
+ ServiceContextNoop _service;
ServiceContext::UniqueClient _client;
ServiceContext::UniqueOperationContext _opCtx;
@@ -93,8 +87,7 @@ private:
};
TEST_F(CollShardingStateTest, GlobalInitGetsCalledAfterWriteCommits) {
- CollectionShardingState collShardingState(&_service,
- NamespaceString::kConfigCollectionNamespace);
+ CollectionShardingState collShardingState(NamespaceString::kConfigCollectionNamespace);
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
@@ -113,8 +106,7 @@ TEST_F(CollShardingStateTest, GlobalInitGetsCalledAfterWriteCommits) {
}
TEST_F(CollShardingStateTest, GlobalInitDoesntGetCalledIfWriteAborts) {
- CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kConfigCollectionNamespace);
+ CollectionShardingState collShardingState(NamespaceString::kConfigCollectionNamespace);
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
@@ -133,7 +125,7 @@ TEST_F(CollShardingStateTest, GlobalInitDoesntGetCalledIfWriteAborts) {
}
TEST_F(CollShardingStateTest, GlobalInitDoesntGetsCalledIfNSIsNotForShardIdentity) {
- CollectionShardingState collShardingState(getServiceContext(), NamespaceString("admin.user"));
+ CollectionShardingState collShardingState(NamespaceString("admin.user"));
ShardIdentityType shardIdentity;
shardIdentity.setConfigsvrConnString(
@@ -152,8 +144,7 @@ TEST_F(CollShardingStateTest, GlobalInitDoesntGetsCalledIfNSIsNotForShardIdentit
}
TEST_F(CollShardingStateTest, OnInsertOpThrowWithIncompleteShardIdentityDocument) {
- CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kConfigCollectionNamespace);
+ CollectionShardingState collShardingState(NamespaceString::kConfigCollectionNamespace);
ShardIdentityType shardIdentity;
shardIdentity.setShardName("a");
@@ -162,8 +153,7 @@ TEST_F(CollShardingStateTest, OnInsertOpThrowWithIncompleteShardIdentityDocument
}
TEST_F(CollShardingStateTest, GlobalInitDoesntGetsCalledIfShardIdentityDocWasNotInserted) {
- CollectionShardingState collShardingState(getServiceContext(),
- NamespaceString::kConfigCollectionNamespace);
+ CollectionShardingState collShardingState(NamespaceString::kConfigCollectionNamespace);
WriteUnitOfWork wuow(txn());
collShardingState.onInsertOp(txn(), BSON("_id" << 1));
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index d438f89f505..84fb91e592f 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -31,20 +31,14 @@
#include "mongo/platform/basic.h"
#include "mongo/db/range_arithmetic.h"
-#include "mongo/db/s/collection_range_deleter.h"
#include "mongo/db/s/metadata_manager.h"
-#include "mongo/db/s/sharding_state.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
namespace mongo {
-using CallbackArgs = executor::TaskExecutor::CallbackArgs;
-
-MetadataManager::MetadataManager(ServiceContext* sc, NamespaceString nss)
- : _nss(std::move(nss)),
- _serviceContext(sc),
- _activeMetadataTracker(stdx::make_unique<CollectionMetadataTracker>(nullptr)) {}
+MetadataManager::MetadataManager()
+ : _activeMetadataTracker(stdx::make_unique<CollectionMetadataTracker>(nullptr)) {}
MetadataManager::~MetadataManager() {
stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);
@@ -326,11 +320,6 @@ void MetadataManager::_addRangeToClean_inlock(const ChunkRange& range) {
invariant(!rangeMapOverlaps(_rangesToClean, range.getMin(), range.getMax()));
invariant(!rangeMapOverlaps(_receivingChunks, range.getMin(), range.getMax()));
_rangesToClean.insert(std::make_pair(range.getMin().getOwned(), range.getMax().getOwned()));
-
- // If _rangesToClean was previously empty, we need to start the collection range deleter
- if (_rangesToClean.size() == 1UL) {
- ShardingState::get(_serviceContext)->scheduleCleanup(_nss);
- }
}
void MetadataManager::removeRangeToClean(const ChunkRange& range) {
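
Note: the block removed from _addRangeToClean_inlock above scheduled cleanup only when _rangesToClean went from empty to non-empty, keeping at most one deleter chain in flight per collection. A generic sketch of that transition-triggered wakeup (hypothetical types, int bounds instead of BSON):

    #include <map>
    #include <mutex>

    class RangesToClean {
    public:
        void add(int min, int max) {
            std::lock_guard<std::mutex> lk(_mutex);
            _ranges.emplace(min, max);
            // Only the empty -> non-empty transition schedules work; later
            // insertions are picked up by the already-running cleanup chain.
            if (_ranges.size() == 1) {
                scheduleCleanup();
            }
        }

    private:
        void scheduleCleanup() { /* enqueue a deleter task on an executor */ }

        std::mutex _mutex;
        std::map<int, int> _ranges;  // min -> max
    };
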
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index 0100ef59d28..319db8180fb 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -32,11 +32,8 @@
#include <memory>
#include "mongo/base/disallow_copying.h"
-#include "mongo/db/namespace_string.h"
#include "mongo/db/s/collection_metadata.h"
-#include "mongo/db/service_context.h"
#include "mongo/s/catalog/type_chunk.h"
-
#include "mongo/stdx/memory.h"
namespace mongo {
@@ -47,7 +44,7 @@ class MetadataManager {
MONGO_DISALLOW_COPYING(MetadataManager);
public:
- MetadataManager(ServiceContext* sc, NamespaceString nss);
+ MetadataManager();
~MetadataManager();
/**
@@ -130,11 +127,6 @@ private:
void _setActiveMetadata_inlock(std::unique_ptr<CollectionMetadata> newMetadata);
- const NamespaceString _nss;
-
- // ServiceContext from which to obtain instances of global support objects.
- ServiceContext* _serviceContext;
-
// Mutex to protect the state below
stdx::mutex _managerLock;
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 1e50b85bea3..412b5e7ba89 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -31,13 +31,8 @@
#include "mongo/db/s/metadata_manager.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/client.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/namespace_string.h"
#include "mongo/db/s/collection_metadata.h"
-#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
@@ -49,23 +44,13 @@ using unittest::assertGet;
namespace {
-class MetadataManagerTest : public ServiceContextMongoDTest {
-protected:
- void setUp() override {
- ServiceContextMongoDTest::setUp();
- ShardingState::get(getServiceContext())
- ->setScheduleCleanupFunctionForTest([](const NamespaceString& nss) {});
- }
-
- std::unique_ptr<CollectionMetadata> makeEmptyMetadata() {
- return stdx::make_unique<CollectionMetadata>(BSON("key" << 1),
- ChunkVersion(1, 0, OID::gen()));
- }
-};
+std::unique_ptr<CollectionMetadata> makeEmptyMetadata() {
+ return stdx::make_unique<CollectionMetadata>(BSON("key" << 1), ChunkVersion(1, 0, OID::gen()));
+}
+TEST(MetadataManager, SetAndGetActiveMetadata) {
+ MetadataManager manager;
-TEST_F(MetadataManagerTest, SetAndGetActiveMetadata) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
std::unique_ptr<CollectionMetadata> cm = makeEmptyMetadata();
auto cmPtr = cm.get();
@@ -75,9 +60,8 @@ TEST_F(MetadataManagerTest, SetAndGetActiveMetadata) {
ASSERT_EQ(cmPtr, scopedMetadata.getMetadata());
};
-
-TEST_F(MetadataManagerTest, ResetActiveMetadata) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
+TEST(MetadataManager, RefreshActiveMetadata) {
+ MetadataManager manager;
manager.refreshActiveMetadata(makeEmptyMetadata());
ScopedCollectionMetadata scopedMetadata1 = manager.getActiveMetadata();
@@ -94,38 +78,38 @@ TEST_F(MetadataManagerTest, ResetActiveMetadata) {
ASSERT_EQ(cm2Ptr, scopedMetadata2.getMetadata());
};
-TEST_F(MetadataManagerTest, AddAndRemoveRanges) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
+TEST(MetadataManager, AddAndRemoveRanges) {
+ MetadataManager mm;
ChunkRange cr1 = ChunkRange(BSON("key" << 0), BSON("key" << 10));
ChunkRange cr2 = ChunkRange(BSON("key" << 10), BSON("key" << 20));
- manager.addRangeToClean(cr1);
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 1UL);
- manager.removeRangeToClean(cr1);
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 0UL);
+ mm.addRangeToClean(cr1);
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 1UL);
+ mm.removeRangeToClean(cr1);
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 0UL);
- manager.addRangeToClean(cr1);
- manager.addRangeToClean(cr2);
- manager.removeRangeToClean(cr1);
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 1UL);
- auto ranges = manager.getCopyOfRangesToClean();
+ mm.addRangeToClean(cr1);
+ mm.addRangeToClean(cr2);
+ mm.removeRangeToClean(cr1);
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 1UL);
+ auto ranges = mm.getCopyOfRangesToClean();
auto it = ranges.find(cr2.getMin());
ChunkRange remainingChunk = ChunkRange(it->first, it->second);
ASSERT_EQ(remainingChunk.toString(), cr2.toString());
- manager.removeRangeToClean(cr2);
+ mm.removeRangeToClean(cr2);
}
// Tests that a removal in the middle of an existing ChunkRange results in
// two correct chunk ranges.
-TEST_F(MetadataManagerTest, RemoveRangeInMiddleOfRange) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
+TEST(MetadataManager, RemoveRangeInMiddleOfRange) {
+ MetadataManager mm;
ChunkRange cr1 = ChunkRange(BSON("key" << 0), BSON("key" << 10));
- manager.addRangeToClean(cr1);
- manager.removeRangeToClean(ChunkRange(BSON("key" << 4), BSON("key" << 6)));
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 2UL);
+ mm.addRangeToClean(cr1);
+ mm.removeRangeToClean(ChunkRange(BSON("key" << 4), BSON("key" << 6)));
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 2UL);
- auto ranges = manager.getCopyOfRangesToClean();
+ auto ranges = mm.getCopyOfRangesToClean();
auto it = ranges.find(BSON("key" << 0));
ChunkRange expectedChunk = ChunkRange(BSON("key" << 0), BSON("key" << 4));
ChunkRange remainingChunk = ChunkRange(it->first, it->second);
@@ -136,59 +120,59 @@ TEST_F(MetadataManagerTest, RemoveRangeInMiddleOfRange) {
remainingChunk = ChunkRange(it->first, it->second);
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
- manager.removeRangeToClean(cr1);
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 0UL);
+ mm.removeRangeToClean(cr1);
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 0UL);
}
// Tests removals that overlap with just one ChunkRange.
-TEST_F(MetadataManagerTest, RemoveRangeWithSingleRangeOverlap) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
+TEST(MetadataManager, RemoveRangeWithSingleRangeOverlap) {
+ MetadataManager mm;
ChunkRange cr1 = ChunkRange(BSON("key" << 0), BSON("key" << 10));
- manager.addRangeToClean(cr1);
- manager.removeRangeToClean(ChunkRange(BSON("key" << 0), BSON("key" << 5)));
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 1UL);
- auto ranges = manager.getCopyOfRangesToClean();
+ mm.addRangeToClean(cr1);
+ mm.removeRangeToClean(ChunkRange(BSON("key" << 0), BSON("key" << 5)));
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 1UL);
+ auto ranges = mm.getCopyOfRangesToClean();
auto it = ranges.find(BSON("key" << 5));
ChunkRange remainingChunk = ChunkRange(it->first, it->second);
ChunkRange expectedChunk = ChunkRange(BSON("key" << 5), BSON("key" << 10));
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
- manager.removeRangeToClean(ChunkRange(BSON("key" << 4), BSON("key" << 6)));
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 1UL);
- ranges = manager.getCopyOfRangesToClean();
+ mm.removeRangeToClean(ChunkRange(BSON("key" << 4), BSON("key" << 6)));
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 1UL);
+ ranges = mm.getCopyOfRangesToClean();
it = ranges.find(BSON("key" << 6));
remainingChunk = ChunkRange(it->first, it->second);
expectedChunk = ChunkRange(BSON("key" << 6), BSON("key" << 10));
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
- manager.removeRangeToClean(ChunkRange(BSON("key" << 9), BSON("key" << 13)));
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 1UL);
- ranges = manager.getCopyOfRangesToClean();
+ mm.removeRangeToClean(ChunkRange(BSON("key" << 9), BSON("key" << 13)));
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 1UL);
+ ranges = mm.getCopyOfRangesToClean();
it = ranges.find(BSON("key" << 6));
remainingChunk = ChunkRange(it->first, it->second);
expectedChunk = ChunkRange(BSON("key" << 6), BSON("key" << 9));
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
- manager.removeRangeToClean(ChunkRange(BSON("key" << 0), BSON("key" << 10)));
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 0UL);
+ mm.removeRangeToClean(ChunkRange(BSON("key" << 0), BSON("key" << 10)));
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 0UL);
}
// Tests removals that overlap with more than one ChunkRange.
-TEST_F(MetadataManagerTest, RemoveRangeWithMultipleRangeOverlaps) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
+TEST(MetadataManager, RemoveRangeWithMultipleRangeOverlaps) {
+ MetadataManager mm;
ChunkRange cr1 = ChunkRange(BSON("key" << 0), BSON("key" << 10));
ChunkRange cr2 = ChunkRange(BSON("key" << 10), BSON("key" << 20));
ChunkRange cr3 = ChunkRange(BSON("key" << 20), BSON("key" << 30));
- manager.addRangeToClean(cr1);
- manager.addRangeToClean(cr2);
- manager.addRangeToClean(cr3);
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 3UL);
+ mm.addRangeToClean(cr1);
+ mm.addRangeToClean(cr2);
+ mm.addRangeToClean(cr3);
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 3UL);
- manager.removeRangeToClean(ChunkRange(BSON("key" << 8), BSON("key" << 22)));
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 2UL);
- auto ranges = manager.getCopyOfRangesToClean();
+ mm.removeRangeToClean(ChunkRange(BSON("key" << 8), BSON("key" << 22)));
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 2UL);
+ auto ranges = mm.getCopyOfRangesToClean();
auto it = ranges.find(BSON("key" << 0));
ChunkRange remainingChunk = ChunkRange(it->first, it->second);
ChunkRange expectedChunk = ChunkRange(BSON("key" << 0), BSON("key" << 8));
@@ -198,117 +182,117 @@ TEST_F(MetadataManagerTest, RemoveRangeWithMultipleRangeOverlaps) {
expectedChunk = ChunkRange(BSON("key" << 22), BSON("key" << 30));
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
- manager.removeRangeToClean(ChunkRange(BSON("key" << 0), BSON("key" << 30)));
- ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 0UL);
+ mm.removeRangeToClean(ChunkRange(BSON("key" << 0), BSON("key" << 30)));
+ ASSERT_EQ(mm.getCopyOfRangesToClean().size(), 0UL);
}
-TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationSinglePending) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
- manager.refreshActiveMetadata(makeEmptyMetadata());
+TEST(MetadataManager, RefreshAfterSuccessfulMigrationSinglePending) {
+ MetadataManager mm;
+ mm.refreshActiveMetadata(makeEmptyMetadata());
const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
- manager.beginReceive(cr1);
- ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 1UL);
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 0UL);
+ mm.beginReceive(cr1);
+ ASSERT_EQ(mm.getCopyOfReceivingChunks().size(), 1UL);
+ ASSERT_EQ(mm.getActiveMetadata()->getChunks().size(), 0UL);
- ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
+ ChunkVersion version = mm.getActiveMetadata()->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(
- manager.getActiveMetadata()->clonePlusChunk(cr1.getMin(), cr1.getMax(), version));
- ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 0UL);
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
+ mm.refreshActiveMetadata(
+ mm.getActiveMetadata()->clonePlusChunk(cr1.getMin(), cr1.getMax(), version));
+ ASSERT_EQ(mm.getCopyOfReceivingChunks().size(), 0UL);
+ ASSERT_EQ(mm.getActiveMetadata()->getChunks().size(), 1UL);
}
-TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationMultiplePending) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
- manager.refreshActiveMetadata(makeEmptyMetadata());
+TEST(MetadataManager, RefreshAfterSuccessfulMigrationMultiplePending) {
+ MetadataManager mm;
+ mm.refreshActiveMetadata(makeEmptyMetadata());
const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
- manager.beginReceive(cr1);
+ mm.beginReceive(cr1);
const ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
- manager.beginReceive(cr2);
+ mm.beginReceive(cr2);
- ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 2UL);
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 0UL);
+ ASSERT_EQ(mm.getCopyOfReceivingChunks().size(), 2UL);
+ ASSERT_EQ(mm.getActiveMetadata()->getChunks().size(), 0UL);
{
- ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
+ ChunkVersion version = mm.getActiveMetadata()->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(
- manager.getActiveMetadata()->clonePlusChunk(cr1.getMin(), cr1.getMax(), version));
- ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 1UL);
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
+ mm.refreshActiveMetadata(
+ mm.getActiveMetadata()->clonePlusChunk(cr1.getMin(), cr1.getMax(), version));
+ ASSERT_EQ(mm.getCopyOfReceivingChunks().size(), 1UL);
+ ASSERT_EQ(mm.getActiveMetadata()->getChunks().size(), 1UL);
}
{
- ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
+ ChunkVersion version = mm.getActiveMetadata()->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(
- manager.getActiveMetadata()->clonePlusChunk(cr2.getMin(), cr2.getMax(), version));
- ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 0UL);
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 2UL);
+ mm.refreshActiveMetadata(
+ mm.getActiveMetadata()->clonePlusChunk(cr2.getMin(), cr2.getMax(), version));
+ ASSERT_EQ(mm.getCopyOfReceivingChunks().size(), 0UL);
+ ASSERT_EQ(mm.getActiveMetadata()->getChunks().size(), 2UL);
}
}
-TEST_F(MetadataManagerTest, RefreshAfterNotYetCompletedMigrationMultiplePending) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
- manager.refreshActiveMetadata(makeEmptyMetadata());
+TEST(MetadataManager, RefreshAfterNotYetCompletedMigrationMultiplePending) {
+ MetadataManager mm;
+ mm.refreshActiveMetadata(makeEmptyMetadata());
const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
- manager.beginReceive(cr1);
+ mm.beginReceive(cr1);
const ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
- manager.beginReceive(cr2);
+ mm.beginReceive(cr2);
- ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 2UL);
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 0UL);
+ ASSERT_EQ(mm.getCopyOfReceivingChunks().size(), 2UL);
+ ASSERT_EQ(mm.getActiveMetadata()->getChunks().size(), 0UL);
- ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
+ ChunkVersion version = mm.getActiveMetadata()->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(
- manager.getActiveMetadata()->clonePlusChunk(BSON("key" << 50), BSON("key" << 60), version));
- ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 2UL);
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
+ mm.refreshActiveMetadata(
+ mm.getActiveMetadata()->clonePlusChunk(BSON("key" << 50), BSON("key" << 60), version));
+ ASSERT_EQ(mm.getCopyOfReceivingChunks().size(), 2UL);
+ ASSERT_EQ(mm.getActiveMetadata()->getChunks().size(), 1UL);
}
-TEST_F(MetadataManagerTest, BeginReceiveWithOverlappingRange) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
- manager.refreshActiveMetadata(makeEmptyMetadata());
+TEST(MetadataManager, BeginReceiveWithOverlappingRange) {
+ MetadataManager mm;
+ mm.refreshActiveMetadata(makeEmptyMetadata());
const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10));
- manager.beginReceive(cr1);
+ mm.beginReceive(cr1);
const ChunkRange cr2(BSON("key" << 30), BSON("key" << 40));
- manager.beginReceive(cr2);
+ mm.beginReceive(cr2);
const ChunkRange crOverlap(BSON("key" << 5), BSON("key" << 35));
- manager.beginReceive(crOverlap);
+ mm.beginReceive(crOverlap);
- const auto copyOfPending = manager.getCopyOfReceivingChunks();
+ const auto copyOfPending = mm.getCopyOfReceivingChunks();
ASSERT_EQ(copyOfPending.size(), 1UL);
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 0UL);
+ ASSERT_EQ(mm.getActiveMetadata()->getChunks().size(), 0UL);
const auto it = copyOfPending.find(BSON("key" << 5));
ASSERT(it != copyOfPending.end());
ASSERT_EQ(it->second, BSON("key" << 35));
}
-TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
- MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
- manager.refreshActiveMetadata(makeEmptyMetadata());
+TEST(MetadataManager, RefreshMetadataAfterDropAndRecreate) {
+ MetadataManager mm;
+ mm.refreshActiveMetadata(makeEmptyMetadata());
{
- auto metadata = manager.getActiveMetadata();
+ auto metadata = mm.getActiveMetadata();
ChunkVersion newVersion = metadata->getCollVersion();
newVersion.incMajor();
- manager.refreshActiveMetadata(
+ mm.refreshActiveMetadata(
metadata->clonePlusChunk(BSON("key" << 0), BSON("key" << 10), newVersion));
}
@@ -317,11 +301,11 @@ TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
ChunkVersion newVersion = recreateMetadata->getCollVersion();
newVersion.incMajor();
- manager.refreshActiveMetadata(
+ mm.refreshActiveMetadata(
recreateMetadata->clonePlusChunk(BSON("key" << 20), BSON("key" << 30), newVersion));
- ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
+ ASSERT_EQ(mm.getActiveMetadata()->getChunks().size(), 1UL);
- const auto chunkEntry = manager.getActiveMetadata()->getChunks().begin();
+ const auto chunkEntry = mm.getActiveMetadata()->getChunks().begin();
ASSERT_EQ(BSON("key" << 20), chunkEntry->first);
ASSERT_EQ(BSON("key" << 30), chunkEntry->second);
}
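
Note: the reverted tests above (AddAndRemoveRanges through RemoveRangeWithMultipleRangeOverlaps) all pin down one behavior: removing a sub-range trims or splits the stored ranges, so deleting the middle of [0, 10) leaves [0, 4) and [6, 10). A self-contained sketch of that semantics over int bounds instead of BSON keys (a hypothetical removeRange, not the real removeRangeToClean):

    #include <cassert>
    #include <map>

    using RangeMap = std::map<int, int>;  // min -> max, ranges are [min, max)

    void removeRange(RangeMap& ranges, int min, int max) {
        auto it = ranges.lower_bound(min);
        if (it != ranges.begin()) {
            --it;  // the preceding range may also overlap [min, max)
        }
        while (it != ranges.end() && it->first < max) {
            if (it->second <= min) {  // entirely left of the removal; skip
                ++it;
                continue;
            }
            const int curMin = it->first;
            const int curMax = it->second;
            it = ranges.erase(it);
            if (curMin < min) ranges.emplace(curMin, min);             // left remainder
            if (curMax > max) it = ranges.emplace(max, curMax).first;  // right remainder
        }
    }

    int main() {
        RangeMap ranges{{0, 10}};
        removeRange(ranges, 4, 6);  // mirrors RemoveRangeInMiddleOfRange
        assert(ranges.size() == 2);
        assert(ranges.at(0) == 4 && ranges.at(6) == 10);
    }
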
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 98fc7037c40..999f74a6be0 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -78,8 +78,6 @@ using std::shared_ptr;
using std::string;
using std::vector;
-using CallbackArgs = executor::TaskExecutor::CallbackArgs;
-
namespace {
const auto getShardingState = ServiceContext::declareDecoration<ShardingState>();
@@ -122,13 +120,7 @@ ShardingState::ShardingState()
: _initializationState(static_cast<uint32_t>(InitializationState::kNew)),
_initializationStatus(Status(ErrorCodes::InternalError, "Uninitialized value")),
_configServerTickets(kMaxConfigServerRefreshThreads),
- _globalInit(&initializeGlobalShardingStateForMongod),
- _scheduleWorkFn([this](NamespaceString nss) {
- getRangeDeleterTaskExecutor()->scheduleWork([=](const CallbackArgs& cbArgs) {
- CollectionRangeDeleter* rd = new CollectionRangeDeleter(nss);
- rd->run();
- });
- }) {}
+ _globalInit(&initializeGlobalShardingStateForMongod) {}
ShardingState::~ShardingState() = default;
@@ -228,14 +220,12 @@ void ShardingState::setShardName(const string& name) {
}
}
-CollectionShardingState* ShardingState::getNS(const std::string& ns, OperationContext* txn) {
+CollectionShardingState* ShardingState::getNS(const std::string& ns) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
CollectionShardingStateMap::iterator it = _collections.find(ns);
if (it == _collections.end()) {
- auto inserted =
- _collections.insert(make_pair(ns,
- stdx::make_unique<CollectionShardingState>(
- txn->getServiceContext(), NamespaceString(ns))));
+ auto inserted = _collections.insert(
+ make_pair(ns, stdx::make_unique<CollectionShardingState>(NamespaceString(ns))));
invariant(inserted.second);
it = std::move(inserted.first);
}
@@ -260,14 +250,6 @@ void ShardingState::setGlobalInitMethodForTest(GlobalInitFunc func) {
_globalInit = func;
}
-void ShardingState::setScheduleCleanupFunctionForTest(RangeDeleterCleanupNotificationFunc fn) {
- _scheduleWorkFn = fn;
-}
-
-void ShardingState::scheduleCleanup(const NamespaceString& nss) {
- _scheduleWorkFn(nss);
-}
-
Status ShardingState::onStaleShardVersion(OperationContext* txn,
const NamespaceString& nss,
const ChunkVersion& expectedVersion) {
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index a27b2841eda..e9605c6b717 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -36,9 +36,7 @@
#include "mongo/bson/oid.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/active_migrations_registry.h"
-#include "mongo/db/s/collection_range_deleter.h"
#include "mongo/db/s/migration_destination_manager.h"
-#include "mongo/executor/task_executor.h"
#include "mongo/executor/thread_pool_task_executor.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/memory.h"
@@ -75,10 +73,6 @@ public:
using GlobalInitFunc =
stdx::function<Status(OperationContext*, const ConnectionString&, StringData)>;
- // Signature for the callback function used by the MetadataManager to inform the
- // sharding subsystem that there is range cleanup work to be done.
- using RangeDeleterCleanupNotificationFunc = stdx::function<void(const NamespaceString&)>;
-
ShardingState();
~ShardingState();
@@ -161,7 +155,7 @@ public:
*/
void setShardName(const std::string& shardName);
- CollectionShardingState* getNS(const std::string& ns, OperationContext* txn);
+ CollectionShardingState* getNS(const std::string& ns);
/**
* Clears the collection metadata cache after step down.
@@ -249,23 +243,10 @@ public:
void setGlobalInitMethodForTest(GlobalInitFunc func);
/**
- * Schedules for the range to clean of the given namespace to be deleted.
- * Behavior can be modified through setScheduleCleanupFunctionForTest.
- */
- void scheduleCleanup(const NamespaceString& nss);
-
- /**
* Returns a pointer to the collection range deleter task executor.
*/
executor::ThreadPoolTaskExecutor* getRangeDeleterTaskExecutor();
- /**
- * Sets the function used by scheduleWorkOnRangeDeleterTaskExecutor to
- * schedule work. Used for mocking the executor for testing. See the ShardingState
- * for the default implementation of _scheduleWorkFn.
- */
- void setScheduleCleanupFunctionForTest(RangeDeleterCleanupNotificationFunc fn);
-
private:
friend class ScopedRegisterMigration;
@@ -382,10 +363,6 @@ private:
// Function for initializing the external sharding state components not owned here.
GlobalInitFunc _globalInit;
- // Function for scheduling work on the _rangeDeleterTaskExecutor.
- // Used in call to scheduleCleanup(NamespaceString).
- RangeDeleterCleanupNotificationFunc _scheduleWorkFn;
-
// Task executor for the collection range deleter.
std::unique_ptr<executor::ThreadPoolTaskExecutor> _rangeDeleterTaskExecutor;
};
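
Note: the removed _scheduleWorkFn / setScheduleCleanupFunctionForTest pair above is a standard test seam: production code wires a default std::function, and tests swap in a no-op or spy (as the deleted MetadataManagerTest::setUp did). A minimal sketch of the pattern with hypothetical names:

    #include <functional>
    #include <string>

    class CleanupScheduler {
    public:
        using ScheduleFn = std::function<void(const std::string&)>;

        CleanupScheduler()
            : _scheduleFn([](const std::string& ns) {
                  // default: schedule real deleter work for `ns`
              }) {}

        // Test seam: replace the scheduling behavior, e.g. with a no-op.
        void setScheduleFnForTest(ScheduleFn fn) { _scheduleFn = std::move(fn); }

        void scheduleCleanup(const std::string& ns) { _scheduleFn(ns); }

    private:
        ScheduleFn _scheduleFn;
    };
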
diff --git a/src/mongo/db/service_context_d_test_fixture.cpp b/src/mongo/db/service_context_d_test_fixture.cpp
index 5108d9ae2db..a2e6edcbb29 100644
--- a/src/mongo/db/service_context_d_test_fixture.cpp
+++ b/src/mongo/db/service_context_d_test_fixture.cpp
@@ -42,12 +42,10 @@
#include "mongo/db/service_context_d.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/unittest/temp_dir.h"
-#include "mongo/util/scopeguard.h"
namespace mongo {
void ServiceContextMongoDTest::setUp() {
- Client::initThread(getThreadName().c_str());
ServiceContext* serviceContext = getGlobalServiceContext();
if (!serviceContext->getGlobalStorageEngine()) {
// When using the "ephemeralForTest" storage engine, it is fine for the temporary directory
@@ -62,15 +60,10 @@ void ServiceContextMongoDTest::setUp() {
}
void ServiceContextMongoDTest::tearDown() {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
auto txn = cc().makeOperationContext();
_dropAllDBs(txn.get());
}
-ServiceContext* ServiceContextMongoDTest::getServiceContext() {
- return getGlobalServiceContext();
-}
-
void ServiceContextMongoDTest::_dropAllDBs(OperationContext* txn) {
dropAllDatabasesExceptLocal(txn);
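
Note: the tearDown() hunk above drops an ON_BLOCK_EXIT guard that destroyed the thread's Client on scope exit. As a generic illustration of the scope-guard idiom behind ON_BLOCK_EXIT (not MongoDB's actual util/scopeguard.h implementation), the guard runs its callable from the destructor, so cleanup fires on normal return and on exception alike. Assumes C++17 for class template argument deduction.

    #include <utility>

    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F fn) : _fn(std::move(fn)) {}
        ~ScopeGuard() { _fn(); }  // runs on every scope exit
        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

    private:
        F _fn;
    };

    // Usage, mirroring the removed tearDown() line:
    //   ScopeGuard guard([&] { /* Client::destroy(); */ });
    //   auto txn = cc().makeOperationContext();  // guard fires when the scope ends
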
diff --git a/src/mongo/db/service_context_d_test_fixture.h b/src/mongo/db/service_context_d_test_fixture.h
index d6eff14c72d..2cfb6f1cb89 100644
--- a/src/mongo/db/service_context_d_test_fixture.h
+++ b/src/mongo/db/service_context_d_test_fixture.h
@@ -32,7 +32,6 @@
namespace mongo {
-class ServiceContext;
class OperationContext;
/**
@@ -51,12 +50,6 @@ protected:
void tearDown() override;
/**
- * Returns a service context, which is only valid for this instance of the test.
- * Must not be called before setUp or after tearDown.
- */
- ServiceContext* getServiceContext();
-
- /**
* Drops all databases. Call this before global ReplicationCoordinator is destroyed -- it is
* used to drop the databases.
*/