author     Dianna Hohensee <dianna.hohensee@10gen.com>  2017-02-21 15:54:10 -0500
committer  Dianna Hohensee <dianna.hohensee@10gen.com>  2017-03-01 11:14:23 -0500
commit     a887cc06efb80e746a476d6e5a12a1e2033df8b2 (patch)
tree       18e1d5960e64fc7fdb70045c0d76ab21b14eb494
parent     d29e95a056c2522f4bfd57e417970e7ecf18266c (diff)
SERVER-27704 persist chunk metadata on shard primary
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.cpp                         4
-rw-r--r--  src/mongo/db/repl/replication_coordinator_mock.h                           4
-rw-r--r--  src/mongo/db/s/SConscript                                                 19
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp                               68
-rw-r--r--  src/mongo/db/s/metadata_loader.cpp                                       145
-rw-r--r--  src/mongo/db/s/metadata_loader.h                                          27
-rw-r--r--  src/mongo/db/s/metadata_loader_test.cpp                                  348
-rw-r--r--  src/mongo/db/s/sharding_state.cpp                                          4
-rw-r--r--  src/mongo/s/SConscript                                                    12
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_impl.h                        3
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp    3
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test_fixture.cpp                     10
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_test_fixture.h                        1
-rw-r--r--  src/mongo/s/shard_server_test_fixture.cpp                                109
-rw-r--r--  src/mongo/s/shard_server_test_fixture.h                                   88
-rw-r--r--  src/mongo/s/sharding_mongod_test_fixture.cpp                               1
16 files changed, 594 insertions, 252 deletions
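
In short: during a metadata refresh, MetadataLoader now persists the chunks it fetched from the config server into a shard-local collection named config.chunks.<ns>, but only when the node is a shard primary or standalone that can accept writes. A condensed sketch of the new path, adapted from the _initChunks changes in the metadata_loader.cpp diff below (the diffQuery arguments are inferred from the surrounding code; error handling elided):

    // Condensed sketch of the refresh path added by this commit.
    std::vector<ChunkType> chunks;
    Status status = catalogClient->getChunks(txn,
                                             diffQuery.query,
                                             diffQuery.sort,
                                             boost::none,
                                             &chunks,
                                             nullptr,
                                             repl::ReadConcernLevel::kMajorityReadConcern);
    if (status.isOK()) {
        // New: persist the fetched chunks locally on primaries and standalones.
        status = _writeNewChunksIfPrimary(
            txn, NamespaceString(ns), chunks, metadata->_collVersion.epoch());
    }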
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index 38c4408eebc..b037d333018 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -474,5 +474,9 @@ void ReplicationCoordinatorMock::alwaysAllowWrites(bool allowWrites) {
_alwaysAllowWrites = allowWrites;
}
+void ReplicationCoordinatorMock::setMaster(bool isMaster) {
+ _settings.setMaster(isMaster);
+}
+
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h
index 5fed6539829..640aac82254 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_mock.h
@@ -273,6 +273,8 @@ public:
*/
void alwaysAllowWrites(bool allowWrites);
+ void setMaster(bool isMaster);
+
virtual ServiceContext* getServiceContext() override {
return _service;
}
@@ -280,7 +282,7 @@ public:
private:
AtomicUInt64 _snapshotNameGenerator;
ServiceContext* const _service;
- const ReplSettings _settings;
+ ReplSettings _settings;
MemberState _memberState;
OpTime _myLastDurableOpTime;
OpTime _myLastAppliedOpTime;
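
ReplSettings in the mock loses its const qualifier so that the new setMaster() hook can mutate it after construction. The sharding fixture (see the sharding_mongod_test_fixture.cpp hunk at the end of this diff) enables master mode so the mock coordinator reports it can accept writes, which the new persistence path checks via canAcceptWritesForDatabase(). A small illustrative sketch (replCoordMock is a hypothetical pointer to the mock):

    // Let _writeNewChunksIfPrimary persist chunks under test:
    replCoordMock->setMaster(true);
    // Or simulate a node that cannot accept writes, so persistence is skipped:
    // replCoordMock->setMaster(false);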
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 07f1971a7fe..f5c62c47fa6 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -12,11 +12,14 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/client/clientdriver',
+ '$BUILD_DIR/mongo/db/dbdirectclient',
'$BUILD_DIR/mongo/db/common',
'$BUILD_DIR/mongo/db/range_arithmetic',
- '$BUILD_DIR/mongo/s/common',
+ '$BUILD_DIR/mongo/db/repl/repl_coordinator_impl',
'$BUILD_DIR/mongo/db/service_context',
- ]
+ '$BUILD_DIR/mongo/s/common',
+ ],
)
env.Library(
@@ -203,19 +206,13 @@ env.CppUnitTest(
env.CppUnitTest(
target='sharding_metadata_test',
source=[
- 'metadata_loader_test.cpp',
'collection_metadata_test.cpp',
+ 'metadata_loader_test.cpp',
],
LIBDEPS=[
'metadata',
- '$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
- '$BUILD_DIR/mongo/db/common',
- '$BUILD_DIR/mongo/db/service_context_noop_init',
- '$BUILD_DIR/mongo/s/catalog/sharding_catalog_test_fixture',
- '$BUILD_DIR/mongo/s/coreshard',
- '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
- '$BUILD_DIR/mongo/s/sharding_test_fixture',
- ]
+ '$BUILD_DIR/mongo/s/shard_server_test_fixture',
+ ],
)
env.CppUnitTest(
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index c75405f33bb..778a28285d9 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -33,10 +33,11 @@
#include "mongo/db/commands.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/metadata_loader.h"
-#include "mongo/s/catalog/sharding_catalog_test_fixture.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk_version.h"
+#include "mongo/s/shard_server_test_fixture.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
namespace {
@@ -46,12 +47,10 @@ using std::unique_ptr;
using std::vector;
using unittest::assertGet;
-class NoChunkFixture : public ShardingCatalogTestFixture {
+class NoChunkFixture : public ShardServerTestFixture {
protected:
void setUp() {
- ShardingCatalogTestFixture::setUp();
- setRemote(HostAndPort("FakeRemoteClient:34567"));
- configTargeter()->setFindHostReturnValue(configHost);
+ ShardServerTestFixture::setUp();
OID epoch = OID::gen();
@@ -75,7 +74,11 @@ protected:
std::vector<BSONObj> chunksToSend{chunkType.toConfigBSON()};
auto future = launchAsync([this] {
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
+
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
"test.foo",
"shard0000",
@@ -97,7 +100,6 @@ protected:
private:
CollectionMetadata _metadata;
- const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
};
TEST_F(NoChunkFixture, BasicBelongsToMe) {
@@ -278,12 +280,10 @@ TEST_F(NoChunkFixture, PendingOrphanedDataRanges) {
* Fixture with single chunk containing:
* [10->20)
*/
-class SingleChunkFixture : public ShardingCatalogTestFixture {
+class SingleChunkFixture : public ShardServerTestFixture {
protected:
void setUp() {
- ShardingCatalogTestFixture::setUp();
- setRemote(HostAndPort("FakeRemoteClient:34567"));
- configTargeter()->setFindHostReturnValue(configHost);
+ ShardServerTestFixture::setUp();
CollectionType collType;
collType.setNs(NamespaceString{"test.foo"});
@@ -303,7 +303,11 @@ protected:
std::vector<BSONObj> chunksToSend{fooSingle};
auto future = launchAsync([this] {
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
+
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
"test.foo",
"shard0000",
@@ -326,7 +330,6 @@ protected:
private:
CollectionMetadata _metadata;
- const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
};
TEST_F(SingleChunkFixture, BasicBelongsToMe) {
@@ -398,12 +401,10 @@ TEST_F(SingleChunkFixture, ChunkOrphanedDataRanges) {
* Fixture with single chunk containing:
* [(min, min)->(max, max))
*/
-class SingleChunkMinMaxCompoundKeyFixture : public ShardingCatalogTestFixture {
+class SingleChunkMinMaxCompoundKeyFixture : public ShardServerTestFixture {
protected:
void setUp() {
- ShardingCatalogTestFixture::setUp();
- setRemote(HostAndPort("FakeRemoteClient:34567"));
- configTargeter()->setFindHostReturnValue(configHost);
+ ShardServerTestFixture::setUp();
OID epoch = OID::gen();
@@ -427,7 +428,11 @@ protected:
std::vector<BSONObj> chunksToSend{fooSingle};
auto future = launchAsync([this] {
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
+
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
"test.foo",
"shard0000",
@@ -450,7 +455,6 @@ protected:
private:
CollectionMetadata _metadata;
- const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
};
// Note: no tests for single key belongsToMe because they are not allowed
@@ -467,12 +471,10 @@ TEST_F(SingleChunkMinMaxCompoundKeyFixture, CompoudKeyBelongsToMe) {
* Fixture with chunks:
* [(10, 0)->(20, 0)), [(30, 0)->(40, 0))
*/
-class TwoChunksWithGapCompoundKeyFixture : public ShardingCatalogTestFixture {
+class TwoChunksWithGapCompoundKeyFixture : public ShardServerTestFixture {
protected:
void setUp() {
- ShardingCatalogTestFixture::setUp();
- setRemote(HostAndPort("FakeRemoteClient:34567"));
- configTargeter()->setFindHostReturnValue(configHost);
+ ShardServerTestFixture::setUp();
ChunkVersion chunkVersion = ChunkVersion(1, 0, OID::gen());
@@ -505,7 +507,11 @@ protected:
<< ChunkType::shard("shard0000")));
auto future = launchAsync([this] {
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
+
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
"test.foo",
"shard0000",
@@ -526,7 +532,6 @@ protected:
private:
CollectionMetadata _metadata;
- const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
};
TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapOrphanedDataRanges) {
@@ -574,12 +579,10 @@ TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapAndPendingOrphanedDataRanges)
* Fixture with chunk containing:
* [min->10) , [10->20) , <gap> , [30->max)
*/
-class ThreeChunkWithRangeGapFixture : public ShardingCatalogTestFixture {
+class ThreeChunkWithRangeGapFixture : public ShardServerTestFixture {
protected:
void setUp() {
- ShardingCatalogTestFixture::setUp();
- setRemote(HostAndPort("FakeRemoteClient:34567"));
- configTargeter()->setFindHostReturnValue(configHost);
+ ShardServerTestFixture::setUp();
OID epoch = OID::gen();
@@ -628,7 +631,11 @@ protected:
}
auto future = launchAsync([this] {
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
+
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
"test.foo",
"shard0000",
@@ -649,7 +656,6 @@ protected:
private:
CollectionMetadata _metadata;
- const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
};
TEST_F(ThreeChunkWithRangeGapFixture, ChunkVersionsMatch) {
diff --git a/src/mongo/db/s/metadata_loader.cpp b/src/mongo/db/s/metadata_loader.cpp
index 43e718dbe9b..762b1382412 100644
--- a/src/mongo/db/s/metadata_loader.cpp
+++ b/src/mongo/db/s/metadata_loader.cpp
@@ -34,12 +34,18 @@
#include <vector>
+#include "mongo/client/dbclientinterface.h"
+#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/s/collection_metadata.h"
+#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/rpc/unique_message.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk_diff.h"
#include "mongo/s/chunk_version.h"
+#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -109,11 +115,14 @@ Status MetadataLoader::_initCollection(OperationContext* txn,
const string& ns,
const string& shard,
CollectionMetadata* metadata) {
+ // Get the config.collections entry for 'ns'.
auto coll = catalogClient->getCollection(txn, ns);
if (!coll.isOK()) {
return coll.getStatus();
}
+ // Check that the collection hasn't been dropped. Note that passing this check does not
+ // rule out that the collection was dropped and then recreated.
const auto& collInfo = coll.getValue().value;
if (collInfo.getDropped()) {
return {ErrorCodes::NamespaceNotFound,
@@ -162,9 +171,9 @@ Status MetadataLoader::_initChunks(OperationContext* txn,
<< " using old metadata w/ version " << oldMetadata->getShardVersion() << " and "
<< metadata->_chunksMap.size() << " chunks";
} else {
- warning() << "reloading collection metadata for " << ns << " with new epoch "
- << epoch.toString() << ", the current epoch is "
- << oldMetadata->getCollVersion().epoch().toString();
+ log() << "reloading collection metadata for " << ns << " with new epoch "
+ << epoch.toString() << ", the current epoch is "
+ << oldMetadata->getCollVersion().epoch().toString();
}
}
@@ -174,6 +183,7 @@ Status MetadataLoader::_initChunks(OperationContext* txn,
ns, &metadata->_chunksMap, &metadata->_collVersion, &versionMap, shard);
try {
+ // Get any new chunks.
std::vector<ChunkType> chunks;
const auto diffQuery = differ.configDiffQuery();
Status status = catalogClient->getChunks(txn,
@@ -183,6 +193,14 @@ Status MetadataLoader::_initChunks(OperationContext* txn,
&chunks,
nullptr,
repl::ReadConcernLevel::kMajorityReadConcern);
+
+ if (!status.isOK()) {
+ return status;
+ }
+
+ // If we are the primary, or a standalone, persist new chunks locally.
+ status = _writeNewChunksIfPrimary(
+ txn, NamespaceString(ns), chunks, metadata->_collVersion.epoch());
if (!status.isOK()) {
return status;
}
@@ -216,7 +234,6 @@ Status MetadataLoader::_initChunks(OperationContext* txn,
// If this is a full reload, assume it is a drop for backwards compatibility
// TODO: drop the config.collections entry *before* the chunks and eliminate this
// ambiguity
-
return {fullReload ? ErrorCodes::NamespaceNotFound : ErrorCodes::RemoteChangeDetected,
str::stream() << "No chunks found when reloading " << ns
<< ", previous version was "
@@ -236,4 +253,124 @@ Status MetadataLoader::_initChunks(OperationContext* txn,
}
}
+Status MetadataLoader::_writeNewChunksIfPrimary(OperationContext* txn,
+ const NamespaceString& nss,
+ const std::vector<ChunkType>& chunks,
+ const OID& currEpoch) {
+ NamespaceString chunkMetadataNss(ChunkType::ConfigNS + "." + nss.ns());
+
+ // Only do the write(s) if this is a primary or standalone. Otherwise, return OK.
+ if (serverGlobalParams.clusterRole != ClusterRole::ShardServer ||
+ !repl::ReplicationCoordinator::get(txn)->canAcceptWritesForDatabase(
+ chunkMetadataNss.ns())) {
+ return Status::OK();
+ }
+
+ try {
+ DBDirectClient client(txn);
+
+ /**
+ * Below are examples of the operations on the config server that update the
+ * config.chunks collection. 'chunks' contains only the chunks that result from these
+ * operations (i.e. those still readable from the config server), not any chunks that
+ * were removed, so we must delete any local chunks that overlap with the new 'chunks'.
+ *
+ * CollectionVersion = 10.3
+ *
+ * moveChunk
+ * {_id: 3, max: 5, version: 10.1} --> {_id: 3, max: 5, version: 11.0}
+ *
+ * splitChunk
+ * {_id: 3, max: 9, version 10.3} --> {_id: 3, max: 5, version 10.4}
+ * {_id: 5, max: 8, version 10.5}
+ * {_id: 8, max: 9, version 10.6}
+ *
+ * mergeChunk
+ * {_id: 10, max: 14, version 4.3} --> {_id: 10, max: 22, version 10.4}
+ * {_id: 14, max: 19, version 7.1}
+ * {_id: 19, max: 22, version 2.0}
+ *
+ */
+ for (auto& chunk : chunks) {
+ // Check for a different epoch.
+ if (!chunk.getVersion().hasEqualEpoch(currEpoch)) {
+ // This means the collection was dropped and recreated. Drop the chunk metadata
+ // and return.
+ rpc::UniqueReply commandResponse =
+ client.runCommandWithMetadata(chunkMetadataNss.db().toString(),
+ "drop",
+ rpc::makeEmptyMetadata(),
+ BSON("drop" << chunkMetadataNss.coll()));
+ Status status = getStatusFromCommandResult(commandResponse->getCommandReply());
+
+ // A NamespaceNotFound error is okay because it's possible that we find a new epoch
+ // twice in a row before ever inserting documents.
+ if (!status.isOK() && status.code() != ErrorCodes::NamespaceNotFound) {
+ return status;
+ }
+
+ return Status{ErrorCodes::RemoteChangeDetected,
+ str::stream() << "Invalid chunks found when reloading '"
+ << nss.toString()
+ << "'. Previous collection epoch was '"
+ << currEpoch.toString()
+ << "', but unexpectedly found a new epoch '"
+ << chunk.getVersion().epoch().toString()
+ << "'. Collection was dropped and recreated."};
+ }
+
+ // Delete any overlapping chunk ranges. Overlapping chunks will have a min value
+ // ("_id") in [chunk.min, chunk.max), matching the range query below.
+ //
+ // query: { "_id" : {"$gte": chunk.min, "$lt": chunk.max}}
+ auto deleteDocs(stdx::make_unique<BatchedDeleteDocument>());
+ deleteDocs->setQuery(BSON(ChunkType::minShardID << BSON(
+ "$gte" << chunk.getMin() << "$lt" << chunk.getMax())));
+ deleteDocs->setLimit(0);
+
+ auto deleteRequest(stdx::make_unique<BatchedDeleteRequest>());
+ deleteRequest->addToDeletes(deleteDocs.release());
+
+ BatchedCommandRequest batchedDeleteRequest(deleteRequest.release());
+ batchedDeleteRequest.setNS(chunkMetadataNss);
+ const BSONObj deleteCmdObj = batchedDeleteRequest.toBSON();
+
+ rpc::UniqueReply deleteCommandResponse =
+ client.runCommandWithMetadata(chunkMetadataNss.db().toString(),
+ deleteCmdObj.firstElementFieldName(),
+ rpc::makeEmptyMetadata(),
+ deleteCmdObj);
+ auto deleteStatus =
+ getStatusFromCommandResult(deleteCommandResponse->getCommandReply());
+
+ if (!deleteStatus.isOK()) {
+ return deleteStatus;
+ }
+
+ // Now the document can be expected to cleanly insert without overlap.
+ auto insert(stdx::make_unique<BatchedInsertRequest>());
+ insert->addToDocuments(chunk.toShardBSON());
+
+ BatchedCommandRequest insertRequest(insert.release());
+ insertRequest.setNS(chunkMetadataNss);
+ const BSONObj insertCmdObj = insertRequest.toBSON();
+
+ rpc::UniqueReply commandResponse =
+ client.runCommandWithMetadata(chunkMetadataNss.db().toString(),
+ insertCmdObj.firstElementFieldName(),
+ rpc::makeEmptyMetadata(),
+ insertCmdObj);
+ auto insertStatus = getStatusFromCommandResult(commandResponse->getCommandReply());
+
+ if (!insertStatus.isOK()) {
+ return insertStatus;
+ }
+ }
+
+ return Status::OK();
+ } catch (const DBException& ex) {
+ return ex.toStatus();
+ }
+}
+
} // namespace mongo
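
For each refreshed chunk, _writeNewChunksIfPrimary above performs an epoch check, a ranged delete, and an insert. A condensed sketch of that loop (dropLocalCollection, deleteLocalDocs, and insertLocalDoc are hypothetical shorthands for the DBDirectClient command plumbing shown in the hunk):

    for (const auto& chunk : chunks) {
        if (!chunk.getVersion().hasEqualEpoch(currEpoch)) {
            // Epoch changed: the collection was dropped and recreated, so the
            // local chunk metadata is stale. Drop it and report the change.
            dropLocalCollection(chunkMetadataNss);  // "drop" via DBDirectClient
            return {ErrorCodes::RemoteChangeDetected, "collection was dropped and recreated"};
        }
        // Remove stale local chunks whose min key falls in [chunk.min, chunk.max)...
        deleteLocalDocs(chunkMetadataNss,
                        BSON(ChunkType::minShardID
                             << BSON("$gte" << chunk.getMin() << "$lt" << chunk.getMax())));
        // ...so the refreshed chunk inserts without overlapping an existing document.
        insertLocalDoc(chunkMetadataNss, chunk.toShardBSON());
    }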
diff --git a/src/mongo/db/s/metadata_loader.h b/src/mongo/db/s/metadata_loader.h
index e4624a73f8f..8c12233da2e 100644
--- a/src/mongo/db/s/metadata_loader.h
+++ b/src/mongo/db/s/metadata_loader.h
@@ -29,14 +29,18 @@
#pragma once
#include <string>
+#include <vector>
#include "mongo/base/status.h"
namespace mongo {
class ShardingCatalogClient;
+class ChunkType;
class CollectionMetadata;
class CollectionType;
+class NamespaceString;
+class OID;
class OperationContext;
/**
@@ -125,6 +129,29 @@ private:
const std::string& shard,
const CollectionMetadata* oldMetadata,
CollectionMetadata* metadata);
+
+
+ /**
+ * Takes a vector of 'chunks' and updates the config.chunks.ns collection specified by 'nss'.
+ * Any chunk documents in config.chunks.ns that overlap with a chunk in 'chunks' are removed
+ * before the new chunk document is inserted. If the epoch of any chunk in 'chunks' does not
+ * match 'currEpoch', the chunk metadata collection is dropped.
+ *
+ * @nss - the regular collection namespace for which chunk metadata is being updated.
+ * @chunks - a range of chunks retrieved from the config server, sorted in ascending chunk
+ * version order.
+ * @currEpoch - what this shard server knows to be the collection epoch.
+ *
+ * Returns:
+ * - OK if this node is not the primary (so no writes are needed) or if all writes succeed.
+ * - RemoteChangeDetected if the chunk version epoch of any chunk in 'chunks' differs from
+ * 'currEpoch'.
+ * - Other errors if writes/reads to the config.chunks.ns collection fail.
+ */
+ static Status _writeNewChunksIfPrimary(OperationContext* txn,
+ const NamespaceString& nss,
+ const std::vector<ChunkType>& chunks,
+ const OID& currEpoch);
};
} // namespace mongo
diff --git a/src/mongo/db/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
index 99a4cf065bf..964cffe9071 100644
--- a/src/mongo/db/s/metadata_loader_test.cpp
+++ b/src/mongo/db/s/metadata_loader_test.cpp
@@ -29,12 +29,23 @@
#include "mongo/platform/basic.h"
#include "mongo/base/status.h"
+#include "mongo/client/dbclientinterface.h"
#include "mongo/client/remote_command_targeter_mock.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/metadata_loader.h"
-#include "mongo/s/catalog/sharding_catalog_test_fixture.h"
+#include "mongo/db/server_options.h"
+#include "mongo/s/catalog/dist_lock_catalog_mock.h"
+#include "mongo/s/catalog/dist_lock_manager_mock.h"
+#include "mongo/s/catalog/sharding_catalog_client_impl.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
+#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/shard_server_test_fixture.h"
+#include "mongo/stdx/memory.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
namespace {
@@ -42,24 +53,52 @@ namespace {
using std::string;
using std::unique_ptr;
using std::vector;
+using unittest::assertGet;
-class MetadataLoaderFixture : public ShardingCatalogTestFixture {
-public:
- MetadataLoaderFixture() = default;
- ~MetadataLoaderFixture() = default;
+const HostAndPort kConfigHostAndPort = HostAndPort("dummy", 123);
+const NamespaceString kNss = NamespaceString("test.foo");
+const NamespaceString kChunkMetadataNss = NamespaceString("config.chunks.test.foo");
+const ShardId kShardId = ShardId("shard0");
+class MetadataLoaderTest : public ShardServerTestFixture {
protected:
- void setUp() override {
- ShardingCatalogTestFixture::setUp();
- setRemote(HostAndPort("FakeRemoteClient:34567"));
- configTargeter()->setFindHostReturnValue(configHost);
- _maxCollVersion = ChunkVersion(1, 0, OID::gen());
- _loader.reset(new MetadataLoader);
+ /**
+ * Goes through the chunks in 'metadata' and uses a DBDirectClient to check that they are all
+ * persisted. Also checks the total count of persisted chunks against
+ * 'totalNumChunksPersisted', since 'metadata' is only aware of this shard's own chunks.
+ */
+ void checkCollectionMetadataChunksMatchPersistedChunks(
+ const NamespaceString& nss,
+ const CollectionMetadata& metadata,
+ unsigned long long totalNumChunksPersisted) {
+ try {
+ DBDirectClient client(operationContext());
+ ASSERT_EQUALS(client.count(nss.ns()), totalNumChunksPersisted);
+
+ auto chunks = metadata.getChunks();
+ for (auto& chunk : chunks) {
+ Query query(BSON(ChunkType::minShardID() << chunk.first << ChunkType::max()
+ << chunk.second.getMaxKey()));
+ query.readPref(ReadPreference::Nearest, BSONArray());
+
+ std::unique_ptr<DBClientCursor> cursor = client.query(nss.ns().c_str(), query, 1);
+ ASSERT(cursor);
+
+ ASSERT(cursor->more());
+ BSONObj queryResult = cursor->nextSafe();
+ ChunkType foundChunk = assertGet(
+ ChunkType::fromShardBSON(queryResult, chunk.second.getVersion().epoch()));
+ ASSERT_BSONOBJ_EQ(chunk.first, foundChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunk.second.getMaxKey(), foundChunk.getMax());
+ }
+ } catch (const DBException& ex) {
+ ASSERT(false);
+ }
}
void expectFindOnConfigSendCollectionDefault() {
CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
+ collType.setNs(kNss);
collType.setKeyPattern(BSON("a" << 1));
collType.setUnique(false);
collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
@@ -71,62 +110,37 @@ protected:
void expectFindOnConfigSendChunksDefault() {
BSONObj chunk = BSON(
ChunkType::name("test.foo-a_MinKey")
- << ChunkType::ns("test.foo")
+ << ChunkType::ns(kNss.ns())
<< ChunkType::min(BSON("a" << MINKEY))
<< ChunkType::max(BSON("a" << MAXKEY))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(_maxCollVersion.toLong()))
<< ChunkType::DEPRECATED_epoch(_maxCollVersion.epoch())
- << ChunkType::shard("shard0000"));
+ << ChunkType::shard(kShardId.toString()));
expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunk});
}
- MetadataLoader& loader() const {
- return *_loader;
- }
-
- void getMetadataFor(const OwnedPointerVector<ChunkType>& chunks, CollectionMetadata* metadata) {
- // Infer namespace, shard, epoch, keypattern from first chunk
- const ChunkType* firstChunk = *(chunks.vector().begin());
- const string ns = firstChunk->getNS();
- const string shardName = firstChunk->getShard().toString();
- const OID epoch = firstChunk->getVersion().epoch();
-
- CollectionType coll;
- coll.setNs(NamespaceString{ns});
- coll.setKeyPattern(BSON("a" << 1));
- coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- coll.setEpoch(epoch);
- ASSERT_OK(coll.validate());
- std::vector<BSONObj> collToSend{coll.toBSON()};
-
- ChunkVersion version(1, 0, epoch);
- std::vector<BSONObj> chunksToSend;
- for (const auto chunkVal : chunks.vector()) {
- ChunkType chunk(*chunkVal);
-
- if (!chunk.isVersionSet()) {
- chunk.setVersion(version);
- version.incMajor();
- }
-
- ASSERT(chunk.validate().isOK());
- chunksToSend.push_back(chunk.toConfigBSON());
+ /**
+ * Helper to make a number of chunks that can then be manipulated in various ways in the tests.
+ */
+ std::vector<BSONObj> makeFourChunks() {
+ std::vector<BSONObj> chunks;
+ std::string names[] = {
+ "test.foo-a_MinKey", "test.foo-a_10", "test.foo-a_50", "test.foo-a_100"};
+ BSONObj mins[] = {BSON("a" << MINKEY), BSON("a" << 10), BSON("a" << 50), BSON("a" << 100)};
+ BSONObj maxs[] = {BSON("a" << 10), BSON("a" << 50), BSON("a" << 100), BSON("a" << MAXKEY)};
+ for (int i = 0; i < 4; ++i) {
+ _maxCollVersion.incMajor();
+ BSONObj chunk = BSON(ChunkType::name(names[i])
+ << ChunkType::ns(kNss.ns())
+ << ChunkType::min(mins[i])
+ << ChunkType::max(maxs[i])
+ << ChunkType::DEPRECATED_lastmod(
+ Date_t::fromMillisSinceEpoch(_maxCollVersion.toLong()))
+ << ChunkType::DEPRECATED_epoch(_maxCollVersion.epoch())
+ << ChunkType::shard(kShardId.toString()));
+ chunks.push_back(std::move(chunk));
}
-
- auto future = launchAsync([this, ns, shardName, metadata] {
- auto status = loader().makeCollectionMetadata(operationContext(),
- catalogClient(),
- ns,
- shardName,
- NULL, /* no old metadata */
- metadata);
- ASSERT_OK(status);
- });
-
- expectFindOnConfigSendBSONObjVector(collToSend);
- expectFindOnConfigSendBSONObjVector(chunksToSend);
-
- future.timed_get(kFutureTimeout);
+ return chunks;
}
ChunkVersion getMaxCollVersion() const {
@@ -138,19 +152,16 @@ protected:
}
private:
- const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
-
- unique_ptr<MetadataLoader> _loader;
- ChunkVersion _maxCollVersion;
+ ChunkVersion _maxCollVersion{1, 0, OID::gen()};
};
// TODO: Test config server down
// TODO: Test read of chunks with new epoch
// TODO: Test that you can properly load config using format with deprecated fields?
-TEST_F(MetadataLoaderFixture, DroppedColl) {
+TEST_F(MetadataLoaderTest, DroppedColl) {
CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
+ collType.setNs(kNss);
collType.setKeyPattern(BSON("a" << 1));
collType.setUpdatedAt(Date_t());
collType.setEpoch(OID());
@@ -160,8 +171,8 @@ TEST_F(MetadataLoaderFixture, DroppedColl) {
CollectionMetadata metadata;
auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
catalogClient(),
- "test.foo",
- "shard0000",
+ kNss.ns(),
+ kShardId.toString(),
NULL, /* no old metadata */
&metadata);
ASSERT_EQUALS(status.code(), ErrorCodes::NamespaceNotFound);
@@ -170,13 +181,13 @@ TEST_F(MetadataLoaderFixture, DroppedColl) {
future.timed_get(kFutureTimeout);
}
-TEST_F(MetadataLoaderFixture, EmptyColl) {
+TEST_F(MetadataLoaderTest, EmptyColl) {
auto future = launchAsync([this] {
CollectionMetadata metadata;
auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
catalogClient(),
- "test.foo",
- "shard0000",
+ kNss.ns(),
+ kShardId.toString(),
NULL, /* no old metadata */
&metadata);
ASSERT_EQUALS(status.code(), ErrorCodes::NamespaceNotFound);
@@ -185,14 +196,14 @@ TEST_F(MetadataLoaderFixture, EmptyColl) {
future.timed_get(kFutureTimeout);
}
-TEST_F(MetadataLoaderFixture, BadColl) {
- BSONObj badCollToSend = BSON(CollectionType::fullNs("test.foo"));
+TEST_F(MetadataLoaderTest, BadColl) {
+ BSONObj badCollToSend = BSON(CollectionType::fullNs(kNss.ns()));
auto future = launchAsync([this] {
CollectionMetadata metadata;
auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
catalogClient(),
- "test.foo",
- "shard0000",
+ kNss.ns(),
+ kShardId.toString(),
NULL, /* no old metadata */
&metadata);
ASSERT_EQUALS(status.code(), ErrorCodes::NoSuchKey);
@@ -201,135 +212,108 @@ TEST_F(MetadataLoaderFixture, BadColl) {
future.timed_get(kFutureTimeout);
}
-TEST_F(MetadataLoaderFixture, BadChunk) {
- CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setKeyPattern(BSON("a" << 1));
- collType.setEpoch(OID::gen());
- ASSERT_OK(collType.validate());
-
+TEST_F(MetadataLoaderTest, BadChunk) {
ChunkType chunkInfo;
- chunkInfo.setNS(NamespaceString{"test.foo"}.ns());
- chunkInfo.setVersion(ChunkVersion(1, 0, collType.getEpoch()));
+ chunkInfo.setNS(kNss.ns());
+ chunkInfo.setVersion(ChunkVersion(1, 0, getMaxCollVersion().epoch()));
ASSERT(!chunkInfo.validate().isOK());
auto future = launchAsync([this] {
CollectionMetadata metadata;
auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
catalogClient(),
- "test.foo",
- "shard0000",
+ kNss.ns(),
+ kShardId.toString(),
NULL, /* no old metadata */
&metadata);
ASSERT_EQUALS(status.code(), ErrorCodes::NoSuchKey);
});
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
+ expectFindOnConfigSendCollectionDefault();
expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunkInfo.toConfigBSON()});
future.timed_get(kFutureTimeout);
}
-TEST_F(MetadataLoaderFixture, NoChunksIsDropped) {
- OID epoch = OID::gen();
-
- CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUnique(false);
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(epoch);
-
+TEST_F(MetadataLoaderTest, NoChunksIsDropped) {
auto future = launchAsync([this] {
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
+
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
- "test.foo",
- "shard0000",
+ kNss.ns(),
+ kShardId.toString(),
NULL, /* no old metadata */
&metadata);
// This is interpreted as a dropped ns, since we drop the chunks first
ASSERT_EQUALS(status.code(), ErrorCodes::NamespaceNotFound);
+
+ checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 0);
});
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- expectFindOnConfigSendErrorCode(ErrorCodes::NamespaceNotFound);
+ expectFindOnConfigSendCollectionDefault();
+ expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{});
+
future.timed_get(kFutureTimeout);
}
-TEST_F(MetadataLoaderFixture, CheckNumChunk) {
- OID epoch = OID::gen();
-
- CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUnique(false);
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(epoch);
- ASSERT_OK(collType.validate());
-
+TEST_F(MetadataLoaderTest, CheckNumChunk) {
// Need a chunk on another shard, otherwise the chunks are invalid in general and we
// can't load metadata
ChunkType chunkType;
- chunkType.setNS("test.foo");
- chunkType.setShard(ShardId("shard0001"));
+ chunkType.setNS(kNss.ns());
+ chunkType.setShard(ShardId("altshard"));
chunkType.setMin(BSON("a" << MINKEY));
chunkType.setMax(BSON("a" << MAXKEY));
- chunkType.setVersion(ChunkVersion(1, 0, epoch));
+ chunkType.setVersion(ChunkVersion(1, 0, getMaxCollVersion().epoch()));
ASSERT(chunkType.validate().isOK());
auto future = launchAsync([this] {
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
+
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
- "test.foo",
- "shard0000",
+ kNss.ns(),
+ kShardId.toString(),
NULL, /* no old metadata */
&metadata);
- std::cout << "status: " << status << std::endl;
ASSERT_OK(status);
ASSERT_EQUALS(0U, metadata.getNumChunks());
ASSERT_EQUALS(1, metadata.getCollVersion().majorVersion());
ASSERT_EQUALS(0, metadata.getShardVersion().majorVersion());
- ASSERT_NOT_EQUALS(OID(), metadata.getCollVersion().epoch());
- ASSERT_NOT_EQUALS(OID(), metadata.getShardVersion().epoch());
+
+ checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 1);
});
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
+ expectFindOnConfigSendCollectionDefault();
expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunkType.toConfigBSON()});
future.timed_get(kFutureTimeout);
}
-TEST_F(MetadataLoaderFixture, SingleChunkCheckNumChunk) {
+TEST_F(MetadataLoaderTest, SingleChunkCheckNumChunk) {
auto future = launchAsync([this] {
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
+
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
- "test.foo",
- "shard0000",
+ kNss.ns(),
+ kShardId.toString(),
NULL, /* no old metadata */
&metadata);
ASSERT_OK(status);
ASSERT_EQUALS(1U, metadata.getNumChunks());
- });
-
- expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendChunksDefault();
-
- future.timed_get(kFutureTimeout);
-}
+ ASSERT_EQUALS(getMaxCollVersion(), metadata.getCollVersion());
+ ASSERT_EQUALS(getMaxCollVersion(), metadata.getShardVersion());
-TEST_F(MetadataLoaderFixture, SingleChunkGetNext) {
- auto future = launchAsync([this] {
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
- catalogClient(),
- "test.foo",
- "shard0000",
- NULL, /* no old metadata */
- &metadata);
- ChunkType chunkInfo;
- ASSERT_TRUE(metadata.getNextChunk(metadata.getMinKey(), &chunkInfo));
+ checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 1);
});
expectFindOnConfigSendCollectionDefault();
@@ -338,73 +322,55 @@ TEST_F(MetadataLoaderFixture, SingleChunkGetNext) {
future.timed_get(kFutureTimeout);
}
-TEST_F(MetadataLoaderFixture, SingleChunkGetShardKey) {
+TEST_F(MetadataLoaderTest, SeveralChunksCheckNumChunks) {
auto future = launchAsync([this] {
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
+
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
- "test.foo",
- "shard0000",
+ kNss.ns(),
+ kShardId.toString(),
NULL, /* no old metadata */
&metadata);
- ChunkType chunkInfo;
- ASSERT_BSONOBJ_EQ(metadata.getKeyPattern(), BSON("a" << 1));
+ ASSERT_OK(status);
+ ASSERT_EQUALS(4U, metadata.getNumChunks());
+ ASSERT_EQUALS(getMaxCollVersion(), metadata.getCollVersion());
+ ASSERT_EQUALS(getMaxCollVersion(), metadata.getShardVersion());
+
+ checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 4);
});
expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendChunksDefault();
+ expectFindOnConfigSendBSONObjVector(makeFourChunks());
future.timed_get(kFutureTimeout);
}
-TEST_F(MetadataLoaderFixture, SingleChunkGetMaxCollVersion) {
+TEST_F(MetadataLoaderTest, CollectionMetadataSetUp) {
auto future = launchAsync([this] {
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
- catalogClient(),
- "test.foo",
- "shard0000",
- NULL, /* no old metadata */
- &metadata);
- ASSERT_TRUE(getMaxCollVersion().equals(metadata.getCollVersion()));
- });
-
- expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendChunksDefault();
+ ON_BLOCK_EXIT([&] { Client::destroy(); });
+ Client::initThreadIfNotAlready("Test");
+ auto txn = cc().makeOperationContext();
- future.timed_get(kFutureTimeout);
-}
-TEST_F(MetadataLoaderFixture, SingleChunkGetMaxShardVersion) {
- auto future = launchAsync([this] {
CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
+ auto status = MetadataLoader::makeCollectionMetadata(txn.get(),
catalogClient(),
- "test.foo",
- "shard0000",
+ kNss.ns(),
+ kShardId.toString(),
NULL, /* no old metadata */
&metadata);
+ ASSERT_BSONOBJ_EQ(metadata.getKeyPattern(), BSON("a" << 1));
+ ASSERT_TRUE(getMaxCollVersion().equals(metadata.getCollVersion()));
ASSERT_TRUE(getMaxShardVersion().equals(metadata.getShardVersion()));
- });
- expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendChunksDefault();
-
- future.timed_get(kFutureTimeout);
-}
-TEST_F(MetadataLoaderFixture, NoChunks) {
- auto future = launchAsync([this] {
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(operationContext(),
- catalogClient(),
- "test.foo",
- "shard0000",
- NULL, /* no old metadata */
- &metadata);
- // NSNotFound because we're reloading with no old metadata
- ASSERT_EQUALS(status.code(), ErrorCodes::NamespaceNotFound);
+ checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 1);
});
+
expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendErrorCode(ErrorCodes::NamespaceNotFound);
+ expectFindOnConfigSendChunksDefault();
future.timed_get(kFutureTimeout);
}
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index dda78ad8ae8..bc9932a387c 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -84,7 +84,9 @@ namespace {
const auto getShardingState = ServiceContext::declareDecoration<ShardingState>();
// Max number of concurrent config server refresh threads
-const int kMaxConfigServerRefreshThreads = 3;
+// TODO: temporarily decreased from 3 to 1 to serialize refresh writes. Alternate per-collection
+// serialization must be implemented: SERVER-28118
+const int kMaxConfigServerRefreshThreads = 1;
// Maximum number of times to try to refresh the collection metadata if conflicts are occurring
const int kMaxNumMetadataRefreshAttempts = 3;
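
The pool shrinks to one thread because the delete-then-insert sequence in _writeNewChunksIfPrimary is not atomic: two concurrent refreshes of the same collection could interleave their writes to config.chunks.<ns>. A hypothetical interleaving that a single refresh thread rules out:

    // Refresh A: delete local chunks overlapping [10, 50)
    // Refresh B: delete local chunks overlapping [10, 50)
    // Refresh A: insert {min: 10, max: 50, version: 10.4}
    // Refresh B: insert {min: 10, max: 50, version: 10.5}  // [10, 50) now covered twice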
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 1ff6448f4e5..365634efbf5 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -124,6 +124,18 @@ env.Library(
)
env.Library(
+ target='shard_server_test_fixture',
+ source=[
+ 'shard_server_test_fixture.cpp',
+ ],
+ LIBDEPS=[
+ 'sharding_mongod_test_fixture',
+ '$BUILD_DIR/mongo/s/catalog/dist_lock_catalog_mock',
+ '$BUILD_DIR/mongo/s/catalog/dist_lock_manager_mock',
+ ],
+)
+
+env.Library(
target='sharding_mongod_test_fixture',
source=[
'sharding_mongod_test_fixture.cpp',
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_impl.h b/src/mongo/s/catalog/sharding_catalog_manager_impl.h
index 842b11f6920..2b4cdd818a8 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_manager_impl.h
@@ -160,7 +160,8 @@ private:
/**
* Runs the listDatabases command on the specified host and returns the names of all databases
- * it returns excluding those named local and admin, since they serve administrative purpose.
+ * it returns excluding those named local, config and admin, since they serve administrative
+ * purposes.
*/
StatusWith<std::vector<std::string>> _getDBNamesListFromShard(
OperationContext* txn, std::shared_ptr<RemoteCommandTargeter> targeter);
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
index a58febc9b57..a228092a386 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
@@ -499,7 +499,8 @@ StatusWith<std::vector<std::string>> ShardingCatalogManagerImpl::_getDBNamesList
for (const auto& dbEntry : cmdResult["databases"].Obj()) {
const auto& dbName = dbEntry["name"].String();
- if (!(dbName == NamespaceString::kAdminDb || dbName == NamespaceString::kLocalDb)) {
+ if (!(dbName == NamespaceString::kAdminDb || dbName == NamespaceString::kLocalDb ||
+ dbName == NamespaceString::kConfigDb)) {
dbNames.push_back(dbName);
}
}
diff --git a/src/mongo/s/catalog/sharding_catalog_test_fixture.cpp b/src/mongo/s/catalog/sharding_catalog_test_fixture.cpp
index 39a53498ded..092407324f3 100644
--- a/src/mongo/s/catalog/sharding_catalog_test_fixture.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test_fixture.cpp
@@ -43,16 +43,6 @@ ShardingCatalogTestFixture::ShardingCatalogTestFixture() = default;
ShardingCatalogTestFixture::~ShardingCatalogTestFixture() = default;
-void ShardingCatalogTestFixture::expectFindOnConfigSendErrorCode(ErrorCodes::Error code) {
- onCommand([&, code](const RemoteCommandRequest& request) {
- ASSERT_EQ(request.target, HostAndPort(CONFIG_HOST_PORT));
- ASSERT_EQ(request.dbname, "config");
- BSONObjBuilder responseBuilder;
- Command::appendCommandStatus(responseBuilder, Status(code, ""));
- return responseBuilder.obj();
- });
-}
-
void ShardingCatalogTestFixture::expectFindOnConfigSendBSONObjVector(std::vector<BSONObj> obj) {
onFindCommand([&, obj](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, HostAndPort(CONFIG_HOST_PORT));
diff --git a/src/mongo/s/catalog/sharding_catalog_test_fixture.h b/src/mongo/s/catalog/sharding_catalog_test_fixture.h
index 386135d9a6e..4c13a0e8dda 100644
--- a/src/mongo/s/catalog/sharding_catalog_test_fixture.h
+++ b/src/mongo/s/catalog/sharding_catalog_test_fixture.h
@@ -46,7 +46,6 @@ public:
protected:
static const std::string CONFIG_HOST_PORT;
- void expectFindOnConfigSendErrorCode(ErrorCodes::Error code);
void expectFindOnConfigSendBSONObjVector(std::vector<BSONObj> obj);
};
} // namespace mongo
diff --git a/src/mongo/s/shard_server_test_fixture.cpp b/src/mongo/s/shard_server_test_fixture.cpp
new file mode 100644
index 00000000000..272d6a00641
--- /dev/null
+++ b/src/mongo/s/shard_server_test_fixture.cpp
@@ -0,0 +1,109 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/s/shard_server_test_fixture.h"
+
+#include "mongo/client/remote_command_targeter_mock.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/repl/replication_coordinator_mock.h"
+#include "mongo/s/catalog/dist_lock_catalog_mock.h"
+#include "mongo/s/catalog/dist_lock_manager_mock.h"
+#include "mongo/s/catalog/sharding_catalog_client_impl.h"
+#include "mongo/stdx/memory.h"
+
+namespace mongo {
+
+namespace {
+
+const HostAndPort kConfigHostAndPort("dummy", 123);
+
+} // namespace
+
+ShardServerTestFixture::ShardServerTestFixture() = default;
+
+ShardServerTestFixture::~ShardServerTestFixture() = default;
+
+std::shared_ptr<RemoteCommandTargeterMock> ShardServerTestFixture::configTargeterMock() {
+ return RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
+}
+
+void ShardServerTestFixture::expectFindOnConfigSendErrorCode(ErrorCodes::Error code) {
+ onCommand([&, code](const executor::RemoteCommandRequest& request) {
+ ASSERT_EQ(request.target, kConfigHostAndPort);
+ ASSERT_EQ(request.dbname, "config");
+ BSONObjBuilder responseBuilder;
+ Command::appendCommandStatus(responseBuilder, Status(code, ""));
+ return responseBuilder.obj();
+ });
+}
+
+void ShardServerTestFixture::expectFindOnConfigSendBSONObjVector(std::vector<BSONObj> obj) {
+ onFindCommand([&, obj](const executor::RemoteCommandRequest& request) {
+ ASSERT_EQ(request.target, kConfigHostAndPort);
+ ASSERT_EQ(request.dbname, "config");
+ return obj;
+ });
+}
+
+void ShardServerTestFixture::setUp() {
+ ShardingMongodTestFixture::setUp();
+
+ replicationCoordinator()->alwaysAllowWrites(true);
+
+ // Initialize sharding components as a shard server.
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+ uassertStatusOK(
+ initializeGlobalShardingStateForMongodForTest(ConnectionString(kConfigHostAndPort)));
+
+ // Set the findHost() return value on the mock targeter so that later attempts to
+ // target the config server resolve to kConfigHostAndPort.
+ configTargeterMock()->setFindHostReturnValue(kConfigHostAndPort);
+}
+
+std::unique_ptr<DistLockCatalog> ShardServerTestFixture::makeDistLockCatalog(
+ ShardRegistry* shardRegistry) {
+ invariant(shardRegistry);
+ return stdx::make_unique<DistLockCatalogMock>();
+}
+
+std::unique_ptr<DistLockManager> ShardServerTestFixture::makeDistLockManager(
+ std::unique_ptr<DistLockCatalog> distLockCatalog) {
+ invariant(distLockCatalog);
+ return stdx::make_unique<DistLockManagerMock>(std::move(distLockCatalog));
+}
+
+std::unique_ptr<ShardingCatalogClient> ShardServerTestFixture::makeShardingCatalogClient(
+ std::unique_ptr<DistLockManager> distLockManager) {
+ invariant(distLockManager);
+ return stdx::make_unique<ShardingCatalogClientImpl>(std::move(distLockManager));
+}
+
+} // namespace mongo
diff --git a/src/mongo/s/shard_server_test_fixture.h b/src/mongo/s/shard_server_test_fixture.h
new file mode 100644
index 00000000000..e3e15a4d780
--- /dev/null
+++ b/src/mongo/s/shard_server_test_fixture.h
@@ -0,0 +1,88 @@
+/**
+ * Copyright (C) 2017 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/db/server_options.h"
+#include "mongo/s/sharding_mongod_test_fixture.h"
+
+namespace mongo {
+
+class RemoteCommandTargeterMock;
+
+/**
+ * Test fixture for shard components, as opposed to config or mongos components.
+ * Has a mock network and ephemeral storage engine provided by ShardingMongodTestFixture,
+ * additionally sets up mock dist lock catalog and manager with a real catalog client.
+ */
+class ShardServerTestFixture : public ShardingMongodTestFixture {
+public:
+ ShardServerTestFixture();
+ ~ShardServerTestFixture();
+
+ /**
+ * Returns the mock targeter for the config server. It can be used like so:
+ *
+ * configTargeterMock()->setFindHostReturnValue(HostAndPort);
+ * configTargeterMock()->setFindHostReturnValue({ErrorCodes::InternalError, "can't target"})
+ *
+ * Remote calls always need to resolve a host with RemoteCommandTargeterMock::findHost, so it
+ * must be set.
+ */
+ std::shared_ptr<RemoteCommandTargeterMock> configTargeterMock();
+
+ void expectFindOnConfigSendErrorCode(ErrorCodes::Error code);
+
+ void expectFindOnConfigSendBSONObjVector(std::vector<BSONObj> obj);
+
+protected:
+ /**
+ * Sets up a ClusterRole::ShardServer replica set with a real catalog client and mock dist lock
+ * catalog and manager.
+ */
+ void setUp() override;
+
+ /**
+ * Creates a DistLockCatalogMock.
+ */
+ std::unique_ptr<DistLockCatalog> makeDistLockCatalog(ShardRegistry* shardRegistry) override;
+
+ /**
+ * Creates a DistLockManagerMock.
+ */
+ std::unique_ptr<DistLockManager> makeDistLockManager(
+ std::unique_ptr<DistLockCatalog> distLockCatalog) override;
+
+ /**
+ * Creates a real ShardingCatalogClient.
+ */
+ std::unique_ptr<ShardingCatalogClient> makeShardingCatalogClient(
+ std::unique_ptr<DistLockManager> distLockManager) override;
+};
+
+} // namespace mongo
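
Putting the fixture's hooks together, a test body might look like the following sketch (the host/port and the canned response document are illustrative):

    // Redirect config-server host resolution, or make it fail:
    configTargeterMock()->setFindHostReturnValue(HostAndPort("config", 12345));
    configTargeterMock()->setFindHostReturnValue(
        Status(ErrorCodes::InternalError, "can't target"));

    // Answer an in-flight find against the config server from the mock network:
    expectFindOnConfigSendBSONObjVector({BSON("_id" << 1)});
    // ...or fail it:
    expectFindOnConfigSendErrorCode(ErrorCodes::NamespaceNotFound);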
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index 35e91e579ec..96694e87e77 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -109,6 +109,7 @@ void ShardingMongodTestFixture::setUp() {
repl::ReplSettings replSettings;
replSettings.setReplSetString(ConnectionString::forReplicaSet(_setName, _servers).toString());
+ replSettings.setMaster(true);
auto replCoordPtr = makeReplicationCoordinator(replSettings);
_replCoord = replCoordPtr.get();