author     samantharitter <samantha.ritter@10gen.com>  2017-09-19 15:37:56 -0400
committer  samantharitter <samantha.ritter@10gen.com>  2017-09-28 14:52:13 -0400
commit     ba7208d5724dd53f10f360a2aece4d39e79e6638 (patch)
tree       2805d23d985f28bc1bcb78ce713d435addc086f8 /src
parent     291274d1d98fda95dced9f3e6dc7db73db6984c0 (diff)
SERVER-29027 Allow collections in the config db to be sharded
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/s/config/configsvr_enable_sharding_command.cpp            |   3
-rw-r--r--  src/mongo/db/s/config/configsvr_shard_collection_command.cpp           |  46
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp                            |   5
-rw-r--r--  src/mongo/s/catalog/SConscript                                          |   1
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_append_db_stats_test.cpp          | 221
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h                           |   9
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp                    |  59
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.h                      |   4
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.cpp                    |   5
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.h                      |   4
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager.h                          |   3
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp |  22
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp   |   7
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp         |  14
-rw-r--r--  src/mongo/s/client/shard_local_test.cpp                                 |   3
-rw-r--r--  src/mongo/s/client/shard_registry.cpp                                   |   5
-rw-r--r--  src/mongo/s/client/shard_registry.h                                     |   5
-rw-r--r--  src/mongo/s/client/shard_registry_data_test.cpp                         |   2
-rw-r--r--  src/mongo/s/commands/cluster_list_databases_cmd.cpp                     |  90
-rw-r--r--  src/mongo/s/commands/cluster_write.cpp                                  |  32
-rw-r--r--  src/mongo/shell/utils_sh.js                                             |  11
21 files changed, 187 insertions, 364 deletions
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index 13dc9f36081..f65cf04a3d9 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -112,8 +112,7 @@ public:
str::stream() << "invalid db name specified: " << dbname,
NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow));
- if (dbname == NamespaceString::kAdminDb || dbname == NamespaceString::kConfigDb ||
- dbname == NamespaceString::kLocalDb) {
+ if (dbname == NamespaceString::kAdminDb || dbname == NamespaceString::kLocalDb) {
return appendCommandStatus(result,
{ErrorCodes::InvalidOptions,
str::stream() << "can't shard " + dbname + " database"});
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index f3c78aca11b..3f95d89b31e 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/repl/repl_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
+#include "mongo/db/sessions_collection.h"
#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/sharding_catalog_manager.h"
#include "mongo/s/catalog/type_database.h"
@@ -151,7 +152,9 @@ void validateAndDeduceFullRequestOptions(OperationContext* opCtx,
!shardKeyPattern.isHashedPattern() || !request->getUnique());
// Ensure the namespace is valid.
- uassert(ErrorCodes::IllegalOperation, "can't shard system namespaces", !nss.isSystem());
+ uassert(ErrorCodes::IllegalOperation,
+ "can't shard system namespaces",
+ !nss.isSystem() || nss.ns() == SessionsCollection::kSessionsFullNS);
// Ensure the collation is valid. Currently we only allow the simple collation.
bool simpleCollationSpecified = false;
@@ -782,8 +785,42 @@ public:
Grid::get(opCtx)->shardRegistry()->getAllShardIds(&shardIds);
const int numShards = shardIds.size();
- auto primaryShard = uassertStatusOK(
- Grid::get(opCtx)->shardRegistry()->getShard(opCtx, dbType.getPrimary()));
+ // Handle collections in the config db separately.
+ if (nss.db() == NamespaceString::kConfigDb) {
+ // Only whitelisted collections in config may be sharded
+ // (unless we are in test mode)
+ uassert(ErrorCodes::IllegalOperation,
+ "only special collections in the config db may be sharded",
+ nss.ns() == SessionsCollection::kSessionsFullNS ||
+ Command::testCommandsEnabled);
+
+ auto configShard = uassertStatusOK(
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, dbType.getPrimary()));
+ ScopedDbConnection configConn(configShard->getConnString());
+ ON_BLOCK_EXIT([&configConn] { configConn.done(); });
+
+ // If this is a collection on the config db, it must be empty to be sharded,
+ // otherwise we might end up with chunks on the config servers.
+ uassert(ErrorCodes::IllegalOperation,
+ "collections in the config db must be empty to be sharded",
+ configConn->count(nss.ns()) == 0);
+ }
+
+ // For the config db, pick a new host shard for this collection, otherwise
+ // make a connection to the real primary shard for this database.
+ auto primaryShardId = [&]() {
+ if (nss.db() == NamespaceString::kConfigDb) {
+ uassert(ErrorCodes::IllegalOperation,
+ "cannot shard collections in config before there are shards",
+ numShards > 0);
+ return shardIds[0];
+ } else {
+ return dbType.getPrimary();
+ }
+ }();
+
+ auto primaryShard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, primaryShardId));
ScopedDbConnection conn(primaryShard->getConnString());
ON_BLOCK_EXIT([&conn] { conn.done(); });
@@ -843,7 +880,8 @@ public:
*request.getCollation(),
request.getUnique(),
initSplits,
- distributeInitialChunks);
+ distributeInitialChunks,
+ primaryShardId);
result << "collectionsharded" << nss.ns();
if (uuid) {
result << "collectionUUID" << *uuid;
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 471a699a614..e16addcd83e 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -122,6 +122,11 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
_collectionMetadata = CollectionShardingState::get(opCtx, getNss())->getMetadata();
_keyPattern = _collectionMetadata->getKeyPattern();
+
+ uassert(ErrorCodes::InvalidOptions,
+ "cannot move chunks for a collection that doesn't exist",
+ autoColl.getCollection());
+
if (autoColl.getCollection()->uuid()) {
_collectionUuid = autoColl.getCollection()->uuid().value();
}
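
The ordering of the added uassert is the point of this hunk: the very next statement dereferences autoColl.getCollection(), so the existence check must come first or a missing collection becomes a crash rather than an InvalidOptions error. A minimal sketch of the check-then-dereference pattern, using a plain pointer and exception in place of the AutoGetCollection and uassert machinery:

    #include <iostream>
    #include <stdexcept>

    struct Collection {
        int uuid() const { return 42; }
    };

    void startMigration(const Collection* coll) {
        // Validate before dereferencing: a missing collection must surface as
        // a clean error, not a segfault on coll->uuid().
        if (!coll)
            throw std::invalid_argument(
                "cannot move chunks for a collection that doesn't exist");
        std::cout << "collection uuid: " << coll->uuid() << "\n";
    }

    int main() {
        Collection c;
        startMigration(&c);  // ok
        try {
            startMigration(nullptr);
        } catch (const std::exception& e) {
            std::cout << "error: " << e.what() << "\n";
        }
    }
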
diff --git a/src/mongo/s/catalog/SConscript b/src/mongo/s/catalog/SConscript
index e099aa25dc5..7b76bf58d1b 100644
--- a/src/mongo/s/catalog/SConscript
+++ b/src/mongo/s/catalog/SConscript
@@ -193,7 +193,6 @@ env.CppUnitTest(
env.CppUnitTest(
target='sharding_catalog_test',
source=[
- 'sharding_catalog_append_db_stats_test.cpp',
'sharding_catalog_drop_coll_test.cpp',
'sharding_catalog_log_change_test.cpp',
'sharding_catalog_test.cpp',
diff --git a/src/mongo/s/catalog/sharding_catalog_append_db_stats_test.cpp b/src/mongo/s/catalog/sharding_catalog_append_db_stats_test.cpp
deleted file mode 100644
index 69222f3d06e..00000000000
--- a/src/mongo/s/catalog/sharding_catalog_append_db_stats_test.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * Copyright (C) 2015 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/bson/json.h"
-#include "mongo/client/remote_command_targeter_mock.h"
-#include "mongo/executor/network_interface_mock.h"
-#include "mongo/rpc/metadata/repl_set_metadata.h"
-#include "mongo/rpc/metadata/tracking_metadata.h"
-#include "mongo/s/catalog/sharding_catalog_client_impl.h"
-#include "mongo/s/catalog/sharding_catalog_test_fixture.h"
-#include "mongo/stdx/future.h"
-#include "mongo/util/log.h"
-#include "mongo/util/time_support.h"
-
-namespace mongo {
-namespace {
-
-using executor::NetworkInterfaceMock;
-using executor::RemoteCommandRequest;
-using executor::RemoteCommandResponse;
-
-using ShardingCatalogClientAppendDbStatsTest = ShardingCatalogTestFixture;
-
-BSONObj getReplSecondaryOkMetadata() {
- BSONObjBuilder o;
- ReadPreferenceSetting(ReadPreference::PrimaryPreferred).toContainingBSON(&o);
- o.append(rpc::kReplSetMetadataFieldName, 1);
- return o.obj();
-}
-
-TEST_F(ShardingCatalogClientAppendDbStatsTest, BasicAppendDBStats) {
- configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
-
- BSONArrayBuilder builder;
- auto future = launchAsync([this, &builder] {
- ASSERT_OK(catalogClient()->appendInfoForConfigServerDatabases(
- operationContext(), BSON("listDatabases" << 1), &builder));
- });
-
- onCommand([](const RemoteCommandRequest& request) {
- ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
- rpc::TrackingMetadata::removeTrackingData(request.metadata));
-
- ASSERT_EQ("admin", request.dbname);
- ASSERT_BSONOBJ_EQ(BSON("listDatabases" << 1), request.cmdObj);
-
- return fromjson(R"({
- databases: [
- {
- name: 'admin',
- empty: false,
- sizeOnDisk: 11111
- },
- {
- name: 'local',
- empty: false,
- sizeOnDisk: 33333
- },
- {
- name: 'config',
- empty: false,
- sizeOnDisk: 40000
- }
- ],
- ok: 1
- })");
- });
-
- future.timed_get(kFutureTimeout);
-
- BSONArray dbList = builder.arr();
- std::map<std::string, long long> dbMap;
- BSONArrayIteratorSorted iter(dbList);
- while (iter.more()) {
- auto dbEntryObj = iter.next().Obj();
- dbMap[dbEntryObj["name"].String()] = dbEntryObj["sizeOnDisk"].numberLong();
- }
-
- auto adminIter = dbMap.find("admin");
- ASSERT_TRUE(adminIter != dbMap.end());
- ASSERT_EQ(11111, adminIter->second);
-
- auto configIter = dbMap.find("config");
- ASSERT_TRUE(configIter != dbMap.end());
- ASSERT_EQ(40000, configIter->second);
-
- auto localIter = dbMap.find("local");
- ASSERT_TRUE(localIter == dbMap.end());
-}
-
-TEST_F(ShardingCatalogClientAppendDbStatsTest, AppendDBStatsWithFilter) {
- configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
-
- BSONArrayBuilder builder;
- auto future = launchAsync([this, &builder] {
- ASSERT_OK(catalogClient()->appendInfoForConfigServerDatabases(
- operationContext(),
- BSON("listDatabases" << 1 << "filter" << BSON("name"
- << "config")),
- &builder));
- });
-
- onCommand([](const RemoteCommandRequest& request) {
- ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
- rpc::TrackingMetadata::removeTrackingData(request.metadata));
-
- ASSERT_EQ("admin", request.dbname);
- ASSERT_BSONOBJ_EQ(BSON("listDatabases" << 1 << "filter" << BSON("name"
- << "config")),
- request.cmdObj);
-
- return fromjson(R"({
- databases: [
- {
- name: 'config',
- empty: false,
- sizeOnDisk: 40000
- }
- ],
- ok: 1
- })");
- });
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(ShardingCatalogClientAppendDbStatsTest, ErrorRunningListDatabases) {
- configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
-
- BSONArrayBuilder builder;
- auto future = launchAsync([this, &builder] {
- auto status = catalogClient()->appendInfoForConfigServerDatabases(
- operationContext(), BSON("listDatabases" << 1), &builder);
- ASSERT_NOT_OK(status);
- ASSERT_EQ(ErrorCodes::AuthenticationFailed, status.code());
- ASSERT_FALSE(status.reason().empty());
- });
-
- onCommand([](const RemoteCommandRequest&) {
- return Status(ErrorCodes::AuthenticationFailed, "illegal");
- });
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(ShardingCatalogClientAppendDbStatsTest, MalformedListDatabasesResponse) {
- configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
-
- BSONArrayBuilder builder;
- auto future = launchAsync([this, &builder] {
- auto status = catalogClient()->appendInfoForConfigServerDatabases(
- operationContext(), BSON("listDatabases" << 1), &builder);
- ASSERT_NOT_OK(status);
- ASSERT_EQ(ErrorCodes::NoSuchKey, status.code());
- ASSERT_FALSE(status.reason().empty());
- });
-
- onCommand([](const RemoteCommandRequest&) { return BSON("ok" << 1); });
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(ShardingCatalogClientAppendDbStatsTest, MalformedListDatabasesEntryInResponse) {
- configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
-
- BSONArrayBuilder builder;
- auto future = launchAsync([this, &builder] {
- auto status = catalogClient()->appendInfoForConfigServerDatabases(
- operationContext(), BSON("listDatabases" << 1), &builder);
- ASSERT_NOT_OK(status);
- ASSERT_EQ(ErrorCodes::NoSuchKey, status.code());
- ASSERT_FALSE(status.reason().empty());
- });
-
- onCommand([](const RemoteCommandRequest&) {
- return fromjson(R"({
- databases: [
- {
- noname: 'admin',
- empty: false,
- sizeOnDisk: 11111
- }
- ],
- ok: 1
- })");
- });
-
- future.timed_get(kFutureTimeout);
-}
-
-} // unnamed namespace
-} // namespace mongo
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index 52896f57579..1285ca1f88b 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -367,15 +367,6 @@ public:
const WriteConcernOptions& writeConcern) = 0;
/**
- * Appends the information about the config and admin databases in the config server with the
- * format for listDatabases, based on the listDatabases command parameters in
- * 'listDatabasesCmd'.
- */
- virtual Status appendInfoForConfigServerDatabases(OperationContext* opCtx,
- const BSONObj& listDatabasesCmd,
- BSONArrayBuilder* builder) = 0;
-
- /**
* Obtains a reference to the distributed lock manager instance to use for synchronizing
* system-wide changes.
*
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index a6239faf98d..558cf54ef11 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -271,12 +271,22 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabas
return {ErrorCodes::InvalidNamespace, stream() << dbName << " is not a valid db name"};
}
- // The two databases that are hosted on the config server are config and admin
- if (dbName == "config" || dbName == "admin") {
+ // The admin database is always hosted on the config server.
+ if (dbName == "admin") {
DatabaseType dbt;
dbt.setName(dbName);
dbt.setSharded(false);
- dbt.setPrimary(ShardId("config"));
+ dbt.setPrimary(ShardRegistry::kConfigServerShardId);
+
+ return repl::OpTimeWith<DatabaseType>(dbt);
+ }
+
+ // The config database's primary shard is always config, and it is always sharded.
+ if (dbName == "config") {
+ DatabaseType dbt;
+ dbt.setName(dbName);
+ dbt.setSharded(true);
+ dbt.setPrimary(ShardRegistry::kConfigServerShardId);
return repl::OpTimeWith<DatabaseType>(dbt);
}
@@ -1242,49 +1252,6 @@ void ShardingCatalogClientImpl::_appendReadConcern(BSONObjBuilder* builder) {
readConcern.appendInfo(builder);
}
-Status ShardingCatalogClientImpl::appendInfoForConfigServerDatabases(
- OperationContext* opCtx, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
- auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- auto resultStatus =
- configShard->runCommandWithFixedRetryAttempts(opCtx,
- kConfigPrimaryPreferredSelector,
- "admin",
- listDatabasesCmd,
- Shard::RetryPolicy::kIdempotent);
-
- if (!resultStatus.isOK()) {
- return resultStatus.getStatus();
- }
- if (!resultStatus.getValue().commandStatus.isOK()) {
- return resultStatus.getValue().commandStatus;
- }
-
- auto listDBResponse = std::move(resultStatus.getValue().response);
- BSONElement dbListArray;
- auto dbListStatus = bsonExtractTypedField(listDBResponse, "databases", Array, &dbListArray);
- if (!dbListStatus.isOK()) {
- return dbListStatus;
- }
-
- BSONObjIterator iter(dbListArray.Obj());
-
- while (iter.more()) {
- auto dbEntry = iter.next().Obj();
- string name;
- auto parseStatus = bsonExtractStringField(dbEntry, "name", &name);
-
- if (!parseStatus.isOK()) {
- return parseStatus;
- }
-
- if (name == "config" || name == "admin") {
- builder->append(dbEntry);
- }
- }
-
- return Status::OK();
-}
-
StatusWith<std::vector<KeysCollectionDocument>> ShardingCatalogClientImpl::getNewKeys(
OperationContext* opCtx,
StringData purpose,
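
getDatabase now synthesizes metadata for the built-in databases instead of querying the catalog: admin stays unsharded with the config shard as primary, while config is reported as sharded. A standalone sketch of that special-casing, under the assumption of a simplified DatabaseType (the real struct carries more state):

    #include <iostream>
    #include <optional>
    #include <string>

    // Simplified stand-in for DatabaseType.
    struct DatabaseType {
        std::string name;
        std::string primary;
        bool sharded;
    };

    // Built-in databases get synthesized metadata; anything else would fall
    // through to a real catalog query.
    std::optional<DatabaseType> builtinDatabase(const std::string& dbName) {
        if (dbName == "admin")
            return DatabaseType{dbName, "config", /*sharded=*/false};
        if (dbName == "config")
            return DatabaseType{dbName, "config", /*sharded=*/true};
        return std::nullopt;
    }

    int main() {
        for (const std::string db : {"admin", "config", "test"}) {
            if (auto dbt = builtinDatabase(db))
                std::cout << db << " -> primary=" << dbt->primary
                          << " sharded=" << dbt->sharded << "\n";
            else
                std::cout << db << " -> catalog lookup\n";
        }
    }
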
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index bb9982fc57f..c11ec37f751 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -167,10 +167,6 @@ public:
DistLockManager* getDistLockManager() override;
- Status appendInfoForConfigServerDatabases(OperationContext* opCtx,
- const BSONObj& listDatabasesCmd,
- BSONArrayBuilder* builder) override;
-
/**
* Runs a read command against the config server with majority read concern.
*/
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index 60e95c15ea5..982e06e085f 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -208,11 +208,6 @@ DistLockManager* ShardingCatalogClientMock::getDistLockManager() {
return _distLockManager.get();
}
-Status ShardingCatalogClientMock::appendInfoForConfigServerDatabases(
- OperationContext* opCtx, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
- return Status::OK();
-}
-
StatusWith<std::vector<KeysCollectionDocument>> ShardingCatalogClientMock::getNewKeys(
OperationContext* opCtx,
StringData purpose,
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 05239d325a4..2b33aed396f 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -145,10 +145,6 @@ public:
DistLockManager* getDistLockManager() override;
- Status appendInfoForConfigServerDatabases(OperationContext* opCtx,
- const BSONObj& listDatabasesCmd,
- BSONArrayBuilder* builder) override;
-
StatusWith<std::vector<KeysCollectionDocument>> getNewKeys(
OperationContext* opCtx,
StringData purpose,
diff --git a/src/mongo/s/catalog/sharding_catalog_manager.h b/src/mongo/s/catalog/sharding_catalog_manager.h
index 4c78b2fa4ea..1b23e02b634 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager.h
+++ b/src/mongo/s/catalog/sharding_catalog_manager.h
@@ -254,7 +254,8 @@ public:
const BSONObj& defaultCollation,
bool unique,
const std::vector<BSONObj>& initPoints,
- const bool distributeInitialChunks);
+ const bool distributeInitialChunks,
+ const ShardId& dbPrimaryShardId);
//
// Shard Operations
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
index ec7b373e37c..7e1ea54d065 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
@@ -122,8 +122,13 @@ ChunkVersion createFirstChunks(OperationContext* opCtx,
0));
}
- // Since docs already exist for the collection, must use primary shard
- shardIds.push_back(primaryShardId);
+ // If docs already exist for the collection, must use primary shard,
+ // otherwise defer to passed-in distribution option.
+ if (numObjects == 0 && distributeInitialChunks) {
+ Grid::get(opCtx)->shardRegistry()->getAllShardIds(&shardIds);
+ } else {
+ shardIds.push_back(primaryShardId);
+ }
} else {
// Make sure points are unique and ordered
auto orderedPts = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
@@ -221,15 +226,11 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
const BSONObj& defaultCollation,
bool unique,
const vector<BSONObj>& initPoints,
- const bool distributeInitialChunks) {
+ const bool distributeInitialChunks,
+ const ShardId& dbPrimaryShardId) {
const auto catalogClient = Grid::get(opCtx)->catalogClient();
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
- auto dbEntry =
- uassertStatusOK(catalogClient->getDatabase(
- opCtx, nsToDatabase(ns), repl::ReadConcernLevel::kLocalReadConcern))
- .value;
- auto dbPrimaryShardId = dbEntry.getPrimary();
const auto primaryShard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
// Fail if there are partially written chunks from a previous failed shardCollection.
@@ -285,6 +286,9 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
opCtx, ns, coll, true /*upsert*/));
}
+ auto shard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
+ invariant(!shard->isConfig());
+
// Tell the primary mongod to refresh its data
// TODO: Think the real fix here is for mongos to just
// assume that all collections are sharded, when we get there
@@ -296,8 +300,6 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
collVersion,
true);
- auto shard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
-
auto ssvResponse =
shard->runCommandWithFixedRetryAttempts(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
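
The createFirstChunks change makes chunk placement depend on both emptiness and the caller's distribution flag: an empty collection with distributeInitialChunks set spreads its initial chunks across every shard, while anything else starts on the primary. A self-contained sketch of just that selection rule (plain strings standing in for ShardId):

    #include <iostream>
    #include <string>
    #include <vector>

    // Spread initial chunks across all shards only when the collection is
    // empty and distribution was explicitly requested; otherwise everything
    // starts on the database's primary shard.
    std::vector<std::string> initialChunkShards(long long numObjects,
                                                bool distributeInitialChunks,
                                                const std::vector<std::string>& allShards,
                                                const std::string& primaryShardId) {
        if (numObjects == 0 && distributeInitialChunks)
            return allShards;
        return {primaryShardId};
    }

    int main() {
        const std::vector<std::string> all{"shard0", "shard1", "shard2"};
        for (const auto& s : initialChunkShards(0, true, all, "shard0"))
            std::cout << "empty + distribute -> " << s << "\n";
        for (const auto& s : initialChunkShards(100, true, all, "shard0"))
            std::cout << "has docs -> " << s << "\n";
    }
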
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp
index c7282833631..9765cf332b2 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_database_operations.cpp
@@ -104,12 +104,17 @@ Status ShardingCatalogManager::createDatabase(OperationContext* opCtx, const std
Status ShardingCatalogManager::enableSharding(OperationContext* opCtx, const std::string& dbName) {
invariant(nsIsDbOnly(dbName));
- if (dbName == NamespaceString::kConfigDb || dbName == NamespaceString::kAdminDb) {
+ if (dbName == NamespaceString::kAdminDb) {
return {
ErrorCodes::IllegalOperation,
str::stream() << "Enabling sharding on system configuration databases is not allowed"};
}
+ // Sharding is enabled automatically on the config db.
+ if (dbName == NamespaceString::kConfigDb) {
+ return Status::OK();
+ }
+
// Lock the database globally to prevent conflicts with simultaneous database
// creation/modification.
auto scopedDistLock = Grid::get(opCtx)->catalogClient()->getDistLockManager()->lock(
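
At the catalog-manager layer the policy becomes asymmetric: admin is still refused, but config now short-circuits with success, since sharding is treated as permanently enabled there. A Status-flavored sketch of the control flow (the enum and the trailing comment are stand-ins for the real Status type and dist-lock logic):

    #include <iostream>
    #include <string>

    enum class Status { OK, IllegalOperation };

    // Stand-in for ShardingCatalogManager::enableSharding's early exits.
    Status enableSharding(const std::string& dbName) {
        if (dbName == "admin")
            return Status::IllegalOperation;  // never shardable
        if (dbName == "config")
            return Status::OK;  // sharding is always on for config; no-op
        // ... real code takes the dist lock and marks the db as partitioned ...
        return Status::OK;
    }

    int main() {
        for (const std::string db : {"admin", "config", "test"})
            std::cout << db << " -> "
                      << (enableSharding(db) == Status::OK ? "OK" : "IllegalOperation")
                      << "\n";
    }
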
diff --git a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
index b6229735979..3f0161c1783 100644
--- a/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_shard_collection_test.cpp
@@ -71,6 +71,8 @@ using std::string;
using std::vector;
using unittest::assertGet;
+const ShardId testPrimaryShard = ShardId("shard0");
+
class ShardCollectionTest : public ConfigServerTestFixture {
public:
void expectCount(const HostAndPort& receivingHost,
@@ -140,7 +142,8 @@ TEST_F(ShardCollectionTest, anotherMongosSharding) {
defaultCollation,
false,
vector<BSONObj>{},
- false),
+ false,
+ testPrimaryShard),
AssertionException,
ErrorCodes::ManualInterventionRequired);
}
@@ -180,7 +183,8 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
defaultCollation,
false,
vector<BSONObj>{},
- false);
+ false,
+ testPrimaryShard);
});
// Report that no documents exist for the given collection on the primary shard
@@ -309,7 +313,8 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
defaultCollation,
true,
vector<BSONObj>{splitPoint0, splitPoint1, splitPoint2, splitPoint3},
- true);
+ true,
+ testPrimaryShard);
});
// Expect the set shard version for that namespace
@@ -408,7 +413,8 @@ TEST_F(ShardCollectionTest, withInitialData) {
defaultCollation,
false,
vector<BSONObj>{},
- false);
+ false,
+ testPrimaryShard);
});
// Report that documents exist for the given collection on the primary shard, so that calling
diff --git a/src/mongo/s/client/shard_local_test.cpp b/src/mongo/s/client/shard_local_test.cpp
index aac2eaad6a8..cb969394ccc 100644
--- a/src/mongo/s/client/shard_local_test.cpp
+++ b/src/mongo/s/client/shard_local_test.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/service_context_d_test_fixture.h"
#include "mongo/db/write_concern_options.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/stdx/memory.h"
namespace mongo {
@@ -83,7 +84,7 @@ void ShardLocalTest::setUp() {
Client::initThreadIfNotAlready();
_opCtx = getGlobalServiceContext()->makeOperationContext(&cc());
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
- _shardLocal = stdx::make_unique<ShardLocal>(ShardId("config"));
+ _shardLocal = stdx::make_unique<ShardLocal>(ShardRegistry::kConfigServerShardId);
const repl::ReplSettings replSettings = {};
repl::setGlobalReplicationCoordinator(
new repl::ReplicationCoordinatorMock(_opCtx->getServiceContext(), replSettings));
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 4c581f18475..a64eace6ba4 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -84,6 +84,8 @@ namespace {
const Seconds kRefreshPeriod(30);
} // namespace
+const ShardId ShardRegistry::kConfigServerShardId = ShardId("config");
+
ShardRegistry::ShardRegistry(std::unique_ptr<ShardFactory> shardFactory,
const ConnectionString& configServerCS)
: _shardFactory(std::move(shardFactory)), _initConfigServerCS(configServerCS) {}
@@ -184,7 +186,8 @@ void ShardRegistry::updateReplSetHosts(const ConnectionString& newConnString) {
void ShardRegistry::init() {
stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
invariant(_initConfigServerCS.isValid());
- auto configShard = _shardFactory->createShard(ShardId("config"), _initConfigServerCS);
+ auto configShard =
+ _shardFactory->createShard(ShardRegistry::kConfigServerShardId, _initConfigServerCS);
_data.addConfigShard(configShard);
// set to invalid so it can't be started more than once.
_initConfigServerCS = ConnectionString();
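
Introducing ShardRegistry::kConfigServerShardId replaces the scattered ShardId("config") literals with one named constant, so the config shard's identity is spelled out exactly once. The same pattern in miniature, with std::string standing in for ShardId:

    #include <iostream>
    #include <string>

    // One authoritative definition instead of repeated "config" literals.
    struct ShardRegistry {
        static const std::string kConfigServerShardId;
    };
    const std::string ShardRegistry::kConfigServerShardId = "config";

    int main() {
        // Callers compare against the constant rather than a magic string.
        const std::string shardName = "config";
        std::cout << (shardName == ShardRegistry::kConfigServerShardId
                          ? "config shard"
                          : "data shard")
                  << "\n";
    }
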
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index a1d6b333cf7..13b48b95ce4 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -141,6 +141,11 @@ class ShardRegistry {
public:
/**
+ * A ShardId for the config servers.
+ */
+ static const ShardId kConfigServerShardId;
+
+ /**
* Instantiates a new shard registry.
*
* @param shardFactory Makes shards
diff --git a/src/mongo/s/client/shard_registry_data_test.cpp b/src/mongo/s/client/shard_registry_data_test.cpp
index a005cb1aae8..089fb872306 100644
--- a/src/mongo/s/client/shard_registry_data_test.cpp
+++ b/src/mongo/s/client/shard_registry_data_test.cpp
@@ -85,7 +85,7 @@ private:
TEST_F(ShardRegistryDataTest, AddConfigShard) {
ConnectionString configCS("rs/dummy1:1234,dummy2:2345,dummy3:3456", ConnectionString::SET);
- auto configShard = shardFactory()->createShard(ShardId("config"), configCS);
+ auto configShard = shardFactory()->createShard(ShardRegistry::kConfigServerShardId, configCS);
ShardRegistryData data;
data.addConfigShard(configShard);
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 2087dce822c..84f2cdc2ed5 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -95,6 +95,9 @@ public:
vector<ShardId> shardIds;
grid.shardRegistry()->getAllShardIds(&shardIds);
+ shardIds.emplace_back(ShardRegistry::kConfigServerShardId);
+
+ auto filteredCmd = filterCommandRequestForPassthrough(cmdObj);
for (const ShardId& shardId : shardIds) {
const auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
@@ -107,7 +110,7 @@ public:
opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
"admin",
- filterCommandRequestForPassthrough(cmdObj),
+ filteredCmd,
Shard::RetryPolicy::kIdempotent));
uassertStatusOK(response.commandStatus);
BSONObj x = std::move(response.response);
@@ -117,6 +120,17 @@ public:
BSONObj dbObj = j.next().Obj();
const string name = dbObj["name"].String();
+
+ // If this is the admin db, only collect its stats from the config servers.
+ if (name == "admin" && !s->isConfig()) {
+ continue;
+ }
+
+ // We don't collect config server info for dbs other than "admin" and "config".
+ if (s->isConfig() && name != "config" && name != "admin") {
+ continue;
+ }
+
const long long size = dbObj["sizeOnDisk"].numberLong();
long long& sizeSumForDbAcrossShards = sizes[name];
@@ -137,60 +151,44 @@ public:
}
}
- BSONArrayBuilder dbListBuilder(result.subarrayStart("databases"));
- for (map<string, long long>::iterator i = sizes.begin(); i != sizes.end(); ++i) {
- const string name = i->first;
-
- if (name == "local") {
- // We don't return local, since all shards have their own independent local
- continue;
- }
+ // Now that we have aggregated results for all the shards, convert to a response,
+ // and compute total sizes.
+ long long totalSize = 0;
+ {
+ BSONArrayBuilder dbListBuilder(result.subarrayStart("databases"));
+ for (map<string, long long>::iterator i = sizes.begin(); i != sizes.end(); ++i) {
+ const string name = i->first;
+
+ if (name == "local") {
+ // We don't return local, since all shards have their own independent local
+ continue;
+ }
- if (name == "config" || name == "admin") {
- // Always get this from the config servers
- continue;
- }
+ long long size = i->second;
- long long size = i->second;
+ BSONObjBuilder temp;
+ temp.append("name", name);
+ if (!nameOnly) {
+ temp.appendNumber("sizeOnDisk", size);
+ temp.appendBool("empty", size == 1);
+ temp.append("shards", dbShardInfo[name]->obj());
- BSONObjBuilder temp;
- temp.append("name", name);
- if (!nameOnly) {
- temp.appendNumber("sizeOnDisk", size);
- temp.appendBool("empty", size == 1);
- temp.append("shards", dbShardInfo[name]->obj());
- }
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "Found negative 'sizeOnDisk' in: " << name,
+ size >= 0);
- dbListBuilder.append(temp.obj());
- }
+ totalSize += size;
+ }
- // Get information for config and admin dbs from the config servers.
- auto catalogClient = grid.catalogClient();
- auto appendStatus = catalogClient->appendInfoForConfigServerDatabases(
- opCtx, filterCommandRequestForPassthrough(cmdObj), &dbListBuilder);
- dbListBuilder.doneFast();
- if (!appendStatus.isOK()) {
- result.resetToEmpty();
- return Command::appendCommandStatus(result, appendStatus);
+ dbListBuilder.append(temp.obj());
+ }
}
- if (nameOnly)
- return true;
-
- // Compute the combined total size based on the response we've built so far.
- long long totalSize = 0;
- for (auto&& dbElt : result.asTempObj()["databases"].Obj()) {
- long long sizeOnDisk;
- uassertStatusOK(bsonExtractIntegerField(dbElt.Obj(), "sizeOnDisk"_sd, &sizeOnDisk));
- uassert(ErrorCodes::BadValue,
- str::stream() << "Found negative 'sizeOnDisk' in: " << dbElt.Obj(),
- sizeOnDisk >= 0);
- totalSize += sizeOnDisk;
+ if (!nameOnly) {
+ result.appendNumber("totalSize", totalSize);
+ result.appendNumber("totalSizeMb", totalSize / (1024 * 1024));
}
- result.appendNumber("totalSize", totalSize);
- result.appendNumber("totalSizeMb", totalSize / (1024 * 1024));
-
return true;
}
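
The rewritten command aggregates one sizeOnDisk total per database name across all shards (now including the config shard), skips local, and computes totalSize from the same map instead of re-parsing the response it just built. A compact sketch of that aggregation, with (shard, db, size) tuples standing in for per-shard listDatabases responses:

    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>
    #include <tuple>
    #include <vector>

    int main() {
        // (shard, database, sizeOnDisk) triples standing in for the per-shard
        // listDatabases responses, config shard included.
        const std::vector<std::tuple<std::string, std::string, long long>> responses = {
            {"shard0", "test", 11111},
            {"shard1", "test", 22222},
            {"shard0", "local", 999},
            {"config", "config", 40000},
        };

        std::map<std::string, long long> sizes;  // db name -> size summed across shards
        for (const auto& [shard, db, size] : responses)
            sizes[db] += size;

        long long totalSize = 0;
        for (const auto& [name, size] : sizes) {
            if (name == "local")
                continue;  // every shard has its own independent local
            if (size < 0)
                throw std::runtime_error("Found negative 'sizeOnDisk' in: " + name);
            totalSize += size;
            std::cout << name << " " << size << "\n";
        }
        std::cout << "totalSize " << totalSize << "\n";
    }
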
diff --git a/src/mongo/s/commands/cluster_write.cpp b/src/mongo/s/commands/cluster_write.cpp
index a3004bb7632..12d44edc75b 100644
--- a/src/mongo/s/commands/cluster_write.cpp
+++ b/src/mongo/s/commands/cluster_write.cpp
@@ -32,6 +32,8 @@
#include "mongo/s/commands/cluster_write.h"
+#include <algorithm>
+
#include "mongo/base/status.h"
#include "mongo/client/connpool.h"
#include "mongo/db/lasterror.h"
@@ -183,7 +185,7 @@ void ClusterWriter::write(OperationContext* opCtx,
LastError::Disabled disableLastError(&LastError::get(opCtx->getClient()));
// Config writes and shard writes are done differently
- if (nss.db() == NamespaceString::kConfigDb || nss.db() == NamespaceString::kAdminDb) {
+ if (nss.db() == NamespaceString::kAdminDb) {
Grid::get(opCtx)->catalogClient()->writeConfigServerDirect(opCtx, request, response);
} else {
TargeterStats targeterStats;
@@ -194,7 +196,7 @@ void ClusterWriter::write(OperationContext* opCtx,
Status targetInitStatus = targeter.init(opCtx);
if (!targetInitStatus.isOK()) {
toBatchError({targetInitStatus.code(),
- str::stream() << "unable to target"
+ str::stream() << "unable to initialize targeter for"
<< (request.isInsertIndexRequest() ? " index" : "")
<< " write op for collection "
<< request.getTargetingNS().ns()
@@ -203,6 +205,32 @@ void ClusterWriter::write(OperationContext* opCtx,
return;
}
+ std::vector<std::unique_ptr<ShardEndpoint>> endpoints;
+ auto targetStatus = targeter.targetCollection(&endpoints);
+ if (!targetStatus.isOK()) {
+ toBatchError({targetStatus.code(),
+ str::stream() << "unable to target"
+ << (request.isInsertIndexRequest() ? " index" : "")
+ << " write op for collection "
+ << request.getTargetingNS().ns()
+ << causedBy(targetStatus)},
+ response);
+ return;
+ }
+
+ // Handle sharded config server writes differently.
+ if (std::any_of(endpoints.begin(), endpoints.end(), [](const auto& it) {
+ return it->shardName == ShardRegistry::kConfigServerShardId;
+ })) {
+ // There should be no namespaces that partially target config servers.
+ invariant(endpoints.size() == 1);
+
+ // For config servers, we do direct writes.
+ Grid::get(opCtx)->catalogClient()->writeConfigServerDirect(
+ opCtx, request, response);
+ return;
+ }
+
BatchWriteExec::executeBatch(opCtx, targeter, request, response, stats);
}
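
The new targeting step runs std::any_of over the resolved endpoints to detect whether any of them names the config shard, and if so routes the entire batch through the direct config-write path. A minimal sketch of that predicate over unique_ptr endpoints (ShardEndpoint reduced to its shardName field):

    #include <algorithm>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    struct ShardEndpoint {
        std::string shardName;
    };

    int main() {
        const std::string kConfigServerShardId = "config";

        std::vector<std::unique_ptr<ShardEndpoint>> endpoints;
        endpoints.push_back(std::make_unique<ShardEndpoint>(ShardEndpoint{"config"}));

        // Any endpoint naming the config shard diverts the whole batch to the
        // direct config-server write path; partial targeting is not expected.
        const bool targetsConfig =
            std::any_of(endpoints.begin(), endpoints.end(), [&](const auto& ep) {
                return ep->shardName == kConfigServerShardId;
            });

        std::cout << (targetsConfig ? "write directly to config" : "use batch executor")
                  << "\n";
    }
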
diff --git a/src/mongo/shell/utils_sh.js b/src/mongo/shell/utils_sh.js
index 074892cc758..cfbb5b11456 100644
--- a/src/mongo/shell/utils_sh.js
+++ b/src/mongo/shell/utils_sh.js
@@ -661,7 +661,16 @@ function printShardingStatus(configDB, verbose) {
}
output(1, "databases:");
- configDB.databases.find().sort({name: 1}).forEach(function(db) {
+
+ var databases = configDB.databases.find().sort({name: 1}).toArray();
+
+ // Special case the config db, since it doesn't have a record in config.databases.
+ databases.push({"_id": "config", "primary": "config", "partitioned": true});
+ databases.sort(function(a, b) {
+ return a["_id"] < b["_id"] ? -1 : (a["_id"] > b["_id"] ? 1 : 0);
+ });
+
+ databases.forEach(function(db) {
var truthy = function(value) {
return !!value;
};