author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2016-12-09 15:58:25 -0500
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2016-12-16 21:24:49 -0500
commit     c7bf862d8d75fa14801cfe8997a693e05a88594a (patch)
tree       ec44e2e9e06e0e7c46a683d98e09806c22fff2ee /src/mongo/s
parent     834fe4857af1cd75f2cd70f7682540dbe3a2ced6 (diff)
download   mongo-c7bf862d8d75fa14801cfe8997a693e05a88594a.tar.gz
SERVER-27366 Remove DBConfig::enableSharding
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp |  8
-rw-r--r--  src/mongo/s/commands/cluster_enable_sharding_cmd.cpp | 10
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp      | 58
-rw-r--r--  src/mongo/s/config.cpp                                | 13
-rw-r--r--  src/mongo/s/config.h                                  |  2
5 files changed, 45 insertions, 46 deletions
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index e72bbc7e689..9c8e776b378 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -320,7 +320,11 @@ StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const std::string& dbName) {
invariant(nsIsDbOnly(dbName));
- DatabaseType db;
+ if (dbName == NamespaceString::kConfigDb || dbName == NamespaceString::kAdminDb) {
+ return {
+ ErrorCodes::IllegalOperation,
+ str::stream() << "Enabling sharding on system configuration databases is not allowed"};
+ }
// Lock the database globally to prevent conflicts with simultaneous database
// creation/modification.
@@ -331,6 +335,8 @@ Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const st
}
// Check for case sensitivity violations
+ DatabaseType db;
+
Status status = _checkDbDoesNotExist(txn, dbName, &db);
if (status.isOK()) {
// Database does not exist, create a new entry
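The guard added above returns IllegalOperation before the database lock is even taken, so the config and admin databases can never acquire a sharding-enabled entry. Below is a minimal, self-contained sketch of that early-return pattern; the Status and ErrorCodes types here are simplified stand-ins, not the real mongo/base classes.

#include <iostream>
#include <string>

// Simplified stand-ins for the server's Status/ErrorCodes (assumption: the real
// classes in mongo/base carry a richer error-code taxonomy and extra state).
enum class ErrorCodes { OK, IllegalOperation };

struct Status {
    ErrorCodes code;
    std::string reason;
    bool isOK() const { return code == ErrorCodes::OK; }
};

// Mirrors the guard added to ShardingCatalogClientImpl::enableSharding: the
// system configuration databases are rejected before any locking or catalog
// writes happen.
Status enableSharding(const std::string& dbName) {
    if (dbName == "config" || dbName == "admin") {
        return {ErrorCodes::IllegalOperation,
                "Enabling sharding on system configuration databases is not allowed"};
    }
    // ... lock the database, check for case-sensitivity clashes, write the
    // config.databases entry (elided in this sketch) ...
    return {ErrorCodes::OK, ""};
}

int main() {
    std::cout << enableSharding("config").reason << "\n";                   // rejected
    std::cout << (enableSharding("test").isOK() ? "ok" : "error") << "\n";  // ok
    return 0;
}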
diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
index 1254fca23b5..bd7adb2fbb6 100644
--- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
+++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
@@ -105,15 +105,13 @@ public:
return false;
}
- Status status = grid.catalogClient(txn)->enableSharding(txn, dbname);
- if (status.isOK()) {
- audit::logEnableSharding(Client::getCurrent(), dbname);
- }
+ uassertStatusOK(Grid::get(txn)->catalogClient(txn)->enableSharding(txn, dbname));
+ audit::logEnableSharding(Client::getCurrent(), dbname);
// Make sure to force update of any stale metadata
- grid.catalogCache()->invalidate(dbname);
+ Grid::get(txn)->catalogCache()->invalidate(dbname);
- return appendCommandStatus(result, status);
+ return true;
}
} enableShardingCmd;
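The command body now calls uassertStatusOK instead of returning appendCommandStatus(result, status): a failed Status throws, the command dispatcher converts the exception into the error reply, and the happy path reads straight through to the audit log and cache invalidation. A minimal sketch of that control-flow change follows, with a hand-rolled uassertStatusOK stand-in (the real helper throws an exception carrying the Status).

#include <iostream>
#include <stdexcept>
#include <string>

// Stand-in for mongo::Status, reduced to an ok flag plus a reason string.
struct Status {
    bool ok;
    std::string reason;
    bool isOK() const { return ok; }
};

// Stand-in for uassertStatusOK: turn a failed Status into an exception instead
// of threading it back through the caller by hand.
void uassertStatusOK(const Status& status) {
    if (!status.isOK()) {
        throw std::runtime_error(status.reason);
    }
}

Status enableSharding(const std::string& dbName) {
    if (dbName == "config") {
        return {false, "Enabling sharding on system configuration databases is not allowed"};
    }
    return {true, ""};
}

// After the change, the command only states the happy path: failures throw out
// of the function and are turned into an error response by the dispatcher.
bool runEnableShardingCommand(const std::string& dbName) {
    uassertStatusOK(enableSharding(dbName));
    std::cout << "audit: enableSharding on " << dbName << "\n";
    std::cout << "invalidating cached metadata for " << dbName << "\n";
    return true;
}

int main() {
    runEnableShardingCommand("test");
    try {
        runEnableShardingCommand("config");
    } catch (const std::exception& ex) {
        std::cout << "command failed: " << ex.what() << "\n";
    }
    return 0;
}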
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index b2b863d7201..8d3a121c5a7 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -232,7 +232,7 @@ public:
}
// Ensure the input database exists
- auto status = grid.catalogCache()->getDatabase(txn, dbname);
+ auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, dbname);
if (!status.isOK()) {
return appendCommandStatus(result, status.getStatus());
}
@@ -290,8 +290,8 @@ public:
if (!shardedInput && !shardedOutput && !customOutDB) {
LOG(1) << "simple MR, just passthrough";
- const auto shard =
- uassertStatusOK(grid.shardRegistry()->getShard(txn, confIn->getPrimaryId()));
+ const auto shard = uassertStatusOK(
+ Grid::get(txn)->shardRegistry()->getShard(txn, confIn->getPrimaryId()));
ShardConnection conn(shard->getConnString(), "");
@@ -351,7 +351,7 @@ public:
string server;
{
const auto shard = uassertStatusOK(
- grid.shardRegistry()->getShard(txn, mrResult.shardTargetId));
+ Grid::get(txn)->shardRegistry()->getShard(txn, mrResult.shardTargetId));
server = shard->getConnString().toString();
}
servers.insert(server);
@@ -444,8 +444,8 @@ public:
bool hasWCError = false;
if (!shardedOutput) {
- const auto shard =
- uassertStatusOK(grid.shardRegistry()->getShard(txn, confOut->getPrimaryId()));
+ const auto shard = uassertStatusOK(
+ Grid::get(txn)->shardRegistry()->getShard(txn, confOut->getPrimaryId()));
LOG(1) << "MR with single shard output, NS=" << outputCollNss.ns()
<< " primary=" << shard->toString();
@@ -470,8 +470,21 @@ public:
// Create the sharded collection if needed
if (!confOut->isSharded(outputCollNss.ns())) {
- // Enable sharding on db
- confOut->enableSharding(txn);
+ // Enable sharding on the output db
+ Status status = Grid::get(txn)->catalogClient(txn)->enableSharding(
+ txn, outputCollNss.db().toString());
+
+ // If the database has sharding already enabled, we can ignore the error
+ if (status.isOK()) {
+ // Invalidate the output database so it gets reloaded on the next fetch attempt
+ Grid::get(txn)->catalogCache()->invalidate(outputCollNss.db().toString());
+ } else if (status != ErrorCodes::AlreadyInitialized) {
+ uassertStatusOK(status);
+ }
+
+ confOut.reset();
+ confOut = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(
+ txn, outputCollNss.db().toString()));
// Shard collection according to split points
vector<BSONObj> sortedSplitPts;
@@ -498,28 +511,25 @@ public:
// representing the simple binary comparison collation.
BSONObj defaultCollation;
- Status status = grid.catalogClient(txn)->shardCollection(txn,
- outputCollNss.ns(),
- sortKeyPattern,
- defaultCollation,
- true,
- sortedSplitPts,
- outShardIds);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ uassertStatusOK(
+ Grid::get(txn)->catalogClient(txn)->shardCollection(txn,
+ outputCollNss.ns(),
+ sortKeyPattern,
+ defaultCollation,
+ true,
+ sortedSplitPts,
+ outShardIds));
// Make sure the cached metadata for the collection knows that we are now sharded
- confOut = uassertStatusOK(
- grid.catalogCache()->getDatabase(txn, outputCollNss.db().toString()));
- confOut->getChunkManager(txn, outputCollNss.ns(), true /* force */);
+ confOut->getChunkManager(txn, outputCollNss.ns(), true /* reload */);
}
auto chunkSizes = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<int>();
{
// Take distributed lock to prevent split / migration.
- auto scopedDistLock = grid.catalogClient(txn)->getDistLockManager()->lock(
- txn, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
+ auto scopedDistLock =
+ Grid::get(txn)->catalogClient(txn)->getDistLockManager()->lock(
+ txn, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
if (!scopedDistLock.isOK()) {
return appendCommandStatus(result, scopedDistLock.getStatus());
}
@@ -550,7 +560,7 @@ public:
string server;
{
const auto shard = uassertStatusOK(
- grid.shardRegistry()->getShard(txn, mrResult.shardTargetId));
+ Grid::get(txn)->shardRegistry()->getShard(txn, mrResult.shardTargetId));
server = shard->getConnString().toString();
}
singleResult = mrResult.result;
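Two patterns from the mapReduce changes above are worth calling out: sharding is now enabled on the output database through the catalog client, and an AlreadyInitialized error is tolerated because another mongos may have enabled it concurrently; on success the cached database entry is invalidated and refetched so it reflects the new state. Here is a minimal sketch of that tolerate-already-done flow, again with simplified stand-in types rather than the real Grid/CatalogCache API.

#include <iostream>
#include <stdexcept>
#include <string>

// Simplified stand-ins for the server's error codes and Status (assumption).
enum class ErrorCodes { OK, AlreadyInitialized, IllegalOperation };

struct Status {
    ErrorCodes code;
    std::string reason;
    bool isOK() const { return code == ErrorCodes::OK; }
};

void uassertStatusOK(const Status& status) {
    if (!status.isOK()) throw std::runtime_error(status.reason);
}

// Pretend catalog-client call: the second invocation for the same database
// reports AlreadyInitialized, as would happen if another mongos raced us.
Status enableSharding(const std::string& dbName) {
    static bool alreadyEnabled = false;
    if (alreadyEnabled) {
        return {ErrorCodes::AlreadyInitialized, "sharding already enabled for " + dbName};
    }
    alreadyEnabled = true;
    return {ErrorCodes::OK, ""};
}

// Mirrors the mapReduce change: success invalidates the cached database entry so
// the next fetch sees sharding enabled; AlreadyInitialized is treated as success;
// any other error is surfaced to the client.
void ensureShardingEnabledOnOutputDb(const std::string& outputDb) {
    Status status = enableSharding(outputDb);
    if (status.isOK()) {
        std::cout << "invalidating cached entry for " << outputDb << "\n";
    } else if (status.code != ErrorCodes::AlreadyInitialized) {
        uassertStatusOK(status);
    }
    std::cout << "refetching database entry for " << outputDb << "\n";
}

int main() {
    ensureShardingEnabledOnOutputDb("mrOutput");  // freshly enabled
    ensureShardingEnabledOnOutputDb("mrOutput");  // raced: tolerated, no error
    return 0;
}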
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index a9edfdaf99b..a6d8ec7754b 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -161,19 +161,6 @@ void DBConfig::markNSNotSharded(const std::string& ns) {
}
}
-void DBConfig::enableSharding(OperationContext* txn) {
- invariant(_name != "config");
-
- stdx::lock_guard<stdx::mutex> lk(_lock);
-
- if (_shardingEnabled) {
- return;
- }
-
- _shardingEnabled = true;
- _save(txn);
-}
-
// Handles weird logic related to getting *either* a chunk manager *or* the collection primary
// shard
void DBConfig::getChunkManagerOrPrimary(OperationContext* txn,
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index 79893ddb2b2..def4a730772 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -128,8 +128,6 @@ public:
*/
void markNSNotSharded(const std::string& ns);
- void enableSharding(OperationContext* txn);
-
/**
* @return whether or not the 'ns' collection is partitioned
*/
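With DBConfig::enableSharding removed, mongos no longer flips the sharding-enabled flag on its cached DBConfig in place; the write goes through the catalog client, and routers observe it by invalidating and refetching the cached database entry. A small self-contained sketch of that invalidate-then-refetch flow, using a hypothetical miniature cache (the real CatalogCache/DBConfig types are considerably richer):

#include <iostream>
#include <map>
#include <memory>
#include <string>

// Hypothetical miniature catalog cache mapping a database name to a cached
// entry. It only illustrates the invalidate-then-refetch flow that replaces
// the removed in-place DBConfig::enableSharding mutation.
struct CachedDatabase {
    std::string name;
    bool shardingEnabled;
};

class MiniCatalogCache {
public:
    std::shared_ptr<CachedDatabase> getDatabase(const std::string& name) {
        auto it = _cache.find(name);
        if (it == _cache.end()) {
            // Simulate a reload of the database entry from the config servers.
            it = _cache.emplace(name,
                                std::make_shared<CachedDatabase>(
                                    CachedDatabase{name, _authoritative[name]}))
                     .first;
        }
        return it->second;
    }

    void invalidate(const std::string& name) { _cache.erase(name); }

    // Stand-in for the authoritative config.databases state, which the catalog
    // client would update when enableSharding succeeds.
    void setShardingEnabledOnConfigServer(const std::string& name) {
        _authoritative[name] = true;
    }

private:
    std::map<std::string, std::shared_ptr<CachedDatabase>> _cache;
    std::map<std::string, bool> _authoritative;
};

int main() {
    MiniCatalogCache cache;
    auto db = cache.getDatabase("test");
    std::cout << "before: " << db->shardingEnabled << "\n";  // 0

    // enableSharding now writes through the catalog client (simulated here);
    // the router then invalidates its cached entry and refetches it.
    cache.setShardingEnabledOnConfigServer("test");
    cache.invalidate("test");
    db = cache.getDatabase("test");
    std::cout << "after: " << db->shardingEnabled << "\n";  // 1
    return 0;
}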