author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-12-09 17:19:48 -0500
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-12-13 14:40:34 -0500
commit    b07f96535de47c2da6700074e79ce0ee6eb1ae21 (patch)
tree      51c4b4b03a931b3d2d0310917537a96c36f70779
parent    898c16aad31369e59e3617a3423a5e38e19eb10b (diff)
SERVER-27381 Remove DBConfig::dropDatabase
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp                |   5
-rw-r--r--  src/mongo/db/namespace_string.cpp                   |   1
-rw-r--r--  src/mongo/db/namespace_string.h                     |   3
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h       |   1
-rw-r--r--  src/mongo/s/commands/cluster_drop_database_cmd.cpp  | 176
-rw-r--r--  src/mongo/s/config.cpp                              | 176
-rw-r--r--  src/mongo/s/config.h                                |  15
7 files changed, 143 insertions(+), 234 deletions(-)
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 5eea6f7e39f..31d621c788c 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -188,7 +188,8 @@ public:
string& errmsg,
BSONObjBuilder& result) {
// disallow dropping the config database
- if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer && (dbname == "config")) {
+ if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer &&
+ (dbname == NamespaceString::kConfigDb)) {
return appendCommandStatus(result,
Status(ErrorCodes::IllegalOperation,
"Cannot drop 'config' database if mongod started "
@@ -197,7 +198,7 @@ public:
if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
repl::ReplicationCoordinator::modeNone) &&
- (dbname == "local")) {
+ (dbname == NamespaceString::kLocalDb)) {
return appendCommandStatus(result,
Status(ErrorCodes::IllegalOperation,
"Cannot drop 'local' database while replication "
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index 9779bdd4139..6ee4d989ceb 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -104,6 +104,7 @@ bool legalClientSystemNS(StringData ns) {
constexpr StringData NamespaceString::kAdminDb;
constexpr StringData NamespaceString::kLocalDb;
+constexpr StringData NamespaceString::kConfigDb;
constexpr StringData NamespaceString::kSystemDotViewsCollectionName;
const NamespaceString NamespaceString::kConfigCollectionNamespace(kConfigCollection);
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 8bac89fb362..220c188203c 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -63,6 +63,9 @@ public:
// Namespace for the local database
static constexpr StringData kLocalDb = "local"_sd;
+ // Namespace for the sharding config database
+ static constexpr StringData kConfigDb = "config"_sd;
+
// Name for the system views collection
static constexpr StringData kSystemDotViewsCollectionName = "system.views"_sd;
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index f5fbc0b6154..1d736fb222d 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -93,6 +93,7 @@ class ShardingCatalogClient {
MONGO_DISALLOW_COPYING(ShardingCatalogClient);
public:
+ // Constant to use for configuration data majority writes
static const WriteConcernOptions kMajorityWriteConcern;
virtual ~ShardingCatalogClient() = default;
diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
index b2875998b71..05ac92c22ed 100644
--- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
@@ -30,93 +30,185 @@
#include "mongo/platform/basic.h"
-
#include "mongo/base/status.h"
#include "mongo/db/commands.h"
+#include "mongo/db/operation_context.h"
#include "mongo/s/catalog/catalog_cache.h"
+#include "mongo/s/catalog/dist_lock_manager.h"
+#include "mongo/s/catalog/sharding_catalog_client.h"
+#include "mongo/s/catalog/type_database.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
+#include "mongo/s/sharding_raii.h"
#include "mongo/util/log.h"
namespace mongo {
-
-using std::shared_ptr;
-
namespace {
class DropDatabaseCmd : public Command {
public:
DropDatabaseCmd() : Command("dropDatabase") {}
- virtual bool slaveOk() const {
+ bool slaveOk() const override {
return true;
}
- virtual bool adminOnly() const {
+ bool adminOnly() const override {
return false;
}
-
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
return true;
}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) override {
ActionSet actions;
actions.addAction(ActionType::dropDatabase);
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
- // Disallow dropping the config database from mongos
- if (dbname == "config") {
- return appendCommandStatus(
- result, Status(ErrorCodes::IllegalOperation, "Cannot drop the config database"));
- }
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ uassert(ErrorCodes::IllegalOperation,
+ "Cannot drop the config database",
+ dbname != NamespaceString::kConfigDb);
+
+ uassert(ErrorCodes::BadValue,
+ "have to pass 1 as db parameter",
+ cmdObj.firstElement().isNumber() && cmdObj.firstElement().number() == 1);
+
+ auto const catalogClient = Grid::get(txn)->catalogClient(txn);
- BSONElement e = cmdObj.firstElement();
+ // Lock the database globally to prevent conflicts with simultaneous database
+ // creation/modification.
+ auto scopedDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
+ txn, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));
- if (!e.isNumber() || e.number() != 1) {
- errmsg = "invalid params";
- return 0;
+ // Refresh the database metadata so it kicks off a full reload
+ Grid::get(txn)->catalogCache()->invalidate(dbname);
+
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, dbname);
+
+ if (scopedDbStatus == ErrorCodes::NamespaceNotFound) {
+ result.append("info", "database does not exist");
+ return true;
}
- // Refresh the database metadata
- grid.catalogCache()->invalidate(dbname);
+ uassertStatusOK(scopedDbStatus.getStatus());
- auto status = grid.catalogCache()->getDatabase(txn, dbname);
- if (!status.isOK()) {
- if (status == ErrorCodes::NamespaceNotFound) {
- result.append("info", "database does not exist");
- return true;
- }
+ catalogClient->logChange(txn,
+ "dropDatabase.start",
+ dbname,
+ BSONObj(),
+ ShardingCatalogClient::kMajorityWriteConcern);
+
+ auto const db = scopedDbStatus.getValue().db();
- return appendCommandStatus(result, status.getStatus());
+ std::set<std::string> namespaces;
+ db->getAllShardedCollections(namespaces);
+
+ std::set<ShardId> allShardIds;
+
+ // Drop the database's collections from metadata
+ for (const auto& ns : namespaces) {
+ const auto collectionShards =
+ _dropShardedCollectionFromConfig(txn, NamespaceString(ns));
+ allShardIds.insert(collectionShards.begin(), collectionShards.end());
}
- log() << "DROP DATABASE: " << dbname;
- shared_ptr<DBConfig> conf = status.getValue();
+ // Drop the database from the primary shard first
+ _dropDatabaseFromShard(txn, db->getPrimaryId(), dbname);
- // TODO: Make dropping logic saner and more tolerant of partial drops. This is
- // particularly important since a database drop can be aborted by *any* collection
- // with a distributed namespace lock taken (migrates/splits)
+ // Drop the database from each of the remaining shards
+ for (const ShardId& shardId : allShardIds) {
+ _dropDatabaseFromShard(txn, shardId, dbname);
+ }
- if (!conf->dropDatabase(txn, errmsg)) {
- return false;
+ // Remove the database entry from the metadata
+ Status status =
+ catalogClient->removeConfigDocuments(txn,
+ DatabaseType::ConfigNS,
+ BSON(DatabaseType::name(dbname)),
+ ShardingCatalogClient::kMajorityWriteConcern);
+ if (!status.isOK()) {
+ uassertStatusOK({status.code(),
+ str::stream() << "Could not remove database '" << dbname
+ << "' from metadata due to "
+ << status.reason()});
}
+ // Invalidate the database so the next access will do a full reload
+ Grid::get(txn)->catalogCache()->invalidate(dbname);
+
+ catalogClient->logChange(
+ txn, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
+
result.append("dropped", dbname);
return true;
}
+private:
+ /**
+ * Drops the specified sharded collection from the config server metadata only and returns the
+ * set of shards on which it was located when it was being dropped.
+ *
+ * Throws DBException on failure.
+ */
+ static std::set<ShardId> _dropShardedCollectionFromConfig(OperationContext* txn,
+ NamespaceString nss) {
+ auto scopedCMStatus = ScopedChunkManager::refreshAndGet(txn, nss);
+
+ if (scopedCMStatus == ErrorCodes::NamespaceNotFound ||
+ scopedCMStatus == ErrorCodes::NamespaceNotSharded) {
+ // Skip collection if we cannot find it
+ return std::set<ShardId>{};
+ } else if (!scopedCMStatus.isOK()) {
+ uassertStatusOK({scopedCMStatus.getStatus().code(),
+ str::stream() << "Failed to drop collection " << nss.ns() << " due to "
+ << scopedCMStatus.getStatus().reason()});
+ }
+
+ auto const db = scopedCMStatus.getValue().db();
+ auto const cm = scopedCMStatus.getValue().cm();
+
+ std::set<ShardId> shardIds;
+ cm->getAllShardIds(&shardIds);
+
+ uassertStatusOK(Grid::get(txn)->catalogClient(txn)->dropCollection(txn, nss));
+
+ db->invalidateNs(nss.ns());
+
+ return shardIds;
+ }
+
+ /**
+ * Sends the 'drop' command for the specified database to the specified shard. Throws
+ * DBException on failure.
+ */
+ static void _dropDatabaseFromShard(OperationContext* txn,
+ const ShardId& shardId,
+ const std::string& dbName) {
+ const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ auto cmdDropDatabaseResult = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ dbName,
+ BSON("dropDatabase" << 1 << WriteConcernOptions::kWriteConcernField
+ << txn->getWriteConcern().toBSON()),
+ Shard::RetryPolicy::kIdempotent));
+
+ uassertStatusOK(cmdDropDatabaseResult.commandStatus);
+ uassertStatusOK(cmdDropDatabaseResult.writeConcernStatus);
+ }
+
} clusterDropDatabaseCmd;
} // namespace
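
Taken together, the hunks above replace the old DBConfig::dropDatabase path with an explicit drop sequence inside the mongos command itself. The following condensed sketch restates that sequence in order. It is assembled from the patch and relies on mongod-internal APIs, so it is illustrative rather than standalone-compilable; error handling, logging, and the changelog writes are elided:

    // 1. Serialize with concurrent database creation/modification by taking
    //    the distributed lock on the database name.
    auto scopedDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
        txn, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));

    // 2. Invalidate the cached metadata so the drop acts on fresh state.
    Grid::get(txn)->catalogCache()->invalidate(dbname);
    auto scopedDb = uassertStatusOK(ScopedShardDatabase::getExisting(txn, dbname));

    // 3. Drop each sharded collection from the config server metadata,
    //    collecting every shard that held chunks for it.
    std::set<std::string> namespaces;
    scopedDb.db()->getAllShardedCollections(namespaces);

    std::set<ShardId> allShardIds;
    for (const auto& ns : namespaces) {
        const auto shards = _dropShardedCollectionFromConfig(txn, NamespaceString(ns));
        allShardIds.insert(shards.begin(), shards.end());
    }

    // 4. Drop the database from the primary shard first, then from every
    //    other shard that held data for it.
    _dropDatabaseFromShard(txn, scopedDb.db()->getPrimaryId(), dbname);
    for (const ShardId& shardId : allShardIds) {
        _dropDatabaseFromShard(txn, shardId, dbname);
    }

    // 5. Remove the database entry from the config metadata and invalidate
    //    the cache again so the next access triggers a full reload.
    uassertStatusOK(catalogClient->removeConfigDocuments(
        txn,
        DatabaseType::ConfigNS,
        BSON(DatabaseType::name(dbname)),
        ShardingCatalogClient::kMajorityWriteConcern));
    Grid::get(txn)->catalogCache()->invalidate(dbname);

Because _dropDatabaseFromShard issues the shard-side 'dropDatabase' with Shard::RetryPolicy::kIdempotent, a drop that fails partway through can safely be retried from the top.
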
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 1932bc4803f..9a0194d8139 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -32,12 +32,10 @@
#include "mongo/s/config.h"
-#include "mongo/client/connpool.h"
#include "mongo/db/client.h"
#include "mongo/db/lasterror.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/write_concern.h"
-#include "mongo/s/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -176,32 +174,6 @@ void DBConfig::enableSharding(OperationContext* txn) {
_save(txn);
}
-bool DBConfig::removeSharding(OperationContext* txn, const string& ns) {
- stdx::lock_guard<stdx::mutex> lk(_lock);
-
- if (!_shardingEnabled) {
- warning() << "could not remove sharding for collection " << ns
- << ", sharding not enabled for db";
- return false;
- }
-
- CollectionInfoMap::iterator i = _collections.find(ns);
-
- if (i == _collections.end())
- return false;
-
- CollectionInfo& ci = _collections[ns];
- if (!ci.isSharded()) {
- warning() << "could not remove sharding for collection " << ns
- << ", no sharding information found";
- return false;
- }
-
- ci.unshard();
- _save(txn, false, true);
- return true;
-}
-
// Handles weird logic related to getting *either* a chunk manager *or* the collection primary
// shard
void DBConfig::getChunkManagerOrPrimary(OperationContext* txn,
@@ -539,154 +511,6 @@ bool DBConfig::reload(OperationContext* txn) {
return successful;
}
-bool DBConfig::dropDatabase(OperationContext* txn, string& errmsg) {
- /**
- * 1) update config server
- * 2) drop and reset sharded collections
- * 3) drop and reset primary
- * 4) drop everywhere to clean up loose ends
- */
-
- log() << "DBConfig::dropDatabase: " << _name;
- grid.catalogClient(txn)->logChange(
- txn, "dropDatabase.start", _name, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
-
- // 1
- grid.catalogCache()->invalidate(_name);
-
- Status result = grid.catalogClient(txn)->removeConfigDocuments(
- txn,
- DatabaseType::ConfigNS,
- BSON(DatabaseType::name(_name)),
- ShardingCatalogClient::kMajorityWriteConcern);
- if (!result.isOK()) {
- errmsg = result.reason();
- log() << "could not drop '" << _name << "': " << errmsg;
- return false;
- }
-
- LOG(1) << "\t removed entry from config server for: " << _name;
-
- set<ShardId> shardIds;
-
- // 2
- while (true) {
- int num = 0;
- if (!_dropShardedCollections(txn, num, shardIds, errmsg)) {
- return 0;
- }
-
- log() << " DBConfig::dropDatabase: " << _name << " dropped sharded collections: " << num;
-
- if (num == 0) {
- break;
- }
- }
-
- // 3
- {
- const auto shard = uassertStatusOK(grid.shardRegistry()->getShard(txn, _primaryId));
-
- ScopedDbConnection conn(shard->getConnString(), 30.0);
- BSONObj res;
- if (!conn->dropDatabase(_name, txn->getWriteConcern(), &res)) {
- errmsg = res.toString() + " at " + _primaryId.toString();
- return 0;
- }
- conn.done();
- if (auto wcErrorElem = res["writeConcernError"]) {
- auto wcError = wcErrorElem.Obj();
- if (auto errMsgElem = wcError["errmsg"]) {
- errmsg = errMsgElem.str() + " at " + _primaryId.toString();
- return false;
- }
- }
- }
-
- // 4
- for (const ShardId& shardId : shardIds) {
- const auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
- if (!shardStatus.isOK()) {
- continue;
- }
-
- ScopedDbConnection conn(shardStatus.getValue()->getConnString(), 30.0);
- BSONObj res;
- if (!conn->dropDatabase(_name, txn->getWriteConcern(), &res)) {
- errmsg = res.toString() + " at " + shardId.toString();
- return 0;
- }
- conn.done();
- if (auto wcErrorElem = res["writeConcernError"]) {
- auto wcError = wcErrorElem.Obj();
- if (auto errMsgElem = wcError["errmsg"]) {
- errmsg = errMsgElem.str() + " at " + shardId.toString();
- return false;
- }
- }
- }
-
- LOG(1) << "\t dropped primary db for: " << _name;
-
- grid.catalogClient(txn)->logChange(
- txn, "dropDatabase", _name, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
-
- return true;
-}
-
-bool DBConfig::_dropShardedCollections(OperationContext* txn,
- int& num,
- set<ShardId>& shardIds,
- string& errmsg) {
- num = 0;
- set<std::string> seen;
- while (true) {
- std::string aCollection;
- {
- stdx::lock_guard<stdx::mutex> lk(_lock);
-
- CollectionInfoMap::iterator i = _collections.begin();
- for (; i != _collections.end(); ++i) {
- if (i->second.isSharded()) {
- break;
- }
- }
-
- if (i == _collections.end()) {
- break;
- }
-
- aCollection = i->first;
- if (seen.count(aCollection)) {
- errmsg = "seen a collection twice!";
- return false;
- }
-
- seen.insert(aCollection);
- LOG(1) << "\t dropping sharded collection: " << aCollection;
-
- i->second.getCM()->getAllShardIds(&shardIds);
- }
- // drop lock before network activity
-
- uassertStatusOK(grid.catalogClient(txn)->dropCollection(txn, NamespaceString(aCollection)));
-
- // We should warn, but it's not a fatal error if someone else reloaded the db/coll as
- // unsharded in the meantime
- if (!removeSharding(txn, aCollection)) {
- warning() << "collection " << aCollection
- << " was reloaded as unsharded before drop completed"
- << " during drop of all collections";
- }
-
- num++;
- uassert(10184, "_dropShardedCollections too many collections - bailing", num < 100000);
- LOG(2) << "\t\t dropped " << num << " so far";
- }
-
- return true;
-}
-
void DBConfig::getAllShardIds(set<ShardId>* shardIds) {
dassert(shardIds);
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index 51274467154..0cba78036f9 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -34,14 +34,13 @@
#include "mongo/db/repl/optime.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/s/client/shard.h"
-#include "mongo/util/concurrency/mutex.h"
+#include "mongo/stdx/mutex.h"
namespace mongo {
class ChunkManager;
class CollectionType;
class DatabaseType;
-class DBConfig;
class OperationContext;
struct CollectionInfo {
@@ -129,11 +128,6 @@ public:
void enableSharding(OperationContext* txn);
/**
- @return true if there was sharding info to remove
- */
- bool removeSharding(OperationContext* txn, const std::string& ns);
-
- /**
* @return whether or not the 'ns' collection is partitioned
*/
bool isSharded(const std::string& ns);
@@ -163,8 +157,6 @@ public:
bool load(OperationContext* txn);
bool reload(OperationContext* txn);
- bool dropDatabase(OperationContext*, std::string& errmsg);
-
void getAllShardIds(std::set<ShardId>* shardIds);
void getAllShardedCollections(std::set<std::string>& namespaces);
@@ -172,11 +164,6 @@ protected:
typedef std::map<std::string, CollectionInfo> CollectionInfoMap;
typedef AtomicUInt64::WordType Counter;
- bool _dropShardedCollections(OperationContext* txn,
- int& num,
- std::set<ShardId>& shardIds,
- std::string& errmsg);
-
/**
* Returns true if it is successful at loading the DBConfig, false if the database is not found,
* and throws on all other errors.