summaryrefslogtreecommitdiff
path: root/src/mongo/db/commands/dbhash.cpp
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2016-03-29 15:23:52 -0400
committerKaloian Manassiev <kaloian.manassiev@mongodb.com>2016-03-30 10:16:17 -0400
commit064c45a6a0688700d7836038db8057cda559dbd6 (patch)
tree17ac22e471cc6bb143f57d970151e5035285505e /src/mongo/db/commands/dbhash.cpp
parent38b1b8a675993d2d12ffba5976c7344223b56640 (diff)
downloadmongo-064c45a6a0688700d7836038db8057cda559dbd6.tar.gz
SERVER-23407 Get rid of public fields in the Command class
Diffstat (limited to 'src/mongo/db/commands/dbhash.cpp')
-rw-r--r--src/mongo/db/commands/dbhash.cpp333
1 file changed, 173 insertions, 160 deletions
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 276c46c206f..dcb63a5936c 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -30,221 +30,234 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+#include "mongo/platform/basic.h"
+
#include "mongo/db/commands/dbhash.h"
+#include <map>
+#include <string>
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_catalog_entry.h"
-#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/internal_plans.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
#include "mongo/util/md5.hpp"
#include "mongo/util/timer.h"
namespace mongo {
-using std::endl;
using std::list;
using std::set;
using std::string;
using std::unique_ptr;
using std::vector;
-DBHashCmd dbhashCmd;
-
-
-void logOpForDbHash(OperationContext* txn, const char* ns) {
- NamespaceString nsString(ns);
- dbhashCmd.wipeCacheForCollection(txn, nsString);
-}
-
-// ----
-
-DBHashCmd::DBHashCmd() : Command("dbHash", false, "dbhash") {}
-
-void DBHashCmd::addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dbHash);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
-}
-
-std::string DBHashCmd::hashCollection(OperationContext* opCtx,
- Database* db,
- const std::string& fullCollectionName,
- bool* fromCache) {
- stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
+namespace {
- NamespaceString ns(fullCollectionName);
+class DBHashCmd : public Command {
+public:
+ DBHashCmd() : Command("dbHash", false, "dbhash") {}
- if (isCachable(ns)) {
- cachedHashedLock.lock();
- string hash = _cachedHashed[ns.db().toString()][ns.coll().toString()];
- if (hash.size() > 0) {
- *fromCache = true;
- return hash;
- }
+ virtual bool slaveOk() const {
+ return true;
}
- *fromCache = false;
- Collection* collection = db->getCollection(fullCollectionName);
- if (!collection)
- return "";
-
- IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(opCtx);
-
- unique_ptr<PlanExecutor> exec;
- if (desc) {
- exec = InternalPlanner::indexScan(opCtx,
- collection,
- desc,
- BSONObj(),
- BSONObj(),
- false, // endKeyInclusive
- PlanExecutor::YIELD_MANUAL,
- InternalPlanner::FORWARD,
- InternalPlanner::IXSCAN_FETCH);
- } else if (collection->isCapped()) {
- exec = InternalPlanner::collectionScan(
- opCtx, fullCollectionName, collection, PlanExecutor::YIELD_MANUAL);
- } else {
- log() << "can't find _id index for: " << fullCollectionName << endl;
- return "no _id _index";
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dbHash);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- md5_state_t st;
- md5_init(&st);
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Timer timer;
+
+ set<string> desiredCollections;
+ if (cmdObj["collections"].type() == Array) {
+ BSONObjIterator i(cmdObj["collections"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() != String) {
+ errmsg = "collections entries have to be strings";
+ return false;
+ }
+ desiredCollections.insert(e.String());
+ }
+ }
- long long n = 0;
- PlanExecutor::ExecState state;
- BSONObj c;
- verify(NULL != exec.get());
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
- md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize());
- n++;
- }
- if (PlanExecutor::IS_EOF != state) {
- warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
- uasserted(34371,
- "Plan executor error while running dbHash command: " +
- WorkingSetCommon::toStatusString(c));
- }
- md5digest d;
- md5_finish(&st, d);
- string hash = digestToString(d);
+ list<string> colls;
+ const string ns = parseNs(dbname, cmdObj);
+
+ // We lock the entire database in S-mode in order to ensure that the contents will not
+ // change for the snapshot.
+ ScopedTransaction scopedXact(txn, MODE_IS);
+ AutoGetDb autoDb(txn, ns, MODE_S);
+ Database* db = autoDb.getDb();
+ if (db) {
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
+ colls.sort();
+ }
- if (cachedHashedLock.owns_lock()) {
- _cachedHashed[ns.db().toString()][ns.coll().toString()] = hash;
- }
+ result.append("host", prettyHostName());
- return hash;
-}
+ md5_state_t globalState;
+ md5_init(&globalState);
+
+ vector<string> cached;
-bool DBHashCmd::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Timer timer;
-
- set<string> desiredCollections;
- if (cmdObj["collections"].type() == Array) {
- BSONObjIterator i(cmdObj["collections"].Obj());
- while (i.more()) {
- BSONElement e = i.next();
- if (e.type() != String) {
- errmsg = "collections entries have to be strings";
+ BSONObjBuilder bb(result.subobjStart("collections"));
+ for (list<string>::iterator i = colls.begin(); i != colls.end(); i++) {
+ string fullCollectionName = *i;
+ if (fullCollectionName.size() - 1 <= dbname.size()) {
+ errmsg = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
return false;
}
- desiredCollections.insert(e.String());
- }
- }
+ string shortCollectionName = fullCollectionName.substr(dbname.size() + 1);
- list<string> colls;
- const string ns = parseNs(dbname, cmdObj);
-
- // We lock the entire database in S-mode in order to ensure that the contents will not
- // change for the snapshot.
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, ns, MODE_S);
- Database* db = autoDb.getDb();
- if (db) {
- db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
- colls.sort();
- }
+ if (shortCollectionName.find("system.") == 0)
+ continue;
- result.append("host", prettyHostName());
+ if (desiredCollections.size() > 0 && desiredCollections.count(shortCollectionName) == 0)
+ continue;
- md5_state_t globalState;
- md5_init(&globalState);
+ bool fromCache = false;
+ string hash = _hashCollection(txn, db, fullCollectionName, &fromCache);
- vector<string> cached;
+ bb.append(shortCollectionName, hash);
- BSONObjBuilder bb(result.subobjStart("collections"));
- for (list<string>::iterator i = colls.begin(); i != colls.end(); i++) {
- string fullCollectionName = *i;
- if (fullCollectionName.size() - 1 <= dbname.size()) {
- errmsg = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
- return false;
+ md5_append(&globalState, (const md5_byte_t*)hash.c_str(), hash.size());
+ if (fromCache)
+ cached.push_back(fullCollectionName);
}
- string shortCollectionName = fullCollectionName.substr(dbname.size() + 1);
+ bb.done();
- if (shortCollectionName.find("system.") == 0)
- continue;
+ md5digest d;
+ md5_finish(&globalState, d);
+ string hash = digestToString(d);
- if (desiredCollections.size() > 0 && desiredCollections.count(shortCollectionName) == 0)
- continue;
+ result.append("md5", hash);
+ result.appendNumber("timeMillis", timer.millis());
- bool fromCache = false;
- string hash = hashCollection(txn, db, fullCollectionName, &fromCache);
+ result.append("fromCache", cached);
- bb.append(shortCollectionName, hash);
+ return 1;
+ }
- md5_append(&globalState, (const md5_byte_t*)hash.c_str(), hash.size());
- if (fromCache)
- cached.push_back(fullCollectionName);
+ void wipeCacheForCollection(OperationContext* txn, const NamespaceString& ns) {
+ if (!_isCachable(ns))
+ return;
+
+ txn->recoveryUnit()->onCommit([this, txn, ns] {
+ stdx::lock_guard<stdx::mutex> lk(_cachedHashedMutex);
+ if (ns.isCommand()) {
+ // The <dbName>.$cmd namespace can represent a command that
+ // modifies the entire database, e.g. dropDatabase, so we remove
+ // the cached entries for all collections in the database.
+ _cachedHashed.erase(ns.db().toString());
+ } else {
+ _cachedHashed[ns.db().toString()].erase(ns.coll().toString());
+ }
+ });
}
- bb.done();
- md5digest d;
- md5_finish(&globalState, d);
- string hash = digestToString(d);
+private:
+ bool _isCachable(const NamespaceString& ns) const {
+ return ns.isConfigDB();
+ }
- result.append("md5", hash);
- result.appendNumber("timeMillis", timer.millis());
+ std::string _hashCollection(OperationContext* opCtx,
+ Database* db,
+ const std::string& fullCollectionName,
+ bool* fromCache) {
+ stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
- result.append("fromCache", cached);
+ NamespaceString ns(fullCollectionName);
- return 1;
-}
+ if (_isCachable(ns)) {
+ cachedHashedLock.lock();
+ string hash = _cachedHashed[ns.db().toString()][ns.coll().toString()];
+ if (hash.size() > 0) {
+ *fromCache = true;
+ return hash;
+ }
+ }
-void DBHashCmd::wipeCacheForCollection(OperationContext* txn, const NamespaceString& ns) {
- if (!isCachable(ns))
- return;
-
- txn->recoveryUnit()->onCommit([this, txn, ns] {
- stdx::lock_guard<stdx::mutex> lk(_cachedHashedMutex);
- if (ns.isCommand()) {
- // The <dbName>.$cmd namespace can represent a command that
- // modifies the entire database, e.g. dropDatabase, so we remove
- // the cached entries for all collections in the database.
- _cachedHashed.erase(ns.db().toString());
+ *fromCache = false;
+ Collection* collection = db->getCollection(fullCollectionName);
+ if (!collection)
+ return "";
+
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(opCtx);
+
+ unique_ptr<PlanExecutor> exec;
+ if (desc) {
+ exec = InternalPlanner::indexScan(opCtx,
+ collection,
+ desc,
+ BSONObj(),
+ BSONObj(),
+ false, // endKeyInclusive
+ PlanExecutor::YIELD_MANUAL,
+ InternalPlanner::FORWARD,
+ InternalPlanner::IXSCAN_FETCH);
+ } else if (collection->isCapped()) {
+ exec = InternalPlanner::collectionScan(
+ opCtx, fullCollectionName, collection, PlanExecutor::YIELD_MANUAL);
} else {
- _cachedHashed[ns.db().toString()].erase(ns.coll().toString());
+ log() << "can't find _id index for: " << fullCollectionName;
+ return "no _id _index";
}
+ md5_state_t st;
+ md5_init(&st);
- });
-}
+ long long n = 0;
+ PlanExecutor::ExecState state;
+ BSONObj c;
+ verify(NULL != exec.get());
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
+ md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize());
+ n++;
+ }
+ if (PlanExecutor::IS_EOF != state) {
+ warning() << "error while hashing, db dropped? ns=" << fullCollectionName;
+ uasserted(34371,
+ "Plan executor error while running dbHash command: " +
+ WorkingSetCommon::toStatusString(c));
+ }
+ md5digest d;
+ md5_finish(&st, d);
+ string hash = digestToString(d);
-bool DBHashCmd::isCachable(const NamespaceString& ns) const {
- return ns.isConfigDB();
-}
+ if (cachedHashedLock.owns_lock()) {
+ _cachedHashed[ns.db().toString()][ns.coll().toString()] = hash;
+ }
+
+ return hash;
+ }
+
+ stdx::mutex _cachedHashedMutex;
+ std::map<std::string, std::map<std::string, std::string>> _cachedHashed;
+
+} dbhashCmd;
+
+} // namespace
+
+void logOpForDbHash(OperationContext* txn, const char* ns) {
+ NamespaceString nsString(ns);
+ dbhashCmd.wipeCacheForCollection(txn, nsString);
}
+
+} // namespace mongo