path: root/src/mongo/db/commands/dbhash.cpp
Diffstat (limited to 'src/mongo/db/commands/dbhash.cpp')
-rw-r--r--  src/mongo/db/commands/dbhash.cpp  331
1 file changed, 159 insertions, 172 deletions
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 14a4ab955d5..dd9db449300 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -46,210 +46,197 @@
namespace mongo {
- using std::endl;
- using std::list;
- using std::set;
- using std::string;
- using std::unique_ptr;
- using std::vector;
+using std::endl;
+using std::list;
+using std::set;
+using std::string;
+using std::unique_ptr;
+using std::vector;
- DBHashCmd dbhashCmd;
+DBHashCmd dbhashCmd;
- void logOpForDbHash(OperationContext* txn, const char* ns) {
- dbhashCmd.wipeCacheForCollection(txn, ns);
- }
+void logOpForDbHash(OperationContext* txn, const char* ns) {
+ dbhashCmd.wipeCacheForCollection(txn, ns);
+}
- // ----
+// ----
- DBHashCmd::DBHashCmd() : Command("dbHash", false, "dbhash") {
- }
-
- void DBHashCmd::addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dbHash);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
- }
+DBHashCmd::DBHashCmd() : Command("dbHash", false, "dbhash") {}
- std::string DBHashCmd::hashCollection(OperationContext* opCtx,
- Database* db,
- const std::string& fullCollectionName,
- bool* fromCache) {
- stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
-
- if ( isCachable( fullCollectionName ) ) {
- cachedHashedLock.lock();
- string hash = _cachedHashed[fullCollectionName];
- if ( hash.size() > 0 ) {
- *fromCache = true;
- return hash;
- }
- }
+void DBHashCmd::addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dbHash);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+}
- *fromCache = false;
- Collection* collection = db->getCollection( fullCollectionName );
- if ( !collection )
- return "";
-
- IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex( opCtx );
-
- unique_ptr<PlanExecutor> exec;
- if ( desc ) {
- exec.reset(InternalPlanner::indexScan(opCtx,
- collection,
- desc,
- BSONObj(),
- BSONObj(),
- false,
- InternalPlanner::FORWARD,
- InternalPlanner::IXSCAN_FETCH));
- }
- else if ( collection->isCapped() ) {
- exec.reset(InternalPlanner::collectionScan(opCtx,
- fullCollectionName,
- collection));
- }
- else {
- log() << "can't find _id index for: " << fullCollectionName << endl;
- return "no _id _index";
+std::string DBHashCmd::hashCollection(OperationContext* opCtx,
+ Database* db,
+ const std::string& fullCollectionName,
+ bool* fromCache) {
+ stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
+
+ if (isCachable(fullCollectionName)) {
+ cachedHashedLock.lock();
+ string hash = _cachedHashed[fullCollectionName];
+ if (hash.size() > 0) {
+ *fromCache = true;
+ return hash;
}
+ }
- md5_state_t st;
- md5_init(&st);
+ *fromCache = false;
+ Collection* collection = db->getCollection(fullCollectionName);
+ if (!collection)
+ return "";
+
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(opCtx);
+
+ unique_ptr<PlanExecutor> exec;
+ if (desc) {
+ exec.reset(InternalPlanner::indexScan(opCtx,
+ collection,
+ desc,
+ BSONObj(),
+ BSONObj(),
+ false,
+ InternalPlanner::FORWARD,
+ InternalPlanner::IXSCAN_FETCH));
+ } else if (collection->isCapped()) {
+ exec.reset(InternalPlanner::collectionScan(opCtx, fullCollectionName, collection));
+ } else {
+ log() << "can't find _id index for: " << fullCollectionName << endl;
+ return "no _id _index";
+ }
- long long n = 0;
- PlanExecutor::ExecState state;
- BSONObj c;
- verify(NULL != exec.get());
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
- md5_append( &st , (const md5_byte_t*)c.objdata() , c.objsize() );
- n++;
- }
- if (PlanExecutor::IS_EOF != state) {
- warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
- }
- md5digest d;
- md5_finish(&st, d);
- string hash = digestToString( d );
+ md5_state_t st;
+ md5_init(&st);
- if (cachedHashedLock.owns_lock()) {
- _cachedHashed[fullCollectionName] = hash;
- }
+ long long n = 0;
+ PlanExecutor::ExecState state;
+ BSONObj c;
+ verify(NULL != exec.get());
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
+ md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize());
+ n++;
+ }
+ if (PlanExecutor::IS_EOF != state) {
+ warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
+ }
+ md5digest d;
+ md5_finish(&st, d);
+ string hash = digestToString(d);
- return hash;
+ if (cachedHashedLock.owns_lock()) {
+ _cachedHashed[fullCollectionName] = hash;
}
- bool DBHashCmd::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Timer timer;
-
- set<string> desiredCollections;
- if ( cmdObj["collections"].type() == Array ) {
- BSONObjIterator i( cmdObj["collections"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.type() != String ) {
- errmsg = "collections entries have to be strings";
- return false;
- }
- desiredCollections.insert( e.String() );
- }
- }
+ return hash;
+}
- list<string> colls;
- const string ns = parseNs(dbname, cmdObj);
-
- // We lock the entire database in S-mode in order to ensure that the contents will not
- // change for the snapshot.
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, ns, MODE_S);
- Database* db = autoDb.getDb();
- if (db) {
- db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
- colls.sort();
+bool DBHashCmd::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Timer timer;
+
+ set<string> desiredCollections;
+ if (cmdObj["collections"].type() == Array) {
+ BSONObjIterator i(cmdObj["collections"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() != String) {
+ errmsg = "collections entries have to be strings";
+ return false;
+ }
+ desiredCollections.insert(e.String());
}
+ }
- result.appendNumber( "numCollections" , (long long)colls.size() );
- result.append( "host" , prettyHostName() );
+ list<string> colls;
+ const string ns = parseNs(dbname, cmdObj);
+
+ // We lock the entire database in S-mode in order to ensure that the contents will not
+ // change for the snapshot.
+ ScopedTransaction scopedXact(txn, MODE_IS);
+ AutoGetDb autoDb(txn, ns, MODE_S);
+ Database* db = autoDb.getDb();
+ if (db) {
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
+ colls.sort();
+ }
- md5_state_t globalState;
- md5_init(&globalState);
+ result.appendNumber("numCollections", (long long)colls.size());
+ result.append("host", prettyHostName());
- vector<string> cached;
+ md5_state_t globalState;
+ md5_init(&globalState);
- BSONObjBuilder bb( result.subobjStart( "collections" ) );
- for ( list<string>::iterator i=colls.begin(); i != colls.end(); i++ ) {
- string fullCollectionName = *i;
- if ( fullCollectionName.size() -1 <= dbname.size() ) {
- errmsg = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
- return false;
- }
- string shortCollectionName = fullCollectionName.substr( dbname.size() + 1 );
+ vector<string> cached;
- if ( shortCollectionName.find( "system." ) == 0 )
- continue;
+ BSONObjBuilder bb(result.subobjStart("collections"));
+ for (list<string>::iterator i = colls.begin(); i != colls.end(); i++) {
+ string fullCollectionName = *i;
+ if (fullCollectionName.size() - 1 <= dbname.size()) {
+ errmsg = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
+ return false;
+ }
+ string shortCollectionName = fullCollectionName.substr(dbname.size() + 1);
- if ( desiredCollections.size() > 0 &&
- desiredCollections.count( shortCollectionName ) == 0 )
- continue;
+ if (shortCollectionName.find("system.") == 0)
+ continue;
- bool fromCache = false;
- string hash = hashCollection( txn, db, fullCollectionName, &fromCache );
+ if (desiredCollections.size() > 0 && desiredCollections.count(shortCollectionName) == 0)
+ continue;
- bb.append( shortCollectionName, hash );
+ bool fromCache = false;
+ string hash = hashCollection(txn, db, fullCollectionName, &fromCache);
- md5_append( &globalState , (const md5_byte_t*)hash.c_str() , hash.size() );
- if ( fromCache )
- cached.push_back( fullCollectionName );
- }
- bb.done();
+ bb.append(shortCollectionName, hash);
- md5digest d;
- md5_finish(&globalState, d);
- string hash = digestToString( d );
+ md5_append(&globalState, (const md5_byte_t*)hash.c_str(), hash.size());
+ if (fromCache)
+ cached.push_back(fullCollectionName);
+ }
+ bb.done();
- result.append( "md5" , hash );
- result.appendNumber( "timeMillis", timer.millis() );
+ md5digest d;
+ md5_finish(&globalState, d);
+ string hash = digestToString(d);
- result.append( "fromCache", cached );
+ result.append("md5", hash);
+ result.appendNumber("timeMillis", timer.millis());
- return 1;
- }
+ result.append("fromCache", cached);
- class DBHashCmd::DBHashLogOpHandler : public RecoveryUnit::Change {
- public:
- DBHashLogOpHandler(DBHashCmd* dCmd,
- StringData ns):
- _dCmd(dCmd),
- _ns(ns.toString()) {
+ return 1;
+}
- }
- void commit() {
- stdx::lock_guard<stdx::mutex> lk( _dCmd->_cachedHashedMutex );
- _dCmd->_cachedHashed.erase(_ns);
- }
- void rollback() { }
-
- private:
- DBHashCmd *_dCmd;
- const std::string _ns;
- };
-
- void DBHashCmd::wipeCacheForCollection(OperationContext* txn,
- StringData ns) {
- if ( !isCachable( ns ) )
- return;
- txn->recoveryUnit()->registerChange(new DBHashLogOpHandler(this, ns));
+class DBHashCmd::DBHashLogOpHandler : public RecoveryUnit::Change {
+public:
+ DBHashLogOpHandler(DBHashCmd* dCmd, StringData ns) : _dCmd(dCmd), _ns(ns.toString()) {}
+ void commit() {
+ stdx::lock_guard<stdx::mutex> lk(_dCmd->_cachedHashedMutex);
+ _dCmd->_cachedHashed.erase(_ns);
}
+ void rollback() {}
- bool DBHashCmd::isCachable( StringData ns ) const {
- return ns.startsWith( "config." );
- }
+private:
+ DBHashCmd* _dCmd;
+ const std::string _ns;
+};
+
+void DBHashCmd::wipeCacheForCollection(OperationContext* txn, StringData ns) {
+ if (!isCachable(ns))
+ return;
+ txn->recoveryUnit()->registerChange(new DBHashLogOpHandler(this, ns));
+}
+bool DBHashCmd::isCachable(StringData ns) const {
+ return ns.startsWith("config.");
+}
}
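
Aside from the reindentation, the hashCollection()/run() pair above keeps the command's two-level hashing scheme intact: each collection's documents are streamed in _id-index order into one MD5 state (the raw objdata()/objsize() bytes of every BSONObj), and the database-level "md5" result field is then an MD5 over the concatenation of the per-collection hex digests. Below is a minimal standalone sketch of that aggregation, not code from the MongoDB tree: OpenSSL's MD5_* routines stand in for the server's bundled md5_init/md5_append/md5_finish, and the digestToHex() helper, file name, and toy collection data are made up for illustration.

// Hypothetical standalone sketch (not MongoDB code) of the two-level hashing
// that DBHashCmd::run() and hashCollection() perform. OpenSSL's MD5 is used
// in place of the server's bundled md5_* routines.
//
// Build (assumes OpenSSL is available):  g++ -std=c++11 dbhash_sketch.cpp -lcrypto

#include <openssl/md5.h>

#include <cstdio>
#include <map>
#include <string>
#include <vector>

namespace {

// Hex-encode a 16-byte MD5 digest, analogous to digestToString() in the server.
std::string digestToHex(const unsigned char* d) {
    static const char* hex = "0123456789abcdef";
    std::string out;
    for (int i = 0; i < MD5_DIGEST_LENGTH; i++) {
        out += hex[(d[i] >> 4) & 0xf];
        out += hex[d[i] & 0xf];
    }
    return out;
}

// Per-collection hash: feed every document's raw bytes into one MD5 state,
// in a deterministic (_id index) order, as hashCollection() does with each
// BSONObj's objdata()/objsize().
std::string hashCollection(const std::vector<std::string>& documentsInIdOrder) {
    MD5_CTX st;
    MD5_Init(&st);
    for (const std::string& doc : documentsInIdOrder) {
        MD5_Update(&st, doc.data(), doc.size());
    }
    unsigned char d[MD5_DIGEST_LENGTH];
    MD5_Final(d, &st);
    return digestToHex(d);
}

}  // namespace

int main() {
    // Toy "database": collection name -> documents already sorted by _id.
    std::map<std::string, std::vector<std::string>> db = {
        {"coll_a", {"{_id:1}", "{_id:2}"}},
        {"coll_b", {"{_id:1,x:3}"}},
    };

    // Database-level hash: MD5 over the concatenation of the per-collection
    // hex digests, mirroring the md5_append(&globalState, hash.c_str(), ...)
    // loop in DBHashCmd::run().
    MD5_CTX globalState;
    MD5_Init(&globalState);
    for (const auto& kv : db) {
        std::string collHash = hashCollection(kv.second);
        std::printf("%s: %s\n", kv.first.c_str(), collHash.c_str());
        MD5_Update(&globalState, collHash.data(), collHash.size());
    }
    unsigned char d[MD5_DIGEST_LENGTH];
    MD5_Final(d, &globalState);
    std::printf("md5: %s\n", digestToHex(d).c_str());
    return 0;
}

This structure is also why the command reports both a per-collection hash under "collections" and a single database-wide "md5": a mismatch in the top-level digest between replica-set members can be narrowed down by comparing the per-collection entries.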
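
The caching half of the code is worth a note as well: wipeCacheForCollection() does not erase the cached hash directly. It registers a DBHashLogOpHandler with the operation's recovery unit, so the entry for a "config." collection is dropped only when the write that invalidated it actually commits; rollback() is a no-op, leaving the cache intact for writes that never become visible. The following is a hypothetical, self-contained sketch of that pattern, not MongoDB code: the Change, RecoveryUnit, and HashCache types here are simplified stand-ins for RecoveryUnit::Change, OperationContext::recoveryUnit(), and the _cachedHashed map guarded by _cachedHashedMutex.

// Hypothetical standalone sketch (not MongoDB code) of "invalidate the cache
// only on commit", as done by DBHashCmd::wipeCacheForCollection().

#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

// Stand-in for RecoveryUnit::Change.
struct Change {
    virtual ~Change() {}
    virtual void commit() = 0;
    virtual void rollback() = 0;
};

// Stand-in for the recovery unit: owns registered changes and fires
// commit() or rollback() on each when the storage transaction resolves.
class RecoveryUnit {
public:
    void registerChange(Change* c) {
        _changes.emplace_back(c);
    }
    void commitUnitOfWork() {
        for (auto& c : _changes)
            c->commit();
        _changes.clear();
    }
    void abortUnitOfWork() {
        for (auto& c : _changes)
            c->rollback();
        _changes.clear();
    }

private:
    std::vector<std::unique_ptr<Change>> _changes;
};

// Minimal analogue of the dbhash cache plus its DBHashLogOpHandler.
class HashCache {
public:
    void put(const std::string& ns, const std::string& hash) {
        std::lock_guard<std::mutex> lk(_mutex);
        _cached[ns] = hash;
    }
    void wipeOnCommit(RecoveryUnit* ru, const std::string& ns) {
        ru->registerChange(new WipeHandler(this, ns));
    }
    bool has(const std::string& ns) {
        std::lock_guard<std::mutex> lk(_mutex);
        return _cached.count(ns) > 0;
    }

private:
    struct WipeHandler : public Change {
        WipeHandler(HashCache* cache, std::string ns) : _cache(cache), _ns(std::move(ns)) {}
        void commit() override {
            // Only a committed write invalidates the cached hash.
            std::lock_guard<std::mutex> lk(_cache->_mutex);
            _cache->_cached.erase(_ns);
        }
        void rollback() override {}  // A rolled-back write leaves the cache valid.
        HashCache* _cache;
        const std::string _ns;
    };

    std::mutex _mutex;
    std::map<std::string, std::string> _cached;
};

int main() {
    HashCache cache;
    cache.put("config.settings", "abc123");

    RecoveryUnit ru;
    cache.wipeOnCommit(&ru, "config.settings");  // a write to config.settings is in flight
    ru.commitUnitOfWork();                       // the write commits

    std::cout << std::boolalpha << cache.has("config.settings") << "\n";  // false
    return 0;
}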