Diffstat (limited to 'src/mongo/db/index_rebuilder.cpp')
-rw-r--r--  src/mongo/db/index_rebuilder.cpp  193
1 file changed, 93 insertions(+), 100 deletions(-)
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 3dbbb133e20..d416d9fe087 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -52,129 +52,122 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::vector;
+using std::endl;
+using std::string;
+using std::vector;
namespace {
- void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
- bool firstTime = true;
- for (std::list<std::string>::const_iterator it = nsToCheck.begin();
- it != nsToCheck.end();
- ++it) {
-
- string ns = *it;
+void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
+ bool firstTime = true;
+ for (std::list<std::string>::const_iterator it = nsToCheck.begin(); it != nsToCheck.end();
+ ++it) {
+ string ns = *it;
+
+ LOG(3) << "IndexRebuilder::checkNS: " << ns;
+
+ // This write lock is held throughout the index building process
+ // for this namespace.
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ OldClientContext ctx(txn, ns);
+
+ Collection* collection = ctx.db()->getCollection(ns);
+ if (collection == NULL)
+ continue;
+
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
+
+ if (collection->ns().isOplog() && indexCatalog->numIndexesTotal(txn) > 0) {
+ warning() << ns << " had illegal indexes, removing";
+ indexCatalog->dropAllIndexes(txn, true);
+ continue;
+ }
- LOG(3) << "IndexRebuilder::checkNS: " << ns;
- // This write lock is held throughout the index building process
- // for this namespace.
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
- OldClientContext ctx(txn, ns);
+ MultiIndexBlock indexer(txn, collection);
- Collection* collection = ctx.db()->getCollection(ns);
- if ( collection == NULL )
- continue;
+ {
+ WriteUnitOfWork wunit(txn);
+ vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(txn);
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
+ // The indexes have now been removed from system.indexes, so the only record is
+ // in-memory. If there is a journal commit between now and when insert() rewrites
+ // the entry and the db crashes before the new system.indexes entry is journalled,
+ // the index will be lost forever. Thus, we must stay in the same WriteUnitOfWork
+ // to ensure that no journaling will happen between now and the entry being
+ // re-written in MultiIndexBlock::init(). The actual index building is done outside
+ // of this WUOW.
- if ( collection->ns().isOplog() && indexCatalog->numIndexesTotal( txn ) > 0 ) {
- warning() << ns << " had illegal indexes, removing";
- indexCatalog->dropAllIndexes(txn, true);
+ if (indexesToBuild.empty()) {
continue;
}
+ log() << "found " << indexesToBuild.size() << " interrupted index build(s) on " << ns;
- MultiIndexBlock indexer(txn, collection);
-
- {
- WriteUnitOfWork wunit(txn);
- vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(txn);
-
- // The indexes have now been removed from system.indexes, so the only record is
- // in-memory. If there is a journal commit between now and when insert() rewrites
- // the entry and the db crashes before the new system.indexes entry is journalled,
- // the index will be lost forever. Thus, we must stay in the same WriteUnitOfWork
- // to ensure that no journaling will happen between now and the entry being
- // re-written in MultiIndexBlock::init(). The actual index building is done outside
- // of this WUOW.
-
- if (indexesToBuild.empty()) {
- continue;
- }
-
- log() << "found " << indexesToBuild.size()
- << " interrupted index build(s) on " << ns;
-
- if (firstTime) {
- log() << "note: restart the server with --noIndexBuildRetry "
- << "to skip index rebuilds";
- firstTime = false;
- }
-
- if (!serverGlobalParams.indexBuildRetry) {
- log() << " not rebuilding interrupted indexes";
- wunit.commit();
- continue;
- }
-
- uassertStatusOK(indexer.init(indexesToBuild));
+ if (firstTime) {
+ log() << "note: restart the server with --noIndexBuildRetry "
+ << "to skip index rebuilds";
+ firstTime = false;
+ }
+ if (!serverGlobalParams.indexBuildRetry) {
+ log() << " not rebuilding interrupted indexes";
wunit.commit();
+ continue;
}
- try {
- uassertStatusOK(indexer.insertAllDocumentsInCollection());
+ uassertStatusOK(indexer.init(indexesToBuild));
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- wunit.commit();
- }
- catch (const DBException& e) {
- error() << "Index rebuilding did not complete: " << e.toString();
- log() << "note: restart the server with --noIndexBuildRetry to skip index rebuilds";
- // If anything went wrong, leave the indexes partially built so that we pick them up
- // again on restart.
- indexer.abortWithoutCleanup();
- fassertFailedNoTrace(26100);
- }
- catch (...) {
- // If anything went wrong, leave the indexes partially built so that we pick them up
- // again on restart.
- indexer.abortWithoutCleanup();
- throw;
- }
+ wunit.commit();
}
- }
-} // namespace
- void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
- AuthorizationSession::get(txn->getClient())->grantInternalAuthorization();
+ try {
+ uassertStatusOK(indexer.insertAllDocumentsInCollection());
+
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
+ } catch (const DBException& e) {
+ error() << "Index rebuilding did not complete: " << e.toString();
+ log() << "note: restart the server with --noIndexBuildRetry to skip index rebuilds";
+ // If anything went wrong, leave the indexes partially built so that we pick them up
+ // again on restart.
+ indexer.abortWithoutCleanup();
+ fassertFailedNoTrace(26100);
+ } catch (...) {
+ // If anything went wrong, leave the indexes partially built so that we pick them up
+ // again on restart.
+ indexer.abortWithoutCleanup();
+ throw;
+ }
+ }
+}
+} // namespace
- std::vector<std::string> dbNames;
+void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
+ AuthorizationSession::get(txn->getClient())->grantInternalAuthorization();
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases( &dbNames );
+ std::vector<std::string> dbNames;
- try {
- std::list<std::string> collNames;
- for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
- dbName < dbNames.end();
- ++dbName) {
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&dbNames);
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, *dbName, MODE_S);
+ try {
+ std::list<std::string> collNames;
+ for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
+ dbName < dbNames.end();
+ ++dbName) {
+ ScopedTransaction scopedXact(txn, MODE_IS);
+ AutoGetDb autoDb(txn, *dbName, MODE_S);
- Database* db = autoDb.getDb();
- db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
- }
- checkNS(txn, collNames);
- }
- catch (const DBException& e) {
- error() << "Index verification did not complete: " << e.toString();
- fassertFailedNoTrace(18643);
+ Database* db = autoDb.getDb();
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
}
- LOG(1) << "checking complete" << endl;
+ checkNS(txn, collNames);
+ } catch (const DBException& e) {
+ error() << "Index verification did not complete: " << e.toString();
+ fassertFailedNoTrace(18643);
}
+ LOG(1) << "checking complete" << endl;
+}
}
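
For reference, a minimal self-contained sketch of the retry flow that checkNS() implements. All types below are simplified stand-ins for the real MongoDB classes (MultiIndexBlock, WriteUnitOfWork, IndexCatalog, OperationContext); only the control flow mirrors the code in this file, and locking, logging, and the oplog special case are omitted.

// Sketch of the interrupted-index-build retry flow, with stand-in types.
#include <exception>
#include <iostream>
#include <string>
#include <vector>

struct IndexSpec {
    std::string name;
};

// Stand-in for IndexCatalog::getAndClearUnfinishedIndexes(): returns the specs of
// index builds interrupted by the last shutdown and removes their catalog entries,
// so they must be re-registered before any journal commit happens.
std::vector<IndexSpec> getAndClearUnfinishedIndexes() {
    return {{"a_1"}, {"b_1"}};
}

// Stand-in for MultiIndexBlock: init() re-registers the specs,
// insertAllDocumentsInCollection() does the bulk build, commit() marks the indexes
// ready, abortWithoutCleanup() leaves the partially built state on disk so the next
// startup retries the build.
struct MultiIndexBlock {
    void init(const std::vector<IndexSpec>& specs) {
        std::cout << "re-registering " << specs.size() << " unfinished index build(s)\n";
    }
    void insertAllDocumentsInCollection() { /* bulk-load every document */ }
    void commit() { std::cout << "index build committed\n"; }
    void abortWithoutCleanup() { std::cout << "leaving partial build for next restart\n"; }
};

// Stand-in for WriteUnitOfWork: changes are rolled back unless commit() is called.
struct WriteUnitOfWork {
    void commit() {}
};

void rebuildInterruptedIndexes(bool indexBuildRetry) {
    MultiIndexBlock indexer;
    {
        // Clearing the old catalog entries and re-registering them share one unit of
        // work, so no journal commit can observe the index as removed but not re-added.
        WriteUnitOfWork wunit;
        std::vector<IndexSpec> toBuild = getAndClearUnfinishedIndexes();
        if (toBuild.empty())
            return;
        if (!indexBuildRetry) {  // --noIndexBuildRetry: drop the unfinished builds
            wunit.commit();
            return;
        }
        indexer.init(toBuild);
        wunit.commit();
    }
    try {
        // The expensive document scan runs outside that unit of work.
        indexer.insertAllDocumentsInCollection();
        WriteUnitOfWork wunit;
        indexer.commit();
        wunit.commit();
    } catch (const std::exception& e) {
        std::cout << "index rebuild failed: " << e.what() << "\n";
        // Keep the partially built indexes so the next startup retries them.
        indexer.abortWithoutCleanup();
        throw;
    }
}

int main() {
    rebuildInterruptedIndexes(/*indexBuildRetry=*/true);
}

The key design point, as the comment in the diff explains, is that removing and re-registering the unfinished index entries happen in the same WriteUnitOfWork, while the actual bulk build runs outside it; on failure, abortWithoutCleanup() preserves the partial state so the build is picked up again at the next startup.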