Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/cap.cpp               |  4
-rw-r--r--  src/mongo/db/index.cpp             | 10
-rw-r--r--  src/mongo/db/index_rebuilder.cpp   | 28
-rw-r--r--  src/mongo/db/index_rebuilder.h     |  7
-rw-r--r--  src/mongo/db/index_update.cpp      | 96
-rw-r--r--  src/mongo/db/index_update.h        |  1
-rw-r--r--  src/mongo/db/namespace_details.cpp | 39
-rw-r--r--  src/mongo/db/namespace_details.h   | 12
-rw-r--r--  src/mongo/db/ops/update.cpp        | 13
-rw-r--r--  src/mongo/db/pdfile.cpp            | 89
-rw-r--r--  src/mongo/db/pdfile.h              | 18
-rw-r--r--  src/mongo/db/prefetch.cpp          |  2
12 files changed, 210 insertions, 109 deletions
diff --git a/src/mongo/db/cap.cpp b/src/mongo/db/cap.cpp
index 4fe4b9d6946..9989b6cfbc1 100644
--- a/src/mongo/db/cap.cpp
+++ b/src/mongo/db/cap.cpp
@@ -427,8 +427,8 @@ namespace mongo {
     void NamespaceDetails::emptyCappedCollection( const char *ns ) {
         DEV verify( this == nsdetails(ns) );
         massert( 13424, "collection must be capped", isCapped() );
-        massert( 13425, "background index build in progress", !indexBuildInProgress );
-
+        massert( 13425, "background index build in progress", !indexBuildsInProgress );
+
         vector<BSONObj> indexes = Helpers::findAll( Namespace( ns ).getSisterNS( "system.indexes" ) , BSON( "ns" << ns ) );
         for ( unsigned i=0; i<indexes.size(); i++ ) {
             indexes[i] = indexes[i].copy();
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
index e8c85882cf4..22c268209b7 100644
--- a/src/mongo/db/index.cpp
+++ b/src/mongo/db/index.cpp
@@ -250,7 +250,7 @@ namespace mongo {
 
     void getIndexChanges(vector<IndexChanges>& v, const char *ns, NamespaceDetails& d,
                          BSONObj newObj, BSONObj oldObj, bool &changedId) {
-        int z = d.nIndexesBeingBuilt();
+        int z = d.getTotalIndexCount();
         v.resize(z);
         for( int i = 0; i < z; i++ ) {
             IndexDetails& idx = d.idx(i);
@@ -269,7 +269,7 @@ namespace mongo {
     }
 
     void dupCheck(vector<IndexChanges>& v, NamespaceDetails& d, DiskLoc curObjLoc) {
-        int z = d.nIndexesBeingBuilt();
+        int z = d.getTotalIndexCount();
         for( int i = 0; i < z; i++ ) {
             IndexDetails& idx = d.idx(i);
             v[i].dupCheck(idx, curObjLoc);
@@ -354,12 +354,6 @@ namespace mongo {
             uasserted(12505,s);
         }
 
-        /* we can't build a new index for the ns if a build is already in progress in the background -
-           EVEN IF this is a foreground build.
-           */
-        uassert(12588, "cannot add index with a background operation in progress",
-                !BackgroundOperation::inProgForNs(sourceNS.c_str()));
-
         /* this is because we want key patterns like { _id : 1 } and { _id : <someobjid> }
            to all be treated as the same pattern.
            */
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 5ae2b389510..9567b7d5507 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -50,6 +50,7 @@ namespace mongo {
         DBDirectClient cli;
         scoped_ptr<DBClientCursor> cursor(cli.query(systemNS, Query()));
 
+        // This depends on system.namespaces not changing while we iterate
         while (cursor->more()) {
             BSONObj nsDoc = cursor->next();
             const char* ns = nsDoc["name"].valuestrsafe();
@@ -57,7 +58,7 @@ namespace mongo {
             Client::Context ctx(ns, dbpath, false, false);
             NamespaceDetails* nsd = nsdetails(ns);
-            if (!nsd || !nsd->indexBuildInProgress) {
+            if (!nsd || !nsd->indexBuildsInProgress) {
                 continue;
             }
 
@@ -67,27 +68,38 @@ namespace mongo {
             if (!cmdLine.indexBuildRetry) {
                 // If we crash between unsetting the inProg flag and cleaning up the index, the
                 // index space will be lost.
-                getDur().writingInt(nsd->indexBuildInProgress) = 0;
-                nsd->idx(nsd->nIndexes).kill_idx();
+                int inProg = nsd->indexBuildsInProgress;
+                getDur().writingInt(nsd->indexBuildsInProgress) = 0;
+
+                for (int i = 0; i < inProg; i++) {
+                    nsd->idx(nsd->nIndexes+i).kill_idx();
+                }
+
                 continue;
             }
 
-            retryIndexBuild(dbName, nsd);
+            // We go from right to left building these indexes, so that indexBuildInProgress-- has
+            // the correct effect of "popping" an index off the list.
+            while (nsd->indexBuildsInProgress > 0) {
+                retryIndexBuild(dbName, nsd, nsd->nIndexes+nsd->indexBuildsInProgress-1);
+            }
         }
     }
 
-    void IndexRebuilder::retryIndexBuild(const std::string& dbName, NamespaceDetails* nsd) {
+    void IndexRebuilder::retryIndexBuild(const std::string& dbName,
+                                         NamespaceDetails* nsd,
+                                         const int index) {
         // details.info is always a valid system.indexes entry because DataFileMgr::insert journals
         // creating the index doc and then insert_makeIndex durably assigns its DiskLoc to info.
-        // indexBuildInProgress is set after that, so if it is set, info must be set.
-        IndexDetails& details = nsd->idx(nsd->nIndexes);
+        // indexBuildsInProgress is set after that, so if it is set, info must be set.
+        IndexDetails& details = nsd->idx(index);
 
         // First, clean up the in progress index build. Save the system.indexes entry so that we
         // can add it again afterwards.
         BSONObj indexObj = details.info.obj().getOwned();
 
         // Clean up the in-progress index build
-        getDur().writingInt(nsd->indexBuildInProgress) = 0;
+        getDur().writingInt(nsd->indexBuildsInProgress) -= 1;
         details.kill_idx();
 
         // The index has now been removed from system.indexes, so the only record of it is in-
         // memory. If there is a journal commit between now and when insert() rewrites the entry and
diff --git a/src/mongo/db/index_rebuilder.h b/src/mongo/db/index_rebuilder.h
index 3fd308f9cc9..05e30ea4daa 100644
--- a/src/mongo/db/index_rebuilder.h
+++ b/src/mongo/db/index_rebuilder.h
@@ -36,9 +36,12 @@ namespace mongo {
         void checkDB(const std::string& dbname);
 
         /**
-         * Actually retry the index build on a given namespace.
+         * Actually retry an index build on a given namespace.
+         * @param dbName the name of the database for accessing db.system.indexes
+         * @param nsd the namespace details of the namespace building the index
+         * @param index the offset into nsd's index array of the partially-built index
          */
-        void retryIndexBuild(const std::string& dbName, NamespaceDetails* nsd);
+        void retryIndexBuild(const std::string& dbName, NamespaceDetails* nsd, const int index);
     };
 
     extern IndexRebuilder indexRebuilder;
diff --git a/src/mongo/db/index_update.cpp b/src/mongo/db/index_update.cpp
index 4d47f0038ec..b2ab2d02fde 100644
--- a/src/mongo/db/index_update.cpp
+++ b/src/mongo/db/index_update.cpp
@@ -72,14 +72,15 @@ namespace mongo {
         int n = d->nIndexes;
         for ( int i = 0; i < n; i++ )
             _unindexRecord(d->idx(i), obj, dl, !noWarn);
-        if( d->indexBuildInProgress ) { // background index
-            // always pass nowarn here, as this one may be missing for valid reasons as we are concurrently building it
-            _unindexRecord(d->idx(n), obj, dl, false);
+
+        for (int i = 0; i < d->indexBuildsInProgress; i++) { // background index
+            // Always pass nowarn here, as this one may be missing for valid reasons as we are
+            // concurrently building it
+            _unindexRecord(d->idx(n+i), obj, dl, false);
         }
     }
 
     /* step one of adding keys to index idxNo for a new record
-       @return true means done. false means multikey involved and more work to do
     */
     void fetchIndexInserters(BSONObjSet & /*out*/keys,
                              IndexInterface::IndexInserter &inserter,
@@ -103,7 +104,7 @@ namespace mongo {
                                                  idxNo, idx, recordLoc, *keys.begin(), ordering, dupsAllowed));
         }
         catch (AssertionException& e) {
-            if( e.getCode() == 10287 && idxNo == d->nIndexes ) {
+            if( e.getCode() == 10287 && idxNo >= d->nIndexes ) {
                 DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
             }
             else {
@@ -123,7 +124,7 @@ namespace mongo {
         IndexInterface::IndexInserter inserter;
 
         // Step 1, read phase.
-        int n = d->nIndexesBeingBuilt();
+        int n = d->getTotalIndexCount();
         {
             BSONObjSet keys;
             for ( int i = 0; i < n; i++ ) {
@@ -159,7 +160,7 @@ namespace mongo {
                 try {
                     ii.bt_insert(idx.head, loc, *k, ordering, dupsAllowed, idx);
                 } catch (AssertionException& e) {
-                    if( e.getCode() == 10287 && (int) i == d->nIndexes ) {
+                    if( e.getCode() == 10287 && (int) i >= d->nIndexes ) {
                         DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
                     }
                     else {
@@ -202,7 +203,7 @@ namespace mongo {
                 ii.bt_insert(idx.head, recordLoc, *i, ordering, dupsAllowed, idx);
             }
             catch (AssertionException& e) {
-                if( e.getCode() == 10287 && idxNo == d->nIndexes ) {
+                if( e.getCode() == 10287 && idxNo >= d->nIndexes ) {
                     DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
                     continue;
                 }
@@ -318,13 +319,12 @@ namespace mongo {
     uint64_t fastBuildIndex(const char* ns,
                             NamespaceDetails* d,
                             IndexDetails& idx,
-                            int32_t idxNo,
                             bool mayInterrupt) {
         CurOp * op = cc().curop();
 
         Timer t;
 
-        tlog(1) << "fastBuildIndex " << ns << " idxNo:" << idxNo << ' ' << idx.info.obj().toString() << endl;
+        tlog(1) << "fastBuildIndex " << ns << ' ' << idx.info.obj().toString() << endl;
 
         bool dupsAllowed = !idx.unique() || ignoreUniqueIndex(idx);
         bool dropDups = idx.dropDups() || inDBRepair;
@@ -348,8 +348,10 @@ namespace mongo {
         // Ensure the index and external sorter have a consistent index interface (and sort order).
         fassert( 16408, &idx.idxInterface() == &sorter.getIndexInterface() );
 
-        if( phase1->multi )
+        if( phase1->multi ) {
+            int idxNo = IndexBuildsInProgress::get(ns, idx.info.obj()["name"].valuestr());
             d->setIndexIsMultikey(ns, idxNo);
+        }
 
         if ( logLevel > 1 ) printMemInfo( "before final sort" );
         phase1->sorter->sort( mayInterrupt );
@@ -395,7 +397,8 @@ namespace mongo {
 
     class BackgroundIndexBuildJob : public BackgroundOperation {
 
-        unsigned long long addExistingToIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
+        unsigned long long addExistingToIndex(const char *ns, NamespaceDetails *d,
+                                              IndexDetails& idx) {
             bool dupsAllowed = !idx.unique();
             bool dropDups = idx.dropDups();
 
@@ -409,6 +412,14 @@ namespace mongo {
                 cc.reset( new ClientCursor(QueryOption_NoCursorTimeout, c, ns) );
             }
 
+            std::string idxName = idx.indexName();
+            int idxNo = IndexBuildsInProgress::get(ns, idxName);
+            massert(16574, "Couldn't find index being built", idxNo != -1);
+
+            // After this yields in the loop, idx may point at a different index (if indexes get
+            // flipped, see insert_makeIndex) or even an empty IndexDetails, so nothing below should
+            // depend on idx. idxNo should be recalculated after each yield.
+
             while ( cc->ok() ) {
                 BSONObj js = cc->current();
                 try {
@@ -444,6 +455,13 @@ namespace mongo {
                             }
                             break;
                         }
+
+                        // Recalculate idxNo if we yielded
+                        idxNo = IndexBuildsInProgress::get(ns, idxName);
+                        // This index must still be around, because this is thread that would clean
+                        // it up
+                        massert(16575, "cannot find index build anymore", idxNo != -1);
+
                         numDropped++;
                     }
                     else {
@@ -458,8 +476,14 @@ namespace mongo {
 
                 if ( cc->yieldSometimes( ClientCursor::WillNeed ) ) {
                     progress.setTotalWhileRunning( d->stats.nrecords );
+
+                    // Recalculate idxNo if we yielded
+                    idxNo = IndexBuildsInProgress::get(ns, idxName);
+                    // Someone may have interrupted the index build
+                    massert(16576, "cannot find index build anymore", idxNo != -1);
                 }
                 else {
+                    idxNo = -1;
                     cc.release();
                     uasserted(12584, "cursor gone during bg index");
                     break;
@@ -480,7 +504,7 @@ namespace mongo {
             uassert( 13130 , "can't start bg index b/c in recursive lock (db.eval?)" , !Lock::nested() );
             bgJobsInProgress.insert(d);
         }
-        void done(const char *ns, NamespaceDetails *d) {
+        void done(const char *ns) {
             NamespaceDetailsTransient::get(ns).addedIndex(); // clear query optimizer cache
             Lock::assertWriteLocked(ns);
         }
@@ -488,27 +512,25 @@ namespace mongo {
     public:
         BackgroundIndexBuildJob(const char *ns) : BackgroundOperation(ns) { }
 
-        unsigned long long go(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
+        unsigned long long go(string ns, NamespaceDetails *d, IndexDetails& idx) {
            unsigned long long n = 0;
 
            prep(ns.c_str(), d);
-            verify( idxNo == d->nIndexes );
            try {
                idx.head.writing() = idx.idxInterface().addBucket(idx);
-                n = addExistingToIndex(ns.c_str(), d, idx, idxNo);
+                n = addExistingToIndex(ns.c_str(), d, idx);
+                // idx may point at an invalid index entry at this point
            }
            catch(...) {
                if( cc().database() && nsdetails(ns.c_str()) == d ) {
-                    verify( idxNo == d->nIndexes );
-                    done(ns.c_str(), d);
+                    done(ns.c_str());
                }
                else {
                    log() << "ERROR: db gone during bg index?" << endl;
                }
                throw;
            }
-            verify( idxNo == d->nIndexes );
-            done(ns.c_str(), d);
+            done(ns.c_str());
            return n;
        }
    };
@@ -517,58 +539,28 @@ namespace mongo {
     void buildAnIndex(const std::string& ns,
                       NamespaceDetails* d,
                       IndexDetails& idx,
-                      int32_t idxNo,
                       bool background,
                       bool mayInterrupt) {
         tlog() << "build index " << ns << ' ' << idx.keyPattern() << ( background ? " background" : "" ) << endl;
         Timer t;
         unsigned long long n;
 
-        verify( !BackgroundOperation::inProgForNs(ns.c_str()) ); // should have been checked earlier, better not be...
         verify( Lock::isWriteLocked(ns) );
 
         // Build index spec here in case the collection is empty and the index details are invalid
         idx.getSpec();
 
         if( inDBRepair || !background ) {
-            n = fastBuildIndex(ns.c_str(), d, idx, idxNo, mayInterrupt);
+            n = fastBuildIndex(ns.c_str(), d, idx, mayInterrupt);
             verify( !idx.head.isNull() );
         }
         else {
             BackgroundIndexBuildJob j(ns.c_str());
-            n = j.go(ns, d, idx, idxNo);
+            n = j.go(ns, d, idx);
         }
         tlog() << "build index done. scanned " << n << " total records. " << t.millis() / 1000.0 << " secs" << endl;
     }
 
-    /* add keys to indexes for a new record */
-#if 0
-    static void oldIndexRecord__notused(NamespaceDetails *d, BSONObj obj, DiskLoc loc) {
-        int n = d->nIndexesBeingBuilt();
-        for ( int i = 0; i < n; i++ ) {
-            try {
-                bool unique = d->idx(i).unique();
-                addKeysToIndex(d, i, obj, loc, /*dupsAllowed*/!unique);
-            }
-            catch( DBException& ) {
-                /* try to roll back previously added index entries
-                   note <= i (not < i) is important here as the index we were just attempted
-                   may be multikey and require some cleanup.
-                */
-                for( int j = 0; j <= i; j++ ) {
-                    try {
-                        _unindexRecord(d->idx(j), obj, loc, false);
-                    }
-                    catch(...) {
-                        LOG(3) << "unindex fails on rollback after unique failure\n";
-                    }
-                }
-                throw;
-            }
-        }
-    }
-#endif
-
     extern BSONObj id_obj; // { _id : 1 }
 
     void ensureHaveIdIndex(const char* ns, bool mayInterrupt) {
diff --git a/src/mongo/db/index_update.h b/src/mongo/db/index_update.h
index cc70687b6cb..ad71d293ce0 100644
--- a/src/mongo/db/index_update.h
+++ b/src/mongo/db/index_update.h
@@ -34,7 +34,6 @@ namespace mongo {
     void buildAnIndex(const std::string& ns,
                       NamespaceDetails *d,
                       IndexDetails& idx,
-                      int32_t idxNo,
                       bool background,
                       bool mayInterrupt);
 
diff --git a/src/mongo/db/namespace_details.cpp b/src/mongo/db/namespace_details.cpp
index 15a9bf6453c..fc85cbcb42b 100644
--- a/src/mongo/db/namespace_details.cpp
+++ b/src/mongo/db/namespace_details.cpp
@@ -70,7 +70,7 @@ namespace mongo {
         multiKeyIndexBits = 0;
         reservedA = 0;
         extraOffset = 0;
-        indexBuildInProgress = 0;
+        indexBuildsInProgress = 0;
         memset(reserved, 0, sizeof(reserved));
     }
 
@@ -116,7 +116,7 @@ namespace mongo {
         cout << "ns " << firstExtent.toString() << ' ' << lastExtent.toString() << " nidx:" << nIndexes << '\n';
         cout << "ns " << stats.datasize << ' ' << stats.nrecords << ' ' << nIndexes << '\n';
         cout << "ns " << isCapped() << ' ' << _paddingFactor << ' ' << _systemFlags << ' ' << _userFlags << ' ' << dataFileVersion << '\n';
-        cout << "ns " << multiKeyIndexBits << ' ' << indexBuildInProgress << '\n';
+        cout << "ns " << multiKeyIndexBits << ' ' << indexBuildsInProgress << '\n';
         cout << "ns " << (int) reserved[0] << ' ' << (int) reserved[59];
         cout << endl;
     }
@@ -517,22 +517,41 @@ namespace mongo {
         return e;
     }
 
-    void NamespaceDetails::setIndexIsMultikey(const char *thisns, int i) {
-        dassert( i < NIndexesMax );
-        unsigned long long x = ((unsigned long long) 1) << i;
-        if( multiKeyIndexBits & x ) return;
-        *getDur().writing(&multiKeyIndexBits) |= x;
+    void NamespaceDetails::setIndexIsMultikey(const char *thisns, int i, bool multikey) {
+        massert(16577, "index number greater than NIndexesMax", i < NIndexesMax );
+
+        unsigned long long mask = 1ULL << i;
+
+        if (multikey) {
+            // Shortcut if the bit is already set correctly
+            if (multiKeyIndexBits & mask) {
+                return;
+            }
+
+            *getDur().writing(&multiKeyIndexBits) |= mask;
+        }
+        else {
+            // Shortcut if the bit is already set correctly
+            if (!(multiKeyIndexBits & mask)) {
+                return;
+            }
+
+            // Invert mask: all 1's except a 0 at the ith bit
+            mask = ~mask;
+            *getDur().writing(&multiKeyIndexBits) &= mask;
+        }
+
         NamespaceDetailsTransient::get(thisns).clearQueryCache();
     }
 
     IndexDetails& NamespaceDetails::getNextIndexDetails(const char* thisns) {
         IndexDetails *id;
         try {
-            id = &idx(nIndexes,true);
+            id = &idx(getTotalIndexCount(), true);
         }
         catch(DBException&) {
-            allocExtra(thisns, nIndexes);
-            id = &idx(nIndexes,false);
+            allocExtra(thisns, getTotalIndexCount());
+            id = &idx(getTotalIndexCount(), false);
         }
         return *id;
     }
diff --git a/src/mongo/db/namespace_details.h b/src/mongo/db/namespace_details.h
index d75ace1dd77..898aeb58ed6 100644
--- a/src/mongo/db/namespace_details.h
+++ b/src/mongo/db/namespace_details.h
@@ -97,7 +97,7 @@ namespace mongo {
         unsigned long long reservedA;
         long long extraOffset; // where the $extra info is located (bytes relative to this)
     public:
-        int indexBuildInProgress; // 1 if in prog
+        int indexBuildsInProgress; // Number of indexes currently being built
     private:
         int _userFlags;
         char reserved[72];
@@ -182,7 +182,7 @@ namespace mongo {
        /* when a background index build is in progress, we don't count the index in nIndexes until
           complete, yet need to still use it in _indexRecord() - thus we use this function for that.
           */
-        int nIndexesBeingBuilt() const { return nIndexes + indexBuildInProgress; }
+        int getTotalIndexCount() const { return nIndexes + indexBuildsInProgress; }
 
        /* NOTE: be careful with flags. are we manipulating them in read locks? if so,
           this isn't thread safe. TODO */
@@ -197,12 +197,6 @@ namespace mongo {
 
         IndexDetails& idx(int idxNo, bool missingExpected = false );
 
-        /** get the IndexDetails for the index currently being built in the background. (there is at most one) */
-        IndexDetails& inProgIdx() {
-            DEV verify(indexBuildInProgress);
-            return idx(nIndexes);
-        }
-
         class IndexIterator {
         public:
             int pos() { return i; } // note this is the next one to come
@@ -225,7 +219,7 @@ namespace mongo {
           for these, we have to do some dedup work on queries.
        */
        bool isMultikey(int i) const { return (multiKeyIndexBits & (((unsigned long long) 1) << i)) != 0; }
-        void setIndexIsMultikey(const char *thisns, int i);
+        void setIndexIsMultikey(const char *thisns, int i, bool multikey = true);
 
        /**
         * This fetches the IndexDetails for the next empty index slot. The caller must populate
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 8d5ec164f65..456cc166042 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -161,9 +161,11 @@ namespace mongo {
         bool isOperatorUpdate = updateobj.firstElementFieldName()[0] == '$';
         int modsIsIndexed = false; // really the # of indexes
         if ( isOperatorUpdate ) {
-            if( d && d->indexBuildInProgress ) {
+            if( d && d->indexBuildsInProgress ) {
                 set<string> bgKeys;
-                d->inProgIdx().keyPattern().getFieldNames(bgKeys);
+                for (int i = 0; i < d->indexBuildsInProgress; i++) {
+                    d->idx(d->nIndexes+i).keyPattern().getFieldNames(bgKeys);
+                }
                 mods.reset( new ModSet(updateobj, nsdt->indexKeys(), &bgKeys, forReplication) );
             }
             else {
@@ -250,10 +252,11 @@ namespace mongo {
                     break;
                 nsdt = &NamespaceDetailsTransient::get(ns);
                 if ( mods.get() && ! mods->isIndexed() ) {
-                    // we need to re-check indexes
                     set<string> bgKeys;
-                    if ( d->indexBuildInProgress )
-                        d->inProgIdx().keyPattern().getFieldNames(bgKeys);
+                    for (int i = 0; i < d->indexBuildsInProgress; i++) {
+                        // we need to re-check indexes
+                        d->idx(d->nIndexes+i).keyPattern().getFieldNames(bgKeys);
+                    }
                     mods->updateIsIndexed( nsdt->indexKeys() , &bgKeys );
                     modsIsIndexed = mods->isIndexed();
                 }
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index e09388f07b9..8a7c3ef264a 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -113,7 +113,6 @@ namespace mongo {
     BackgroundOperation::BackgroundOperation(const char *ns) : _ns(ns) {
         SimpleMutex::scoped_lock lk(m);
         dbsInProg[_ns.db]++;
-        verify( nsInProg.count(_ns.ns()) == 0 );
         nsInProg.insert(_ns.ns());
     }
 
@@ -1163,7 +1162,7 @@ namespace mongo {
         /* have any index keys changed? */
         {
             int keyUpdates = 0;
-            int z = d->nIndexesBeingBuilt();
+            int z = d->getTotalIndexCount();
             for ( int x = 0; x < z; x++ ) {
                 IndexDetails& idx = d->idx(x);
                 IndexInterface& ii = idx.idxInterface();
@@ -1417,7 +1416,10 @@ namespace mongo {
             background = false;
         }
 
-        int idxNo = tableToIndex->nIndexes;
+        // The total number of indexes right before we write to the collection
+        int oldNIndexes = -1;
+        int idxNo = tableToIndex->getTotalIndexCount();
+        std::string idxName = info["name"].valuestr();
 
         // Set curop description before setting indexBuildInProg, so that there's something
         // commands can find and kill as soon as indexBuildInProg is set. Only set this if it's a
@@ -1433,8 +1435,8 @@ namespace mongo {
             getDur().writingDiskLoc(idx.info) = loc;
 
             try {
-                getDur().writingInt(tableToIndex->indexBuildInProgress) = 1;
-                buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo, background, mayInterrupt);
+                getDur().writingInt(tableToIndex->indexBuildsInProgress) += 1;
+                buildAnIndex(tabletoidxns, tableToIndex, idx, background, mayInterrupt);
             }
             catch (DBException& e) {
                 // save our error msg string as an exception or dropIndexes will overwrite our message
@@ -1450,6 +1452,8 @@ namespace mongo {
                     saveerrmsg = e.what();
                 }
 
+                // Recalculate the index # so we can remove it from the list in the next catch
+                idxNo = IndexBuildsInProgress::get(tabletoidxns.c_str(), idxName);
                 // roll back this index
                 idx.kill_idx();
 
@@ -1458,9 +1462,39 @@ namespace mongo {
                 throw;
             }
 
+            // Recompute index numbers
+            tableToIndex = nsdetails(tabletoidxns.c_str());
+            idxNo = IndexBuildsInProgress::get(tabletoidxns.c_str(), idxName);
+            verify(idxNo > -1);
+
+            // Make sure the newly created index is relocated to nIndexes, if it isn't already there
+            if (idxNo != tableToIndex->nIndexes) {
+                log() << "switching indexes at position " << idxNo << " and "
+                      << tableToIndex->nIndexes << endl;
+                // We cannot use idx here, as it may point to a different index entry if it was
+                // flipped during building
+                IndexDetails temp = tableToIndex->idx(idxNo);
+                *getDur().writing(&tableToIndex->idx(idxNo)) =
+                    tableToIndex->idx(tableToIndex->nIndexes);
+                *getDur().writing(&tableToIndex->idx(tableToIndex->nIndexes)) = temp;
+
+                // We also have to flip multikey entries
+                bool tempMultikey = tableToIndex->isMultikey(idxNo);
+                tableToIndex->setIndexIsMultikey(tabletoidxns.c_str(), idxNo,
+                                                 tableToIndex->isMultikey(tableToIndex->nIndexes));
+                tableToIndex->setIndexIsMultikey(tabletoidxns.c_str(), tableToIndex->nIndexes,
+                                                 tempMultikey);
+
+                idxNo = tableToIndex->nIndexes;
+            }
+
+            // Store the current total of indexes in case something goes wrong actually adding the
+            // index
+            oldNIndexes = tableToIndex->getTotalIndexCount();
+
             // clear transient info caches so they refresh; increments nIndexes
             tableToIndex->addIndex(tabletoidxns.c_str());
-            getDur().writingInt(tableToIndex->indexBuildInProgress) = 0;
+            getDur().writingInt(tableToIndex->indexBuildsInProgress) -= 1;
         }
         catch (...) {
             // Generally, this will be called as an exception from building the index bubbles up.
@@ -1469,18 +1503,51 @@ namespace mongo {
            // successfully finished building and addIndex or kill_idx threw.
 
            // Check if nIndexes was incremented
-            if (idxNo < tableToIndex->nIndexes) {
-                // TODO: this will have to change when we can have multiple simultanious index
-                // builds
-                getDur().writingInt(tableToIndex->nIndexes) -= 1;
+            if (oldNIndexes != -1 && oldNIndexes != tableToIndex->nIndexes) {
+                getDur().writingInt(tableToIndex->nIndexes) = oldNIndexes;
            }
 
-            getDur().writingInt(tableToIndex->indexBuildInProgress) = 0;
+            // Move any other in prog indexes "back" one. It is important that idxNo is set
+            // correctly so that the correct index is removed
+            IndexBuildsInProgress::remove(tabletoidxns.c_str(), idxNo);
+            getDur().writingInt(tableToIndex->indexBuildsInProgress) -= 1;
            throw;
        }
    }
 
+    // indexName is passed in because index details may not be pointing to something valid at this
+    // point
+    int IndexBuildsInProgress::get(const char* ns, const std::string& indexName) {
+        Lock::assertWriteLocked(ns);
+        NamespaceDetails* nsd = nsdetails(ns);
+
+        // Go through unfinished index builds and try to find this index
+        for (int i=nsd->nIndexes; i<nsd->nIndexes+nsd->indexBuildsInProgress; i++) {
+            if (indexName == nsd->idx(i).indexName()) {
+                return i;
+            }
+        }
+
+        return -1;
+    }
+
+    void IndexBuildsInProgress::remove(const char* ns, int offset) {
+        Lock::assertWriteLocked(ns);
+        NamespaceDetails* nsd = nsdetails(ns);
+
+        for (int i=offset; i<nsd->getTotalIndexCount(); i++) {
+            if (i < NamespaceDetails::NIndexesMax-1) {
+                *getDur().writing(&nsd->idx(i)) = nsd->idx(i+1);
+                nsd->setIndexIsMultikey(ns, i, nsd->isMultikey(i+1));
+            }
+            else {
+                *getDur().writing(&nsd->idx(i)) = IndexDetails();
+                nsd->setIndexIsMultikey(ns, i, false);
+            }
+        }
+    }
+
     DiskLoc DataFileMgr::insert(const char* ns,
                                 const void* obuf,
                                 int32_t len,
diff --git a/src/mongo/db/pdfile.h b/src/mongo/db/pdfile.h
index 0b77e8e327e..5f3d6220277 100644
--- a/src/mongo/db/pdfile.h
+++ b/src/mongo/db/pdfile.h
@@ -668,4 +668,22 @@ namespace mongo {
 
     void addRecordToRecListInExtent(Record* r, DiskLoc loc);
 
+    /**
+     * Static helpers to manipulate the list of unfinished index builds.
+     */
+    class IndexBuildsInProgress {
+    public:
+        /**
+         * Find an unfinished index build by name. Does not search finished index builds.
+         */
+        static int get(const char* ns, const std::string& indexName);
+
+        /**
+         * Remove an unfinished index build from the list of index builds and move every subsequent
+         * unfinished index build back one. E.g., if x, y, z, and w are building and someone kills
+         * y, this method would rearrange the list to be x, z, w, (empty), etc.
+         */
+        static void remove(const char* ns, int offset);
+    };
+
 } // namespace mongo
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index dd9e7706c17..f3fc4ec2d69 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -124,7 +124,7 @@ namespace mongo {
         {
             // indexCount includes all indexes, including ones
            // in the process of being built
-            int indexCount = nsd->nIndexesBeingBuilt();
+            int indexCount = nsd->getTotalIndexCount();
            for ( int indexNo = 0; indexNo < indexCount; indexNo++ ) {
                // This will page in all index pages for the given object.
                try {
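The multikey bookkeeping touched by setIndexIsMultikey above is a 64-bit bitmap with one bit per index slot; the new bool parameter lets callers clear a bit as well as set it, which the index-swapping code in insert_makeIndex relies on. A minimal standalone sketch of that bit logic (illustrative type and names only, journaling via getDur().writing() omitted):

#include <cassert>
#include <cstdint>

// Stand-in for the multiKeyIndexBits field kept in NamespaceDetails.
struct MultikeyBits {
    uint64_t bits = 0;

    // Mirrors setIndexIsMultikey(ns, i, multikey): set or clear the i-th bit,
    // returning early when the bit already has the requested value.
    void set(int i, bool multikey) {
        const uint64_t mask = 1ULL << i;
        if (multikey) {
            if (bits & mask) return;    // already marked multikey
            bits |= mask;
        }
        else {
            if (!(bits & mask)) return; // already cleared
            bits &= ~mask;              // all 1's except a 0 at the i-th bit
        }
    }

    bool isMultikey(int i) const { return (bits & (1ULL << i)) != 0; }
};

int main() {
    MultikeyBits m;
    m.set(3, true);
    assert(m.isMultikey(3));
    m.set(3, false);
    assert(!m.isMultikey(3));
    return 0;
}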
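The new IndexBuildsInProgress::remove shifts every unfinished build after the removed one back by a slot, as the pdfile.h comment describes. A simplified model of that behaviour, using a plain vector of names instead of IndexDetails slots (hypothetical helper, not part of the commit; the real code keeps the array length fixed and writes an empty IndexDetails into the last slot):

#include <iostream>
#include <string>
#include <vector>

// Remove the build at 'offset' and move every later build back one slot.
static void removeInProgressBuild(std::vector<std::string>& builds, size_t offset) {
    for (size_t i = offset; i + 1 < builds.size(); ++i) {
        builds[i] = builds[i + 1];   // shift the next build back one slot
    }
    if (!builds.empty()) {
        builds.pop_back();           // drop the now-duplicate last slot
    }
}

int main() {
    // Matches the header comment: x, y, z, and w are building and y is killed.
    std::vector<std::string> builds = {"x", "y", "z", "w"};
    removeInProgressBuild(builds, 1);
    for (const std::string& b : builds) std::cout << b << ' ';   // prints: x z w
    std::cout << '\n';
    return 0;
}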