summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
authorAaron <aaron@10gen.com>2012-10-25 14:07:23 -0700
committerAaron <aaron@10gen.com>2012-11-08 16:32:38 -0800
commit6a51b6b01e4ebdd723e6ad33f07934d5558f9ad7 (patch)
tree28bcdb963e99f213a281148a53b3c801c5fb41d8 /src/mongo/db
parentf5d6b28def1f23def5cd1ba551f5dc73277407ff (diff)
downloadmongo-6a51b6b01e4ebdd723e6ad33f07934d5558f9ad7.tar.gz
SERVER-3067 Add killop support for foreground index builds.
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/btree.cpp2
-rw-r--r--src/mongo/db/btreebuilder.cpp28
-rw-r--r--src/mongo/db/btreebuilder.h6
-rw-r--r--src/mongo/db/cap.cpp5
-rw-r--r--src/mongo/db/cloner.cpp2
-rw-r--r--src/mongo/db/commands/mr.cpp2
-rw-r--r--src/mongo/db/compact.cpp4
-rw-r--r--src/mongo/db/compact.h4
-rw-r--r--src/mongo/db/dbcommands.cpp6
-rw-r--r--src/mongo/db/extsort.cpp20
-rw-r--r--src/mongo/db/extsort.h12
-rw-r--r--src/mongo/db/index.cpp41
-rw-r--r--src/mongo/db/index.h22
-rw-r--r--src/mongo/db/index_update.cpp126
-rw-r--r--src/mongo/db/index_update.h51
-rw-r--r--src/mongo/db/instance.cpp10
-rw-r--r--src/mongo/db/kill_current_op.cpp4
-rw-r--r--src/mongo/db/kill_current_op.h3
-rw-r--r--src/mongo/db/namespace_details.cpp15
-rw-r--r--src/mongo/db/oplog.cpp31
-rw-r--r--src/mongo/db/ops/update.cpp6
-rw-r--r--src/mongo/db/pdfile.cpp45
-rw-r--r--src/mongo/db/pdfile.h45
23 files changed, 323 insertions, 167 deletions
diff --git a/src/mongo/db/btree.cpp b/src/mongo/db/btree.cpp
index ffe049d9df1..f9dbd023ac5 100644
--- a/src/mongo/db/btree.cpp
+++ b/src/mongo/db/btree.cpp
@@ -1433,7 +1433,7 @@ namespace mongo {
template< class V >
DiskLoc BtreeBucket<V>::addBucket(const IndexDetails& id) {
string ns = id.indexNamespace();
- DiskLoc loc = theDataFileMgr.insert(ns.c_str(), 0, V::BucketSize, true);
+ DiskLoc loc = theDataFileMgr.insert(ns.c_str(), 0, V::BucketSize, false, true);
BtreeBucket *b = BTREEMOD(loc);
b->init();
return loc;
diff --git a/src/mongo/db/btreebuilder.cpp b/src/mongo/db/btreebuilder.cpp
index 46d04659a78..26c8e27c4b4 100644
--- a/src/mongo/db/btreebuilder.cpp
+++ b/src/mongo/db/btreebuilder.cpp
@@ -28,6 +28,7 @@
#include "stats/counters.h"
#include "dur_commitjob.h"
#include "btreebuilder.h"
+#include "mongo/db/kill_current_op.h"
namespace mongo {
@@ -92,7 +93,7 @@ namespace mongo {
}
template<class V>
- void BtreeBuilder<V>::buildNextLevel(DiskLoc loc) {
+ void BtreeBuilder<V>::buildNextLevel(DiskLoc loc, bool mayInterrupt) {
int levels = 1;
while( 1 ) {
if( loc.btree<V>()->tempNext().isNull() ) {
@@ -108,6 +109,8 @@ namespace mongo {
DiskLoc xloc = loc;
while( !xloc.isNull() ) {
+ killCurrentOp.checkForInterrupt( !mayInterrupt );
+
if ( getDur().commitIfNeeded() ) {
b = cur.btreemod<V>();
up = upLoc.btreemod<V>();
@@ -154,30 +157,11 @@ namespace mongo {
/** when all addKeys are done, we then build the higher levels of the tree */
template<class V>
- void BtreeBuilder<V>::commit() {
- buildNextLevel(first);
+ void BtreeBuilder<V>::commit(bool mayInterrupt) {
+ buildNextLevel(first, mayInterrupt);
committed = true;
}
- template<class V>
- BtreeBuilder<V>::~BtreeBuilder() {
- DESTRUCTOR_GUARD(
- if( !committed ) {
- LOG(2) << "Rolling back partially built index space" << endl;
- DiskLoc x = first;
- while( !x.isNull() ) {
- DiskLoc next = x.btree<V>()->tempNext();
- string ns = idx.indexNamespace();
- theDataFileMgr._deleteRecord(nsdetails(ns.c_str()), ns.c_str(), x.rec(), x);
- x = next;
- getDur().commitIfNeeded();
- }
- verify( idx.head.isNull() );
- LOG(2) << "done rollback" << endl;
- }
- )
- }
-
template class BtreeBuilder<V0>;
template class BtreeBuilder<V1>;
diff --git a/src/mongo/db/btreebuilder.h b/src/mongo/db/btreebuilder.h
index 251f698672a..a671d3895df 100644
--- a/src/mongo/db/btreebuilder.h
+++ b/src/mongo/db/btreebuilder.h
@@ -43,12 +43,10 @@ namespace mongo {
BtreeBucket<V> *b;
void newBucket();
- void buildNextLevel(DiskLoc);
+ void buildNextLevel(DiskLoc loc, bool mayInterrupt);
void mayCommitProgressDurably();
public:
- ~BtreeBuilder();
-
BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx);
/**
@@ -61,7 +59,7 @@ namespace mongo {
* commit work. if not called, destructor will clean up partially completed work
* (in case exception has happened).
*/
- void commit();
+ void commit(bool mayInterrupt);
unsigned long long getn() { return n; }
};
diff --git a/src/mongo/db/cap.cpp b/src/mongo/db/cap.cpp
index 1d2d45b1e5d..e41c730cf07 100644
--- a/src/mongo/db/cap.cpp
+++ b/src/mongo/db/cap.cpp
@@ -484,7 +484,10 @@ namespace mongo {
}
for ( unsigned i=0; i<indexes.size(); i++ ) {
- theDataFileMgr.insertWithObjMod( Namespace( ns ).getSisterNS( "system.indexes" ).c_str() , indexes[i] , true );
+ theDataFileMgr.insertWithObjMod(Namespace( ns ).getSisterNS( "system.indexes" ).c_str(),
+ indexes[i],
+ false,
+ true);
}
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index e08adaa6940..9ee582d2885 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -32,8 +32,6 @@ namespace mongo {
BSONElement getErrField(const BSONObj& o);
- void ensureHaveIdIndex(const char *ns);
-
bool replAuthenticate(DBClientBase *);
/** Selectively release the mutex based on a parameter. */
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 7d6217d0f37..42e8ad346d3 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -579,7 +579,7 @@ namespace mongo {
*/
void State::_insertToInc( BSONObj& o ) {
verify( _onDisk );
- theDataFileMgr.insertWithObjMod( _config.incLong.c_str() , o , true );
+ theDataFileMgr.insertWithObjMod( _config.incLong.c_str(), o, false, true );
getDur().commitIfNeeded();
}
diff --git a/src/mongo/db/compact.cpp b/src/mongo/db/compact.cpp
index 1650eac6573..247b8924a6c 100644
--- a/src/mongo/db/compact.cpp
+++ b/src/mongo/db/compact.cpp
@@ -37,8 +37,6 @@
namespace mongo {
- void addRecordToRecListInExtent(Record *r, DiskLoc loc);
- DiskLoc allocateSpaceForANewRecord(const char *ns, NamespaceDetails *d, int lenWHdr, bool god);
void freeExtents(DiskLoc firstExt, DiskLoc lastExt);
/* this should be done in alloc record not here, but doing here for now.
@@ -123,7 +121,7 @@ namespace mongo {
{
// extract keys for all indexes we will be rebuilding
for( int x = 0; x < nidx; x++ ) {
- phase1[x].addKeys(indexSpecs[x], objOld, loc);
+ phase1[x].addKeys(indexSpecs[x], objOld, loc, false);
}
}
}
diff --git a/src/mongo/db/compact.h b/src/mongo/db/compact.h
index 6b6298e6b21..cae7a50d949 100644
--- a/src/mongo/db/compact.h
+++ b/src/mongo/db/compact.h
@@ -32,7 +32,7 @@ namespace mongo {
unsigned long long nkeys;
bool multi; // multikey index
- void addKeys(const IndexSpec& spec, const BSONObj& o, DiskLoc loc) {
+ void addKeys(const IndexSpec& spec, const BSONObj& o, DiskLoc loc, bool mayInterrupt) {
BSONObjSet keys;
spec.getKeys(o, keys);
int k = 0;
@@ -40,7 +40,7 @@ namespace mongo {
if( ++k == 2 ) {
multi = true;
}
- sorter->add(*i, loc);
+ sorter->add(*i, loc, mayInterrupt);
nkeys++;
}
n++;
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 89977af87d3..400cd0a130f 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -722,7 +722,9 @@ namespace mongo {
for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
BSONObj o = *i;
LOG(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
- theDataFileMgr.insertWithObjMod( Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" ).c_str() , o , true );
+ string systemIndexesNs =
+ Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" );
+ theDataFileMgr.insertWithObjMod( systemIndexesNs.c_str(), o, false, true );
}
result.append( "nIndexes" , (int)all.size() );
@@ -1469,7 +1471,7 @@ namespace mongo {
{
Lock::DBWrite lk(ns);
Client::Context ctx( ns );
- theDataFileMgr.insertWithObjMod( ns.c_str(), obj, true );
+ theDataFileMgr.insertWithObjMod( ns.c_str(), obj, false, true );
}
return true;
}
diff --git a/src/mongo/db/extsort.cpp b/src/mongo/db/extsort.cpp
index 8152f32b7ba..badd4561b5f 100644
--- a/src/mongo/db/extsort.cpp
+++ b/src/mongo/db/extsort.cpp
@@ -40,13 +40,13 @@ namespace mongo {
HLMutex BSONObjExternalSorter::_extSortMutex("s");
IndexInterface *BSONObjExternalSorter::extSortIdxInterface;
Ordering BSONObjExternalSorter::extSortOrder( Ordering::make(BSONObj()) );
+ bool BSONObjExternalSorter::extSortMayInterrupt( false );
unsigned long long BSONObjExternalSorter::_compares = 0;
unsigned long long BSONObjExternalSorter::_uniqueNumber = 0;
static SimpleMutex _uniqueNumberMutex( "uniqueNumberMutex" );
/*static*/
int BSONObjExternalSorter::_compare(IndexInterface& i, const Data& l, const Data& r, const Ordering& order) {
- RARELY killCurrentOp.checkForInterrupt();
_compares++;
int x = i.keyCompare(l.first, r.first, order);
if ( x )
@@ -59,6 +59,7 @@ namespace mongo {
DEV RARELY {
_extSortMutex.dassertLocked(); // must be as we use a global var
}
+ RARELY killCurrentOp.checkForInterrupt(!extSortMayInterrupt);
Data * l = (Data*)lv;
Data * r = (Data*)rv;
return _compare(*extSortIdxInterface, *l, *r, extSortOrder);
@@ -97,28 +98,29 @@ namespace mongo {
wassert( removed == 1 + _files.size() );
}
- void BSONObjExternalSorter::_sortInMem() {
+ void BSONObjExternalSorter::_sortInMem( bool mayInterrupt ) {
// extSortComp needs to use glpbals
// qsort_r only seems available on bsd, which is what i really want to use
HLMutex::scoped_lock lk(_extSortMutex);
extSortIdxInterface = &_idxi;
extSortOrder = Ordering::make(_order);
+ extSortMayInterrupt = mayInterrupt;
_cur->sort( BSONObjExternalSorter::extSortComp );
}
- void BSONObjExternalSorter::sort() {
+ void BSONObjExternalSorter::sort( bool mayInterrupt ) {
uassert( 10048 , "already sorted" , ! _sorted );
_sorted = true;
if ( _cur && _files.size() == 0 ) {
- _sortInMem();
+ _sortInMem( mayInterrupt );
LOG(1) << "\t\t not using file. size:" << _curSizeSoFar << " _compares:" << _compares << endl;
return;
}
if ( _cur ) {
- finishMap();
+ finishMap( mayInterrupt );
}
if ( _cur ) {
@@ -131,7 +133,7 @@ namespace mongo {
}
- void BSONObjExternalSorter::add( const BSONObj& o , const DiskLoc & loc ) {
+ void BSONObjExternalSorter::add( const BSONObj& o, const DiskLoc& loc, bool mayInterrupt ) {
uassert( 10049 , "sorted already" , ! _sorted );
if ( ! _cur ) {
@@ -146,20 +148,20 @@ namespace mongo {
_curSizeSoFar += size + sizeof( DiskLoc ) + sizeof( BSONObj );
if ( _cur->hasSpace() == false || _curSizeSoFar > _maxFilesize ) {
- finishMap();
+ finishMap( mayInterrupt );
LOG(1) << "finishing map" << endl;
}
}
- void BSONObjExternalSorter::finishMap() {
+ void BSONObjExternalSorter::finishMap( bool mayInterrupt ) {
uassert( 10050 , "bad" , _cur );
_curSizeSoFar = 0;
if ( _cur->size() == 0 )
return;
- _sortInMem();
+ _sortInMem( mayInterrupt );
stringstream ss;
ss << _root.string() << "/file." << _files.size();
diff --git a/src/mongo/db/extsort.h b/src/mongo/db/extsort.h
index 853c02283ca..92b7fea4d85 100644
--- a/src/mongo/db/extsort.h
+++ b/src/mongo/db/extsort.h
@@ -58,6 +58,7 @@ namespace mongo {
static IndexInterface *extSortIdxInterface;
static Ordering extSortOrder;
+ static bool extSortMayInterrupt;
static int extSortComp( const void *lv, const void *rv );
class FileIterator : boost::noncopyable {
@@ -96,13 +97,10 @@ namespace mongo {
};
- void add( const BSONObj& o , const DiskLoc & loc );
- void add( const BSONObj& o , int a , int b ) {
- add( o , DiskLoc( a , b ) );
- }
+ void add( const BSONObj& o, const DiskLoc& loc, bool mayInterrupt );
/* call after adding values, and before fetching the iterator */
- void sort();
+ void sort( bool mayInterrupt );
auto_ptr<Iterator> iterator() {
uassert( 10052 , "not sorted" , _sorted );
@@ -122,10 +120,10 @@ namespace mongo {
private:
- void _sortInMem();
+ void _sortInMem( bool mayInterrupt );
void sort( const std::string& file );
- void finishMap();
+ void finishMap( bool mayInterrupt );
BSONObj _order;
long _maxFilesize;
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
index 62b95ec11a9..1925d2e5773 100644
--- a/src/mongo/db/index.cpp
+++ b/src/mongo/db/index.cpp
@@ -16,18 +16,20 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "mongo/pch.h"
+
+#include "mongo/db/index.h"
+
#include <boost/checked_delete.hpp>
-#include "pch.h"
-#include "namespace-inl.h"
-#include "index.h"
-#include "btree.h"
-#include "background.h"
-#include "repl/rs.h"
-#include "ops/delete.h"
+#include "mongo/db/background.h"
+#include "mongo/db/btree.h"
+#include "mongo/db/index_update.h"
+#include "mongo/db/namespace-inl.h"
+#include "mongo/db/ops/delete.h"
+#include "mongo/db/repl/rs.h"
#include "mongo/util/scopeguard.h"
-
namespace mongo {
IndexInterface::IndexInserter::IndexInserter() {}
@@ -282,21 +284,12 @@ namespace mongo {
return true;
}
- /* Prepare to build an index. Does not actually build it (except for a special _id case).
- - We validate that the params are good
- - That the index does not already exist
- - Creates the source collection if it DNE
-
- example of 'io':
- { ns : 'test.foo', name : 'z', key : { z : 1 } }
-
- throws DBException
-
- @param sourceNS - source NS we are indexing
- @param sourceCollection - its details ptr
- @return true if ok to continue. when false we stop/fail silently (index already exists)
- */
- bool prepareToBuildIndex(const BSONObj& io, bool god, string& sourceNS, NamespaceDetails *&sourceCollection, BSONObj& fixedIndexObject ) {
+ bool prepareToBuildIndex(const BSONObj& io,
+ bool mayInterrupt,
+ bool god,
+ string& sourceNS,
+ NamespaceDetails*& sourceCollection,
+ BSONObj& fixedIndexObject) {
sourceCollection = 0;
// logical name of the index. todo: get rid of the name, we don't need it!
@@ -364,7 +357,7 @@ namespace mongo {
*/
if ( IndexDetails::isIdIndexPattern(key) ) {
if( !god ) {
- ensureHaveIdIndex( sourceNS.c_str() );
+ ensureHaveIdIndex( sourceNS.c_str(), mayInterrupt );
return false;
}
}
diff --git a/src/mongo/db/index.h b/src/mongo/db/index.h
index b4e093da12b..a522d58a906 100644
--- a/src/mongo/db/index.h
+++ b/src/mongo/db/index.h
@@ -261,5 +261,27 @@ namespace mongo {
void assureSysIndexesEmptied(const char *ns, IndexDetails *exceptForIdIndex);
int removeFromSysIndexes(const char *ns, const char *idxName);
+ /**
+ * Prepare to build an index. Does not actually build it (except for a special _id case).
+ * - We validate that the params are good
+ * - That the index does not already exist
+ * - Creates the source collection if it DNE
+ *
+ * example of 'io':
+ * { ns : 'test.foo', name : 'z', key : { z : 1 } }
+ *
+ * @throws DBException
+ *
+ * @param mayInterrupt - When true, killop may interrupt the function call.
+ * @param sourceNS - source NS we are indexing
+ * @param sourceCollection - its details ptr
+ * @return true if ok to continue. when false we stop/fail silently (index already exists)
+ */
+ bool prepareToBuildIndex(const BSONObj& io,
+ bool mayInterrupt,
+ bool god,
+ string& sourceNS,
+ NamespaceDetails*& sourceCollection,
+ BSONObj& fixedIndexObject);
} // namespace mongo
diff --git a/src/mongo/db/index_update.cpp b/src/mongo/db/index_update.cpp
index 74cdd6826eb..0e268b8ce61 100644
--- a/src/mongo/db/index_update.cpp
+++ b/src/mongo/db/index_update.cpp
@@ -215,20 +215,47 @@ namespace mongo {
}
}
- SortPhaseOne *precalced = 0;
+ void addKeysToPhaseOne( const char* ns,
+ const IndexDetails& idx,
+ const BSONObj& order,
+ SortPhaseOne* phaseOne,
+ int64_t nrecords,
+ ProgressMeter* progressMeter,
+ bool mayInterrupt ) {
+ shared_ptr<Cursor> cursor = theDataFileMgr.findAll( ns );
+ phaseOne->sorter.reset( new BSONObjExternalSorter( idx.idxInterface(), order ) );
+ phaseOne->sorter->hintNumObjects( nrecords );
+ const IndexSpec& spec = idx.getSpec();
+ while ( cursor->ok() ) {
+ RARELY killCurrentOp.checkForInterrupt( !mayInterrupt );
+ BSONObj o = cursor->current();
+ DiskLoc loc = cursor->currLoc();
+ phaseOne->addKeys( spec, o, loc, mayInterrupt );
+ cursor->advance();
+ progressMeter->hit();
+ if ( logLevel > 1 && phaseOne->n % 10000 == 0 ) {
+ printMemInfo( "\t iterating objects" );
+ }
+ }
+ }
template< class V >
- void buildBottomUpPhases2And3(bool dupsAllowed, IndexDetails& idx, BSONObjExternalSorter& sorter,
- bool dropDups, set<DiskLoc> &dupsToDrop, CurOp * op, SortPhaseOne *phase1, ProgressMeterHolder &pm,
- Timer& t
- )
- {
+ void buildBottomUpPhases2And3( bool dupsAllowed,
+ IndexDetails& idx,
+ BSONObjExternalSorter& sorter,
+ bool dropDups,
+ set<DiskLoc>& dupsToDrop,
+ CurOp* op,
+ SortPhaseOne* phase1,
+ ProgressMeterHolder& pm,
+ Timer& t,
+ bool mayInterrupt ) {
BtreeBuilder<V> btBuilder(dupsAllowed, idx);
BSONObj keyLast;
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
verify( pm == op->setMessage( "index: (2/3) btree bottom up" , phase1->nkeys , 10 ) );
while( i->more() ) {
- RARELY killCurrentOp.checkForInterrupt();
+ RARELY killCurrentOp.checkForInterrupt( !mayInterrupt );
BSONObjExternalSorter::Data d = i->next();
try {
@@ -264,14 +291,37 @@ namespace mongo {
pm.finished();
op->setMessage( "index: (3/3) btree-middle" );
LOG(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
- btBuilder.commit();
+ btBuilder.commit( mayInterrupt );
if ( btBuilder.getn() != phase1->nkeys && ! dropDups ) {
warning() << "not all entries were added to the index, probably some keys were too large" << endl;
}
}
+ void doDropDups( const char* ns,
+ NamespaceDetails* d,
+ const set<DiskLoc>& dupsToDrop,
+ bool mayInterrupt ) {
+ for( set<DiskLoc>::const_iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); ++i ) {
+ RARELY killCurrentOp.checkForInterrupt( !mayInterrupt );
+ theDataFileMgr.deleteRecord( d,
+ ns,
+ i->rec(),
+ *i,
+ false /* cappedOk */,
+ true /* noWarn */,
+ isMaster( ns ) /* logOp */ );
+ getDur().commitIfNeeded();
+ }
+ }
+
+ SortPhaseOne* precalced = 0;
+
// throws DBException
- unsigned long long fastBuildIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
+ uint64_t fastBuildIndex(const char* ns,
+ NamespaceDetails* d,
+ IndexDetails& idx,
+ int32_t idxNo,
+ bool mayInterrupt) {
CurOp * op = cc().curop();
Timer t;
@@ -292,21 +342,7 @@ namespace mongo {
SortPhaseOne *phase1 = precalced;
if( phase1 == 0 ) {
phase1 = &_ours;
- SortPhaseOne& p1 = *phase1;
- shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
- p1.sorter.reset( new BSONObjExternalSorter(idx.idxInterface(), order) );
- p1.sorter->hintNumObjects( d->stats.nrecords );
- const IndexSpec& spec = idx.getSpec();
- while ( c->ok() ) {
- BSONObj o = c->current();
- DiskLoc loc = c->currLoc();
- p1.addKeys(spec, o, loc);
- c->advance();
- pm.hit();
- if ( logLevel > 1 && p1.n % 10000 == 0 ) {
- printMemInfo( "\t iterating objects" );
- }
- };
+ addKeysToPhaseOne( ns, idx, order, phase1, d->stats.nrecords, pm.get(), mayInterrupt );
}
pm.finished();
@@ -318,7 +354,7 @@ namespace mongo {
d->setIndexIsMultikey(ns, idxNo);
if ( logLevel > 1 ) printMemInfo( "before final sort" );
- phase1->sorter->sort();
+ phase1->sorter->sort( mayInterrupt );
if ( logLevel > 1 ) printMemInfo( "after final sort" );
LOG(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;
@@ -327,19 +363,34 @@ namespace mongo {
/* build index --- */
if( idx.version() == 0 )
- buildBottomUpPhases2And3<V0>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
+ buildBottomUpPhases2And3<V0>(dupsAllowed,
+ idx,
+ sorter,
+ dropDups,
+ dupsToDrop,
+ op,
+ phase1,
+ pm,
+ t,
+ mayInterrupt);
else if( idx.version() == 1 )
- buildBottomUpPhases2And3<V1>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
+ buildBottomUpPhases2And3<V1>(dupsAllowed,
+ idx,
+ sorter,
+ dropDups,
+ dupsToDrop,
+ op,
+ phase1,
+ pm,
+ t,
+ mayInterrupt);
else
verify(false);
if( dropDups )
log() << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;
- for( set<DiskLoc>::iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); i++ ){
- theDataFileMgr.deleteRecord( d, ns, i->rec(), *i, false /* cappedOk */ , true /* noWarn */ , isMaster( ns ) /* logOp */ );
- getDur().commitIfNeeded();
- }
+ doDropDups(ns, d, dupsToDrop, mayInterrupt);
return phase1->n;
}
@@ -488,7 +539,12 @@ namespace mongo {
};
// throws DBException
- void buildAnIndex(const std::string& ns, NamespaceDetails *d, IndexDetails& idx, int idxNo, bool background) {
+ void buildAnIndex(const std::string& ns,
+ NamespaceDetails* d,
+ IndexDetails& idx,
+ int32_t idxNo,
+ bool background,
+ bool mayInterrupt) {
tlog() << "build index " << ns << ' ' << idx.keyPattern() << ( background ? " background" : "" ) << endl;
Timer t;
unsigned long long n;
@@ -502,7 +558,7 @@ namespace mongo {
idx.getSpec();
if( inDBRepair || !background ) {
- n = fastBuildIndex(ns.c_str(), d, idx, idxNo);
+ n = fastBuildIndex(ns.c_str(), d, idx, idxNo, mayInterrupt);
verify( !idx.head.isNull() );
}
else {
@@ -542,7 +598,7 @@ namespace mongo {
extern BSONObj id_obj; // { _id : 1 }
- void ensureHaveIdIndex(const char *ns) {
+ void ensureHaveIdIndex(const char* ns, bool mayInterrupt) {
NamespaceDetails *d = nsdetails(ns);
if ( d == 0 || d->isSystemFlagSet(NamespaceDetails::Flag_HaveIdIndex) )
return;
@@ -566,7 +622,7 @@ namespace mongo {
BSONObj o = b.done();
/* edge case: note the insert could fail if we have hit maxindexes already */
- theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize(), true);
+ theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize(), mayInterrupt, true);
}
/* remove bit from a bit array - actually remove its slot, not a clear
diff --git a/src/mongo/db/index_update.h b/src/mongo/db/index_update.h
index 81a777d3b2b..cc70687b6cb 100644
--- a/src/mongo/db/index_update.h
+++ b/src/mongo/db/index_update.h
@@ -19,6 +19,7 @@
#include "mongo/db/diskloc.h"
#include "mongo/db/index.h"
#include "mongo/db/jsobj.h"
+#include "mongo/platform/cstdint.h"
namespace mongo {
class NamespaceDetails;
@@ -33,8 +34,9 @@ namespace mongo {
void buildAnIndex(const std::string& ns,
NamespaceDetails *d,
IndexDetails& idx,
- int idxNo,
- bool background);
+ int32_t idxNo,
+ bool background,
+ bool mayInterrupt);
// add index keys for a newly inserted record
// done in two steps/phases to allow potential deferal of write lock portion in the future
@@ -52,4 +54,47 @@ namespace mongo {
bool dropIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool maydeleteIdIndex );
-}
+ /**
+ * Add an _id index to namespace @param 'ns' if not already present.
+ * @param mayInterrupt When true, killop may interrupt the function call.
+ */
+ void ensureHaveIdIndex(const char* ns, bool mayInterrupt);
+
+ ////// The remaining functions are only included in this header file for unit testing.
+
+ class BSONObjExternalSorter;
+ class CurOp;
+ class ProgressMeter;
+ class ProgressMeterHolder;
+ struct SortPhaseOne;
+ class Timer;
+
+ /** Extract index keys from the @param 'ns' to the external sorter in @param 'phaseOne'. */
+ void addKeysToPhaseOne( const char* ns,
+ const IndexDetails& idx,
+ const BSONObj& order,
+ SortPhaseOne* phaseOne,
+ int64_t nrecords,
+ ProgressMeter* progressMeter,
+ bool mayInterrupt );
+
+ /** Popuate the index @param 'idx' using the keys contained in @param 'sorter'. */
+ template< class V >
+ void buildBottomUpPhases2And3( bool dupsAllowed,
+ IndexDetails& idx,
+ BSONObjExternalSorter& sorter,
+ bool dropDups,
+ set<DiskLoc>& dupsToDrop,
+ CurOp* op,
+ SortPhaseOne* phase1,
+ ProgressMeterHolder& pm,
+ Timer& t,
+ bool mayInterrupt );
+
+ /** Drop duplicate documents from the set @param 'dupsToDrop'. */
+ void doDropDups( const char* ns,
+ NamespaceDetails* d,
+ const set<DiskLoc>& dupsToDrop,
+ bool mayInterrupt );
+
+} // namespace mongo
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 4887d7d2755..04991bb968b 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -747,7 +747,15 @@ namespace mongo {
uassert( 13511 , "document to insert can't have $ fields" , e.fieldName()[0] != '$' );
}
}
- theDataFileMgr.insertWithObjMod(ns, js, false); // js may be modified in the call to add an _id field.
+ theDataFileMgr.insertWithObjMod(ns,
+ // May be modified in the call to add an _id field.
+ js,
+ // Only permit interrupting an (index build) insert if the
+ // insert comes from a socket client request rather than a
+ // parent operation using the client interface. The parent
+ // operation might not support interrupts.
+ cc().curop()->parent() == NULL,
+ false);
logOp("i", ns, js);
}
diff --git a/src/mongo/db/kill_current_op.cpp b/src/mongo/db/kill_current_op.cpp
index 1b6b892e9be..5d3e2f1070f 100644
--- a/src/mongo/db/kill_current_op.cpp
+++ b/src/mongo/db/kill_current_op.cpp
@@ -113,4 +113,8 @@ namespace mongo {
return "interrupted";
return "";
}
+
+ void KillCurrentOp::reset() {
+ _globalKill = false;
+ }
}
diff --git a/src/mongo/db/kill_current_op.h b/src/mongo/db/kill_current_op.h
index dd39462b134..2f4a7eb04d3 100644
--- a/src/mongo/db/kill_current_op.h
+++ b/src/mongo/db/kill_current_op.h
@@ -65,6 +65,9 @@ namespace mongo {
**/
void notifyAllWaiters();
+ /** Reset the object to its initial state. Only for testing. */
+ void reset();
+
private:
void interruptJs( AtomicUInt *op );
volatile bool _globalKill;
diff --git a/src/mongo/db/namespace_details.cpp b/src/mongo/db/namespace_details.cpp
index 40d68da06b7..e78da5b7a12 100644
--- a/src/mongo/db/namespace_details.cpp
+++ b/src/mongo/db/namespace_details.cpp
@@ -715,7 +715,11 @@ namespace mongo {
BSONObj newEntry = applyUpdateOperators( oldEntry , BSON( "$set" << BSON( "options.flags" << userFlags() ) ) );
verify( 1 == deleteObjects( system_namespaces.c_str() , oldEntry , true , false , true ) );
- theDataFileMgr.insert( system_namespaces.c_str() , newEntry.objdata() , newEntry.objsize() , true );
+ theDataFileMgr.insert( system_namespaces.c_str(),
+ newEntry.objdata(),
+ newEntry.objsize(),
+ false,
+ true );
}
bool NamespaceDetails::setUserFlag( int flags ) {
@@ -788,7 +792,7 @@ namespace mongo {
char database[256];
nsToDatabase(ns, database);
string s = string(database) + ".system.namespaces";
- theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), true);
+ theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), false, true);
}
void renameNamespace( const char *from, const char *to, bool stayTemp) {
@@ -857,7 +861,12 @@ namespace mongo {
newIndexSpecB << "ns" << to;
}
BSONObj newIndexSpec = newIndexSpecB.done();
- DiskLoc newIndexSpecLoc = theDataFileMgr.insert( s.c_str(), newIndexSpec.objdata(), newIndexSpec.objsize(), true, false );
+ DiskLoc newIndexSpecLoc = theDataFileMgr.insert( s.c_str(),
+ newIndexSpec.objdata(),
+ newIndexSpec.objsize(),
+ false,
+ true,
+ false );
int indexI = details->findIndexByName( oldIndexSpec.getStringField( "name" ) );
IndexDetails &indexDetails = details->idx(indexI);
string oldIndexNs = indexDetails.indexNamespace();
diff --git a/src/mongo/db/oplog.cpp b/src/mongo/db/oplog.cpp
index f346ca8e0a4..faaa696e3e7 100644
--- a/src/mongo/db/oplog.cpp
+++ b/src/mongo/db/oplog.cpp
@@ -16,21 +16,24 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "pch.h"
-#include "oplog.h"
-#include "repl_block.h"
-#include "repl.h"
-#include "commands.h"
-#include "repl/rs.h"
-#include "stats/counters.h"
-#include "../util/file.h"
-#include "../util/startup_test.h"
-#include "queryoptimizer.h"
-#include "ops/update.h"
-#include "ops/delete.h"
+#include "mongo/pch.h"
+
+#include "mongo/db/oplog.h"
+
+#include "mongo/db/commands.h"
+#include "mongo/db/index_update.h"
#include "mongo/db/instance.h"
+#include "mongo/db/ops/update.h"
+#include "mongo/db/ops/delete.h"
+#include "mongo/db/queryoptimizer.h"
+#include "mongo/db/repl.h"
+#include "mongo/db/repl_block.h"
#include "mongo/db/repl/bgsync.h"
+#include "mongo/db/repl/rs.h"
+#include "mongo/db/stats/counters.h"
#include "mongo/util/elapsed_tracker.h"
+#include "mongo/util/file.h"
+#include "mongo/util/startup_test.h"
namespace mongo {
@@ -778,7 +781,7 @@ namespace mongo {
else {
// probably don't need this since all replicated colls have _id indexes now
// but keep it just in case
- RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns); }
+ RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns, false); }
/* todo : it may be better to do an insert here, and then catch the dup key exception and do update
then. very few upserts will not be inserts...
@@ -795,7 +798,7 @@ namespace mongo {
// probably don't need this since all replicated colls have _id indexes now
// but keep it just in case
- RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns); }
+ RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns, false); }
OpDebug debug;
BSONObj updateCriteria = op.getObjectField("o2");
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 954d2e0b1f3..8d5ec164f65 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -199,7 +199,7 @@ namespace mongo {
checkNoMods( updateobj );
debug.upsert = true;
BSONObj no = updateobj;
- theDataFileMgr.insertWithObjMod(ns, no, su);
+ theDataFileMgr.insertWithObjMod(ns, no, false, su);
return UpdateResult( 0 , 0 , 1 , no );
}
}
@@ -433,7 +433,7 @@ namespace mongo {
BSONObj newObj = mods->createNewFromQuery( patternOrig );
checkNoMods( newObj );
debug.fastmodinsert = true;
- theDataFileMgr.insertWithObjMod(ns, newObj, su);
+ theDataFileMgr.insertWithObjMod(ns, newObj, false, su);
if ( logop )
logOp( "i", ns, newObj, 0, 0, fromMigrate );
@@ -443,7 +443,7 @@ namespace mongo {
checkNoMods( updateobj );
debug.upsert = true;
BSONObj no = updateobj;
- theDataFileMgr.insertWithObjMod(ns, no, su);
+ theDataFileMgr.insertWithObjMod(ns, no, false, su);
if ( logop )
logOp( "i", ns, no, 0, 0, fromMigrate );
return UpdateResult( 0 , 0 , 1 , no );
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 0995c872b4c..8a68a3d0b70 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -35,7 +35,6 @@ _ disallow system* manipulations from the database.
#include "mongo/db/pdfile_private.h"
#include "mongo/db/background.h"
#include "mongo/db/btree.h"
-#include "mongo/db/btreebuilder.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/compact.h"
#include "mongo/db/curop-inl.h"
@@ -159,7 +158,7 @@ namespace mongo {
if ( ( strstr( ns, ".system." ) == 0 || legalClientSystemNS( ns , false ) ) &&
strstr( ns, FREELIST_NS ) == 0 ) {
LOG( 1 ) << "adding _id index for collection " << ns << endl;
- ensureHaveIdIndex( ns );
+ ensureHaveIdIndex( ns, false );
}
}
@@ -1134,7 +1133,7 @@ namespace mongo {
uassert( 10003 , "failing update: objects in a capped ns cannot grow", !(d && d->isCapped()));
d->paddingTooSmall();
deleteRecord(ns, toupdate, dl);
- DiskLoc res = insert(ns, objNew.objdata(), objNew.objsize(), god);
+ DiskLoc res = insert(ns, objNew.objdata(), objNew.objsize(), false, god);
if (debug.nmoved == -1) // default of -1 rather than 0
debug.nmoved = 1;
@@ -1259,22 +1258,20 @@ namespace mongo {
void DataFileMgr::insertAndLog( const char *ns, const BSONObj &o, bool god, bool fromMigrate ) {
BSONObj tmp = o;
- insertWithObjMod( ns, tmp, god );
+ insertWithObjMod( ns, tmp, false, god );
logOp( "i", ns, tmp, 0, 0, fromMigrate );
}
/** @param o the object to insert. can be modified to add _id and thus be an in/out param
*/
- DiskLoc DataFileMgr::insertWithObjMod(const char *ns, BSONObj &o, bool god) {
+ DiskLoc DataFileMgr::insertWithObjMod(const char* ns, BSONObj& o, bool mayInterrupt, bool god) {
bool addedID = false;
- DiskLoc loc = insert( ns, o.objdata(), o.objsize(), god, true, &addedID );
+ DiskLoc loc = insert( ns, o.objdata(), o.objsize(), mayInterrupt, god, true, &addedID );
if( addedID && !loc.isNull() )
o = BSONObj::make( loc.rec() );
return loc;
}
- bool prepareToBuildIndex(const BSONObj& io, bool god, string& sourceNS, NamespaceDetails *&sourceCollection, BSONObj& fixedIndexObject );
-
// We are now doing two btree scans for all unique indexes (one here, and one when we've
// written the record to the collection. This could be made more efficient inserting
// dummy data here, keeping pointers to the btree nodes holding the dummy data and then
@@ -1390,7 +1387,10 @@ namespace mongo {
return d;
}
- void NOINLINE_DECL insert_makeIndex(NamespaceDetails *tableToIndex, const string& tabletoidxns, const DiskLoc& loc) {
+ void NOINLINE_DECL insert_makeIndex(NamespaceDetails* tableToIndex,
+ const string& tabletoidxns,
+ const DiskLoc& loc,
+ bool mayInterrupt) {
uassert( 13143 , "can't create index on system.indexes" , tabletoidxns.find( ".system.indexes" ) == string::npos );
BSONObj info = loc.obj();
@@ -1407,7 +1407,7 @@ namespace mongo {
IndexDetails& idx = tableToIndex->addIndex(tabletoidxns.c_str(), !background); // clear transient info caches so they refresh; increments nIndexes
getDur().writingDiskLoc(idx.info) = loc;
try {
- buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo, background);
+ buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo, background, mayInterrupt);
}
catch( DBException& e ) {
// save our error msg string as an exception or dropIndexes will overwrite our message
@@ -1438,15 +1438,13 @@ namespace mongo {
}
}
- /* if god==true, you may pass in obuf of NULL and then populate the returned DiskLoc
- after the call -- that will prevent a double buffer copy in some cases (btree.cpp).
-
- @param mayAddIndex almost always true, except for invocation from rename namespace command.
- @param addedID if not null, set to true if adding _id element. you must assure false before calling
- if using.
- */
-
- DiskLoc DataFileMgr::insert(const char *ns, const void *obuf, int len, bool god, bool mayAddIndex, bool *addedID) {
+ DiskLoc DataFileMgr::insert(const char* ns,
+ const void* obuf,
+ int32_t len,
+ bool mayInterrupt,
+ bool god,
+ bool mayAddIndex,
+ bool* addedID) {
bool wouldAddIndex = false;
massert( 10093 , "cannot insert into reserved $ collection", god || NamespaceString::normal( ns ) );
uassert( 10094 , str::stream() << "invalid ns: " << ns , isValidNS( ns ) );
@@ -1469,7 +1467,12 @@ namespace mongo {
if ( addIndex ) {
verify( obuf );
BSONObj io((const char *) obuf);
- if( !prepareToBuildIndex(io, god, tabletoidxns, tableToIndex, fixedIndexObject ) ) {
+ if( !prepareToBuildIndex(io,
+ mayInterrupt,
+ god,
+ tabletoidxns,
+ tableToIndex,
+ fixedIndexObject) ) {
// prepare creates _id itself, or this indicates to fail the build silently (such
// as if index already exists)
return DiskLoc();
@@ -1583,7 +1586,7 @@ namespace mongo {
NamespaceDetailsTransient::get( ns ).notifyOfWriteOp();
if ( tableToIndex ) {
- insert_makeIndex(tableToIndex, tabletoidxns, loc);
+ insert_makeIndex(tableToIndex, tabletoidxns, loc, mayInterrupt);
}
/* add this record to our indexes */
diff --git a/src/mongo/db/pdfile.h b/src/mongo/db/pdfile.h
index 984ea5cfa1e..797aac2585b 100644
--- a/src/mongo/db/pdfile.h
+++ b/src/mongo/db/pdfile.h
@@ -33,6 +33,7 @@
#include "mongo/db/namespace-inl.h"
#include "mongo/db/namespace_details-inl.h"
#include "mongo/db/namespacestring.h"
+#include "mongo/platform/cstdint.h"
#include "mongo/util/log.h"
#include "mongo/util/mmap.h"
@@ -131,16 +132,37 @@ namespace mongo {
// The object o may be updated if modified on insert.
void insertAndLog( const char *ns, const BSONObj &o, bool god = false, bool fromMigrate = false );
- /** insert will add an _id to the object if not present. if you would like to see the final object
- after such an addition, use this method.
- @param o both and in and out param
- */
- DiskLoc insertWithObjMod(const char *ns, BSONObj & /*out*/o, bool god = false);
+ /**
+ * insert() will add an _id to the object if not present. If you would like to see the
+ * final object after such an addition, use this method.
+     * @param o both an in and out param
+ * @param mayInterrupt When true, killop may interrupt the function call.
+ */
+ DiskLoc insertWithObjMod(const char* ns,
+ BSONObj& /*out*/o,
+ bool mayInterrupt = false,
+ bool god = false);
/** @param obj in value only for this version. */
void insertNoReturnVal(const char *ns, BSONObj o, bool god = false);
- DiskLoc insert(const char *ns, const void *buf, int len, bool god = false, bool mayAddIndex = true, bool *addedID = 0);
+ /**
+ * Insert the contents of @param buf with length @param len into namespace @param ns.
+ * @param mayInterrupt When true, killop may interrupt the function call.
+ * @param god if true, you may pass in obuf of NULL and then populate the returned DiskLoc
+ * after the call -- that will prevent a double buffer copy in some cases (btree.cpp).
+ * @param mayAddIndex almost always true, except for invocation from rename namespace
+ * command.
+     * @param addedID if not null, set to true if an _id element is added. You must ensure it
+     * is false before calling if using.
+ */
+ DiskLoc insert(const char* ns,
+ const void* buf,
+ int32_t len,
+ bool mayInterrupt = false,
+ bool god = false,
+ bool mayAddIndex = true,
+ bool* addedID = 0);
static shared_ptr<Cursor> findAll(const char *ns, const DiskLoc &startLoc = DiskLoc());
/* special version of insert for transaction logging -- streamlined a bit.
@@ -604,10 +626,15 @@ namespace mongo {
return reinterpret_cast<DeletedRecord*>(getRecord(dl));
}
- void ensureHaveIdIndex(const char *ns);
-
inline BSONObj BSONObj::make(const Record* r ) {
return BSONObj( r->data() );
}
-
+
+ DiskLoc allocateSpaceForANewRecord(const char* ns,
+ NamespaceDetails* d,
+ int32_t lenWHdr,
+ bool god);
+
+ void addRecordToRecListInExtent(Record* r, DiskLoc loc);
+
} // namespace mongo