summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEliot Horowitz <eliot@10gen.com>2014-01-04 01:38:14 -0500
committerEliot Horowitz <eliot@10gen.com>2014-01-09 14:21:49 -0500
commitea5d43f74e4ddf990a156ce37b05369cd9ee3479 (patch)
treefa61abd8eb81549c34b101cf217da74b3f20da04
parent6b2bd70cf5d7f2cb04b351fc24279b702ebb6fec (diff)
downloadmongo-ea5d43f74e4ddf990a156ce37b05369cd9ee3479.tar.gz
SERVER-12213: do not use idxNo or IndexDetails in any non-short term transient way
-rw-r--r--jstests/replsets/indexbg-restart-sigkill-secondary-noretry.js5
-rw-r--r--src/mongo/SConscript1
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp669
-rw-r--r--src/mongo/db/catalog/index_catalog.h88
-rw-r--r--src/mongo/db/catalog/index_catalog_internal.cpp94
-rw-r--r--src/mongo/db/catalog/index_catalog_internal.h105
-rw-r--r--src/mongo/db/commands/dbhash.cpp3
-rw-r--r--src/mongo/db/database.cpp2
-rw-r--r--src/mongo/db/dbcommands.cpp2
-rw-r--r--src/mongo/db/dbhelpers.cpp4
-rw-r--r--src/mongo/db/index/btree_access_method.cpp24
-rw-r--r--src/mongo/db/index/btree_access_method_internal.h2
-rw-r--r--src/mongo/db/index/haystack_access_method.cpp3
-rw-r--r--src/mongo/db/index/index_descriptor.h20
-rw-r--r--src/mongo/db/index_rebuilder.cpp41
-rw-r--r--src/mongo/db/instance.cpp2
-rw-r--r--src/mongo/db/namespace_details.cpp5
-rw-r--r--src/mongo/db/namespace_details.h10
-rw-r--r--src/mongo/db/query/internal_plans.h5
-rw-r--r--src/mongo/db/structure/btree/state.cpp91
-rw-r--r--src/mongo/db/structure/btree/state.h29
-rw-r--r--src/mongo/db/structure/collection.cpp16
-rw-r--r--src/mongo/db/structure/collection.h13
-rw-r--r--src/mongo/dbtests/btreebuildertests.cpp3
-rw-r--r--src/mongo/dbtests/indexupdatetests.cpp19
-rw-r--r--src/mongo/dbtests/namespacetests.cpp2
-rw-r--r--src/mongo/s/d_migrate.cpp2
-rw-r--r--src/mongo/s/d_split.cpp9
-rw-r--r--src/mongo/util/stack_introspect.cpp11
29 files changed, 867 insertions, 413 deletions
diff --git a/jstests/replsets/indexbg-restart-sigkill-secondary-noretry.js b/jstests/replsets/indexbg-restart-sigkill-secondary-noretry.js
index 0e5bbfa59a4..8d2a748fcb7 100644
--- a/jstests/replsets/indexbg-restart-sigkill-secondary-noretry.js
+++ b/jstests/replsets/indexbg-restart-sigkill-secondary-noretry.js
@@ -68,6 +68,11 @@ assert.soon( function() {
return 2 == secondDB.system.indexes.count( {ns:"bgIndexNoRetrySec.jstests_bgsec"} ); },
"index not created on secondary (prior to restart)", 30000, 50 );
+// wait till minvalid
+assert.soon( function() {
+ return secondDB.jstests_bgsec.findOne( { i : -1 } ) != null; },
+ "doc after index not on secondary (prior to restart)", 30000, 50 );
+
// restart secondary and reconnect
jsTest.log("Restarting secondary");
replTest.restart(secondId, {}, /*signal=*/ 9, /*wait=*/true);
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index 17651610422..ec3ad71f77f 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -585,6 +585,7 @@ serverOnlyFiles = [ "db/curop.cpp",
"db/client.cpp",
"db/database.cpp",
"db/catalog/index_catalog.cpp",
+ "db/catalog/index_catalog_internal.cpp",
"db/catalog/index_create.cpp",
"db/structure/collection.cpp",
"db/structure/collection_info_cache.cpp",
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 445a5df1466..b0382e7dc29 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -51,7 +51,6 @@
#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/keypattern.h"
-#include "mongo/db/ops/count.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/rs.h" // this is ugly
@@ -71,20 +70,65 @@ namespace mongo {
// -------------
IndexCatalog::IndexCatalog( Collection* collection, NamespaceDetails* details )
- : _magic(INDEX_CATALOG_MAGIC), _collection( collection ), _details( details ),
- _descriptorCache( NamespaceDetails::NIndexesMax ),
- _accessMethodCache( NamespaceDetails::NIndexesMax ),
- _forcedBtreeAccessMethodCache( NamespaceDetails::NIndexesMax ) {
+ : _magic(654321), _collection( collection ), _details( details ) {
}
IndexCatalog::~IndexCatalog() {
- _checkMagic();
+ if ( _magic != 654321 ) {
+ // only do this check if we haven't been initialized
+ _checkMagic();
+ }
_magic = 123456;
+ }
+
+ Status IndexCatalog::init() {
+
+ NamespaceDetails::IndexIterator ii = _details->ii(true);
+ while ( ii.more() ) {
+ IndexDetails& id = ii.next();
+ int idxNo = ii.pos() - 1;
+
+ if ( idxNo >= _details->getCompletedIndexCount() ) {
+ _leftOverIndexes.push_back( id.info.obj().getOwned() );
+ continue;
+ }
+
+ auto_ptr<IndexDescriptor> descriptor( new IndexDescriptor( _collection,
+ id.info.obj().getOwned() ) );
+
+ NamespaceDetails* indexMetadata =
+ _collection->_database->namespaceIndex().details( descriptor->indexNamespace() );
+ massert( 17329,
+ str::stream() << "no NamespaceDetails for index: " << descriptor->toString(),
+ indexMetadata );
- for ( unsigned i = 0; i < _descriptorCache.capacity(); i++ ) {
- _deleteCacheEntry(i);
+ auto_ptr<RecordStore> recordStore( new RecordStore( descriptor->indexNamespace() ) );
+ recordStore->init( indexMetadata, _collection->getExtentManager(), false );
+
+ auto_ptr<BtreeInMemoryState> state( new BtreeInMemoryState( _collection,
+ descriptor.get(),
+ recordStore.release() ) );
+ state->setIsReady( true );
+
+ auto_ptr<IndexAccessMethod> accessMethod( _createAccessMethod( descriptor.get(),
+ state.get() ) );
+
+ auto_ptr<IndexCatalogEntry> entry( new IndexCatalogEntry( descriptor.release(),
+ state.release(),
+ accessMethod.release() ) );
+ _entries.add( entry.release() );
+ }
+
+ // we clean up any unfinished indexes right here
+ // its too weird to leave in any state
+ // if someone wants to clean them, they can call prepOneUnfinishedIndex
+
+ if ( _leftOverIndexes.size() ) {
+ log() << "found " << _leftOverIndexes.size() << " index(es) that wasn't finished before shutdown";
}
+ _magic = INDEX_CATALOG_MAGIC;
+ return Status::OK();
}
bool IndexCatalog::ok() const {
@@ -92,16 +136,22 @@ namespace mongo {
}
void IndexCatalog::_checkMagic() const {
- dassert( _descriptorCache.capacity() == NamespaceDetails::NIndexesMax );
- dassert( _accessMethodCache.capacity() == NamespaceDetails::NIndexesMax );
- dassert( _forcedBtreeAccessMethodCache.capacity() == NamespaceDetails::NIndexesMax );
-
- if ( ok() )
+ if ( ok() ) {
return;
+ }
log() << "IndexCatalog::_magic wrong, is : " << _magic;
fassertFailed(17198);
}
+ void IndexCatalog::_checkUnifished() const {
+
+ massert( 17338,
+ str::stream()
+ << "IndexCatalog has left over indexes that must be cleared"
+ << " ns: " << _collection->ns().ns(),
+ _leftOverIndexes.size() == 0 );
+ }
+
bool IndexCatalog::_shouldOverridePlugin(const BSONObj& keyPattern) {
string pluginName = IndexNames::findPluginName(keyPattern);
bool known = IndexNames::isKnownName(pluginName);
@@ -185,6 +235,9 @@ namespace mongo {
}
Status IndexCatalog::createIndex( BSONObj spec, bool mayInterrupt ) {
+ Lock::assertWriteLocked( _collection->_database->name() );
+ _checkMagic();
+ _checkUnifished();
/**
* There are 2 main variables so(4 possibilies) for how we build indexes
* variable 1 - size of collection
@@ -227,6 +280,22 @@ namespace mongo {
return s;
}
+ // -------------
+ // no disk modifications yet
+ // going to prep some stuff, without any mods
+ // -------------
+
+ IndexDescriptor* descriptor = new IndexDescriptor( _collection, spec.getOwned() );
+ auto_ptr<IndexDescriptor> descriptorCleaner( descriptor );
+
+ auto_ptr<RecordStore> recordStore( new RecordStore( descriptor->indexNamespace() ) );
+
+
+ // -------------
+ // now going to touch disk
+ // -------------
+
+ // -------- system.indexes
Collection* systemIndexes = db->getCollection( db->_indexesName );
if ( !systemIndexes ) {
@@ -241,33 +310,62 @@ namespace mongo {
string idxName = spec["name"].valuestr();
- // Set curop description before setting indexBuildInProg, so that there's something
- // commands can find and kill as soon as indexBuildInProg is set. Only set this if it's a
- // killable index, so we don't overwrite commands in currentOp.
- if ( mayInterrupt ) {
- cc().curop()->setQuery( spec );
+ // ------- allocates IndexDetails
+ IndexBuildBlock indexBuildBlock( this,
+ idxName,
+ descriptor->indexNamespace(),
+ loc.getValue() );
+
+ // ------- allocate RecordsStore
+ {
+ Database* db = _collection->_database;
+ NamespaceIndex& nsi = db->namespaceIndex();
+ verify( nsi.details( descriptor->indexNamespace() ) == NULL );
+ nsi.add_ns( descriptor->indexNamespace(), DiskLoc(), false );
+ NamespaceDetails* nsd = nsi.details( descriptor->indexNamespace() );
+ verify(nsd);
+ recordStore->init( nsd, _collection->getExtentManager(), false );
+ db->_addNamespaceToCatalog( descriptor->indexNamespace(), NULL );
+ }
+
+ // ---------
+ // finish creating in memory state
+ // ---------
+ {
+ auto_ptr<BtreeInMemoryState> state( new BtreeInMemoryState( _collection,
+ descriptor,
+ recordStore.release() ) );
+
+ auto_ptr<IndexAccessMethod> accessMethod( _createAccessMethod( descriptor,
+ state.get() ) );
+
+ auto_ptr<IndexCatalogEntry> entry( new IndexCatalogEntry( descriptorCleaner.release(),
+ state.release(),
+ accessMethod.release() ) );
+
+ _entries.add( entry.release() );
}
- IndexBuildBlock indexBuildBlock( this, idxName, loc.getValue() );
- verify( indexBuildBlock.indexDetails() );
+ // sanity check
+ verify( _details->_catalogFindIndexByName( idxName, true ) >= 0 );
+
+ IndexCatalogEntry* entry = _entries.find( descriptor );
+ verify( entry );
try {
- int idxNo = _details->findIndexByName( idxName, true );
- verify( idxNo >= 0 );
- IndexDetails* id = &_details->idx(idxNo);
+ // Set curop description before setting indexBuildInProg, so that there's something
+ // commands can find and kill as soon as indexBuildInProg is set. Only set this if it's a
+ // killable index, so we don't overwrite commands in currentOp.
+ if ( mayInterrupt ) {
+ cc().curop()->setQuery( spec );
+ }
- scoped_ptr<IndexDescriptor> desc( new IndexDescriptor( _collection, idxNo,
- id->info.obj().getOwned() ) );
- auto_ptr<BtreeInMemoryState> btreeState( createInMemory( desc.get() ) );
- buildAnIndex( _collection, btreeState.get(), mayInterrupt );
+ buildAnIndex( _collection, entry->state(), mayInterrupt );
indexBuildBlock.success();
- // in case we got any access methods or something like that
- // TEMP until IndexDescriptor has to direct refs
- idxNo = _details->findIndexByName( idxName, true );
- verify( idxNo >= 0 );
- _deleteCacheEntry( idxNo );
+ int idxNo = _details->_catalogFindIndexByName( idxName, true );
+ verify( idxNo < numIndexesReady() );
return Status::OK();
}
@@ -276,28 +374,22 @@ namespace mongo {
<< " spec: " << spec
<< " error: " << e;
- // in case we got any access methods or something like that
- // TEMP until IndexDescriptor has to direct refs
- int idxNo = _details->findIndexByName( idxName, true );
- verify( idxNo >= 0 );
- _deleteCacheEntry( idxNo );
-
ErrorCodes::Error codeToUse = ErrorCodes::fromInt( e.getCode() );
if ( codeToUse == ErrorCodes::UnknownError )
return Status( ErrorCodes::InternalError, e.what(), e.getCode() );
return Status( codeToUse, e.what() );
}
-
-
}
IndexCatalog::IndexBuildBlock::IndexBuildBlock( IndexCatalog* catalog,
const StringData& indexName,
+ const StringData& indexNamespace,
const DiskLoc& loc )
: _catalog( catalog ),
_ns( _catalog->_collection->ns().ns() ),
_indexName( indexName.toString() ),
- _indexDetails( NULL ) {
+ _indexNamespace( indexNamespace.toString() ),
+ _inProgress( false ) {
_nsd = _catalog->_collection->details();
@@ -306,16 +398,16 @@ namespace mongo {
verify( _catalog->_collection->ok() );
verify( !loc.isNull() );
- _indexDetails = &_nsd->getNextIndexDetails( _ns.c_str() );
+ IndexDetails* indexDetails = &_nsd->getNextIndexDetails( _ns.c_str() );
+ _inProgress = true;
try {
// we don't want to kill a half formed IndexDetails, so be carefule
LOG(1) << "creating index with info @ " << loc;
- getDur().writingDiskLoc( _indexDetails->info ) = loc;
+ getDur().writingDiskLoc( indexDetails->info ) = loc;
}
catch ( DBException& e ) {
log() << "got exception trying to assign loc to IndexDetails" << e;
- _indexDetails = NULL;
return;
}
@@ -324,38 +416,44 @@ namespace mongo {
}
catch ( DBException& e ) {
log() << "got exception trying to incrementStats _indexBuildsInProgress: " << e;
- _indexDetails = NULL;
+ _inProgress = false;
return;
}
}
IndexCatalog::IndexBuildBlock::~IndexBuildBlock() {
- if ( !_indexDetails ) {
+ if ( !_inProgress ) {
// taken care of already
return;
}
+ _inProgress = false; // defensive
fassert( 17204, _catalog->_collection->ok() );
- int idxNo = _nsd->findIndexByName( _indexName, true );
+ int idxNo = _nsd->_catalogFindIndexByName( _indexName, true );
fassert( 17205, idxNo >= 0 );
- _catalog->_dropIndex( idxNo );
+ IndexCatalogEntry* entry = _catalog->_entries.find( _indexName );
+ if ( entry ) {
+ _catalog->_dropIndex( entry );
+ }
+ else {
+ _catalog->_deleteIndexFromDisk( _indexName,
+ _indexNamespace,
+ idxNo );
+ }
- _indexDetails = NULL;
}
void IndexCatalog::IndexBuildBlock::success() {
- fassert( 17206, _indexDetails );
- BSONObj keyPattern = _indexDetails->keyPattern().getOwned();
-
- _indexDetails = NULL;
+ fassert( 17206, _inProgress );
+ _inProgress = false;
fassert( 17207, _catalog->_collection->ok() );
- int idxNo = _nsd->findIndexByName( _indexName, true );
+ int idxNo = _nsd->_catalogFindIndexByName( _indexName, true );
fassert( 17202, idxNo >= 0 );
// Make sure the newly created index is relocated to nIndexes, if it isn't already there
@@ -367,10 +465,6 @@ namespace mongo {
_nsd->swapIndex( idxNo, toIdxNo );
- // neither of these should be used in queries yet, so nothing should be caching these
- _catalog->_deleteCacheEntry( idxNo );
- _catalog->_deleteCacheEntry( toIdxNo );
-
idxNo = _nsd->getCompletedIndexCount();
}
@@ -379,9 +473,15 @@ namespace mongo {
_catalog->_collection->infoCache()->addedIndex();
- _catalog->_fixDescriptorCacheNumbers();
+ IndexDescriptor* desc = _catalog->findIndexByName( _indexName, true );
+ fassert( 17330, desc );
+ IndexCatalogEntry* entry = _catalog->_entries.find( desc );
+ fassert( 17331, entry );
+
+ entry->state()->setIsReady( true );
- IndexLegacy::postBuildHook( _catalog->_collection, keyPattern );
+ IndexLegacy::postBuildHook( _catalog->_collection,
+ _catalog->findIndexByName( _indexName )->keyPattern() );
}
@@ -499,7 +599,7 @@ namespace mongo {
{
// Check both existing and in-progress indexes (2nd param = true)
- const int idx = _details->findIndexByName(name, true);
+ const int idx = _details->_catalogFindIndexByName(name, true);
if (idx >= 0) {
// index already exists.
const IndexDetails& indexSpec( _details->idx(idx) );
@@ -580,6 +680,8 @@ namespace mongo {
}
Status IndexCatalog::dropAllIndexes( bool includingIdIndex ) {
+ Lock::assertWriteLocked( _collection->_database->name() );
+
BackgroundOperation::assertNoBgOpInProgForNs( _collection->ns().ns() );
// there may be pointers pointing at keys in the btree(s). kill them.
@@ -589,61 +691,85 @@ namespace mongo {
// make sure nothing in progress
verify( numIndexesTotal() == numIndexesReady() );
- LOG(4) << " d->nIndexes was " << numIndexesTotal() << std::endl;
-
- IndexDetails *idIndex = 0;
-
- for ( int i = 0; i < numIndexesTotal(); i++ ) {
+ bool haveIdIndex = false;
- if ( !includingIdIndex && _details->idx(i).isIdIndex() ) {
- idIndex = &_details->idx(i);
- continue;
+ vector<string> indexNamesToDrop;
+ {
+ int seen = 0;
+ IndexIterator ii = getIndexIterator( true );
+ while ( ii.more() ) {
+ seen++;
+ IndexDescriptor* desc = ii.next();
+ if ( desc->isIdIndex() && includingIdIndex == false ) {
+ haveIdIndex = true;
+ continue;
+ }
+ indexNamesToDrop.push_back( desc->indexName() );
}
+ verify( seen == numIndexesTotal() );
+ }
- Status s = dropIndex( i );
- if ( !s.isOK() )
- return s;
- i--;
+ for ( size_t i = 0; i < indexNamesToDrop.size(); i++ ) {
+ string indexName = indexNamesToDrop[i];
+ IndexDescriptor* desc = findIndexByName( indexName, true );
+ verify( desc );
+ LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
+ IndexCatalogEntry* entry = _entries.find( desc );
+ verify( entry );
+ _dropIndex( entry );
}
// verify state is sane post cleaning
long long numSystemIndexesEntries = 0;
{
- BSONObj nsQuery = BSON( "query" << BSON( "ns" << _collection->ns() ) );
- string errmsg;
- int errCode;
- numSystemIndexesEntries = runCount( _collection->_database->_indexesName,
- nsQuery,
- errmsg,
- errCode );
- verify( numSystemIndexesEntries >= 0 );
- if ( errmsg.size() ) {
- error() << "counting system.indexes failed: " << errmsg;
- fassertFailed( 17323 );
+ Collection* systemIndexes =
+ _collection->_database->getCollection( _collection->_database->_indexesName );
+ if ( systemIndexes ) {
+ EqualityMatchExpression expr;
+ BSONObj nsBSON = BSON( "ns" << _collection->ns() );
+ verify( expr.init( "ns", nsBSON.firstElement() ).isOK() );
+ numSystemIndexesEntries = systemIndexes->countTableScan( &expr );
+ }
+ else {
+ // this is ok, 0 is the right number
}
}
-
- if ( idIndex ) {
+ if ( haveIdIndex ) {
fassert( 17324, numIndexesTotal() == 1 );
fassert( 17325, numIndexesReady() == 1 );
fassert( 17326, numSystemIndexesEntries == 1 );
+ fassert( 17336, _entries.size() == 1 );
}
else {
+ if ( numIndexesTotal() || numSystemIndexesEntries || _entries.size() ) {
+ error() << "about to fassert - "
+ << " numIndexesTotal(): " << numIndexesTotal()
+ << " numSystemIndexesEntries: " << numSystemIndexesEntries
+ << " _entries.size(): " << _entries.size()
+ << " indexNamesToDrop: " << indexNamesToDrop.size()
+ << " haveIdIndex: " << haveIdIndex;
+ }
fassert( 17327, numIndexesTotal() == 0 );
fassert( 17328, numSystemIndexesEntries == 0 );
+ fassert( 17337, _entries.size() == 0 );
}
return Status::OK();
}
Status IndexCatalog::dropIndex( IndexDescriptor* desc ) {
- return dropIndex( desc->getIndexNumber() );
+ Lock::assertWriteLocked( _collection->_database->name() );
+ IndexCatalogEntry* entry = _entries.find( desc );
+ if ( !entry )
+ return Status( ErrorCodes::InternalError, "cannot find index to delete" );
+ if ( !entry->state()->isReady() )
+ return Status( ErrorCodes::InternalError, "cannot delete not ready index" );
+ return _dropIndex( entry );
}
- Status IndexCatalog::dropIndex( int idxNo ) {
-
+ Status IndexCatalog::_dropIndex( IndexCatalogEntry* entry ) {
/**
* IndexState in order
* <db>.system.indexes
@@ -652,23 +778,12 @@ namespace mongo {
*/
// ----- SANITY CHECKS -------------
-
- verify( idxNo >= 0 );
- verify( idxNo < numIndexesReady() );
- verify( numIndexesReady() == numIndexesTotal() );
-
- // ------ CLEAR CACHES, ETC -----------
+ verify( entry );
BackgroundOperation::assertNoBgOpInProgForNs( _collection->ns().ns() );
-
- return _dropIndex( idxNo );
- }
-
- Status IndexCatalog::_dropIndex( int idxNo ) {
- verify( idxNo < numIndexesTotal() );
- verify( idxNo >= 0 );
-
_checkMagic();
+ _checkUnifished();
+
// there may be pointers pointing at keys in the btree(s). kill them.
// TODO: can this can only clear cursors on this index?
ClientCursor::invalidate( _collection->ns().ns() );
@@ -676,34 +791,26 @@ namespace mongo {
// wipe out stats
_collection->infoCache()->reset();
- string indexNamespace = _details->idx( idxNo ).indexNamespace();
- string indexName = _details->idx( idxNo ).indexName();
+ string indexNamespace = entry->descriptor()->indexNamespace();
+ string indexName = entry->descriptor()->indexName();
- // delete my entries first so we don't have invalid pointers lying around
- _deleteCacheEntry(idxNo);
+ int idxNo = _details->_catalogFindIndexByName( indexName, true );
+ verify( idxNo >= 0 );
// --------- START REAL WORK ----------
audit::logDropIndex( currentClient.get(), indexName, _collection->ns().ns() );
+ _entries.remove( entry->descriptor() );
+ entry = NULL;
+
try {
_details->clearSystemFlag( NamespaceDetails::Flag_HaveIdIndex );
// **** this is the first disk change ****
-
- // data + system.namespaces
- Status status = _collection->_database->_dropNS( indexNamespace );
- if ( !status.isOK() ) {
- LOG(2) << "IndexDetails::kill(): couldn't drop index " << indexNamespace;
- }
-
- // all info in the .ns file
- _details->_removeIndexFromMe( idxNo );
-
- // remove from system.indexes
- int n = _removeFromSystemIndexes( indexName );
- wassert( n == 1 );
-
+ _deleteIndexFromDisk( indexName,
+ indexNamespace,
+ idxNo );
}
catch ( std::exception& ) {
// this is bad, and we don't really know state
@@ -712,9 +819,6 @@ namespace mongo {
log() << "error dropping index: " << indexNamespace
<< " going to leak some memory to be safe";
- _descriptorCache.clear();
- _accessMethodCache.clear();
- _forcedBtreeAccessMethodCache.clear();
_collection->_database->_clearCollectionCache( indexNamespace );
@@ -723,41 +827,30 @@ namespace mongo {
_collection->_database->_clearCollectionCache( indexNamespace );
- // now that is really gone can fix arrays
-
_checkMagic();
- for ( unsigned i = static_cast<unsigned>(idxNo); i < _descriptorCache.capacity(); i++ ) {
- _deleteCacheEntry(i);
- }
-
- _fixDescriptorCacheNumbers();
-
return Status::OK();
}
- void IndexCatalog::_deleteCacheEntry( unsigned i ) {
- delete _descriptorCache[i];
- _descriptorCache[i] = NULL;
-
- delete _accessMethodCache[i];
- _accessMethodCache[i] = NULL;
-
- delete _forcedBtreeAccessMethodCache[i];
- _forcedBtreeAccessMethodCache[i] = NULL;
- }
-
- void IndexCatalog::_fixDescriptorCacheNumbers() {
+ void IndexCatalog::_deleteIndexFromDisk( const string& indexName,
+ const string& indexNamespace,
+ int idxNo ) {
+ verify( idxNo >= 0 );
+ verify( _details->_catalogFindIndexByName( indexName, true ) == idxNo );
- for ( unsigned i=0; i < _descriptorCache.capacity(); i++ ) {
- if ( !_descriptorCache[i] )
- continue;
- fassert( 17230, static_cast<int>( i ) < numIndexesTotal() );
- IndexDetails& id = _details->idx( i );
- fassert( 17227, _descriptorCache[i]->_indexNumber == static_cast<int>( i ) );
- fassert( 17228, id.info.obj() == _descriptorCache[i]->_infoObj );
+ // data + system.namespacesa
+ Status status = _collection->_database->_dropNS( indexNamespace );
+ if ( !status.isOK() ) {
+ LOG(2) << "IndexDetails::kill(): couldn't drop extents for "
+ << indexNamespace;
}
+ // all info in the .ns file
+ _details->_removeIndexFromMe( idxNo );
+
+ // remove from system.indexes
+ int n = _removeFromSystemIndexes( indexName );
+ wassert( n == 1 );
}
int IndexCatalog::_removeFromSystemIndexes( const StringData& indexName ) {
@@ -765,33 +858,30 @@ namespace mongo {
b.append( "ns", _collection->ns() );
b.append( "name", indexName );
BSONObj cond = b.obj(); // e.g.: { name: "ts_1", ns: "foo.coll" }
-
return static_cast<int>( deleteObjects( _collection->_database->_indexesName,
- cond, false, false, true ) );
+ cond,
+ false,
+ false,
+ true ) );
}
- BSONObj IndexCatalog::prepOneUnfinishedIndex() {
- verify( _details->_indexBuildsInProgress > 0 );
-
- // details.info is always a valid system.indexes entry because DataFileMgr::insert journals
- // creating the index doc and then insert_makeIndex durably assigns its DiskLoc to info.
- // indexBuildsInProgress is set after that, so if it is set, info must be set.
- int offset = numIndexesTotal() - 1;
-
- BSONObj info = _details->idx(offset).info.obj().getOwned();
+ vector<BSONObj> IndexCatalog::getAndClearUnfinishedIndexes() {
+ vector<BSONObj> toReturn = _leftOverIndexes;
+ _leftOverIndexes.clear();
+ for ( size_t i = 0; i < toReturn.size(); i++ ) {
+ BSONObj spec = toReturn[i];
- Status s = _dropIndex( offset );
+ IndexDescriptor desc( _collection, spec );
- massert( 17200,
- str::stream() << "failed to to dropIndex in prepOneUnfinishedIndex: " << s.toString(),
- s.isOK() );
-
- return info;
- }
+ int idxNo = _details->_catalogFindIndexByName( desc.indexName(), true );
+ verify( idxNo >= 0 );
+ verify( idxNo >= numIndexesReady() );
- void IndexCatalog::markMultikey( const IndexDescriptor* idx, bool isMultikey ) {
- if ( _details->setIndexIsMultikey( idx->_indexNumber, isMultikey ) )
- _collection->infoCache()->clearQueryCache();
+ _deleteIndexFromDisk( desc.indexName(),
+ desc.indexNamespace(),
+ idxNo );
+ }
+ return toReturn;
}
void IndexCatalog::updateTTLSetting( const IndexDescriptor* idx, long long newExpireSeconds ) {
@@ -821,6 +911,13 @@ namespace mongo {
}
}
+ bool IndexCatalog::isMultikey( const IndexDescriptor* idx ) {
+ IndexCatalogEntry* entry = _entries.find( idx );
+ verify( entry );
+ return entry->state()->isMultikey();
+ }
+
+
// ---------------------------
int IndexCatalog::numIndexesTotal() const {
@@ -836,6 +933,54 @@ namespace mongo {
|| findIdIndex() != NULL;
}
+ IndexCatalog::IndexIterator::IndexIterator( const IndexCatalog* cat,
+ bool includeUnfinishedIndexes )
+ : _includeUnfinishedIndexes( includeUnfinishedIndexes ),
+ _catalog( cat ),
+ _iterator( cat->_entries.begin() ),
+ _start( true ),
+ _prev( NULL ),
+ _next( NULL ) {
+ }
+
+ bool IndexCatalog::IndexIterator::more() {
+ if ( _start ) {
+ _advance();
+ _start = false;
+ }
+ return _next != NULL;
+ }
+
+ IndexDescriptor* IndexCatalog::IndexIterator::next() {
+ if ( !more() )
+ return NULL;
+ _prev = _next;
+ _advance();
+ return _prev->descriptor();
+ }
+
+ IndexAccessMethod* IndexCatalog::IndexIterator::accessMethod( IndexDescriptor* desc ) {
+ verify( desc == _prev->descriptor() );
+ return _prev->accessMethod();
+ }
+
+ void IndexCatalog::IndexIterator::_advance() {
+ _next = NULL;
+
+ while ( _iterator != _catalog->_entries.end() ) {
+ IndexCatalogEntry* entry = *_iterator;
+ ++_iterator;
+
+ if ( _includeUnfinishedIndexes ||
+ entry->state()->isReady() ) {
+ _next = entry;
+ return;
+ }
+ }
+
+ }
+
+
IndexDescriptor* IndexCatalog::findIdIndex() const {
IndexIterator ii = getIndexIterator( false );
while ( ii.more() ) {
@@ -899,35 +1044,19 @@ namespace mongo {
}
}
-
- IndexDescriptor* IndexCatalog::_getDescriptor( int idxNo ) const {
- _checkMagic();
- verify( idxNo < numIndexesTotal() );
-
- if ( _descriptorCache[idxNo] )
- return _descriptorCache[idxNo];
-
- IndexDetails* id = &_details->idx(idxNo);
-
- if ( static_cast<unsigned>( idxNo ) >= _descriptorCache.size() )
- _descriptorCache.resize( idxNo + 1 );
-
- _descriptorCache[idxNo] = new IndexDescriptor( _collection, idxNo,
- id->info.obj().getOwned());
- return _descriptorCache[idxNo];
+ IndexAccessMethod* IndexCatalog::getIndex( const IndexDescriptor* desc ) {
+ IndexCatalogEntry* entry = _entries.find( desc );
+ massert( 17334, "cannot find index entry", entry );
+ return entry->accessMethod();
}
IndexAccessMethod* IndexCatalog::getBtreeIndex( const IndexDescriptor* desc ) {
- _checkMagic();
- int idxNo = desc->getIndexNumber();
-
- if ( _forcedBtreeAccessMethodCache[idxNo] ) {
- return _forcedBtreeAccessMethodCache[idxNo];
+ IndexCatalogEntry* entry = _entries.find( desc );
+ massert( 17335, "cannot find index entry", entry );
+ if ( !entry->forcedBtreeIndex() ) {
+ entry->setForcedBtreeIndex( new BtreeAccessMethod( entry->state() ) );
}
-
- BtreeAccessMethod* newlyCreated = new BtreeAccessMethod( createInMemory( desc ) );
- _forcedBtreeAccessMethodCache[idxNo] = newlyCreated;
- return newlyCreated;
+ return entry->forcedBtreeIndex();
}
BtreeBasedAccessMethod* IndexCatalog::getBtreeBasedIndex( const IndexDescriptor* desc ) {
@@ -950,108 +1079,66 @@ namespace mongo {
}
- IndexAccessMethod* IndexCatalog::getIndex( const IndexDescriptor* desc ) {
- _checkMagic();
- int idxNo = desc->getIndexNumber();
-
- if ( _accessMethodCache[idxNo] ) {
- return _accessMethodCache[idxNo];
- }
-
- auto_ptr<BtreeInMemoryState> state( createInMemory( desc ) );
-
- IndexAccessMethod* newlyCreated = 0;
-
+ IndexAccessMethod* IndexCatalog::_createAccessMethod( const IndexDescriptor* desc,
+ BtreeInMemoryState* state ) {
string type = _getAccessMethodName(desc->keyPattern());
- if (IndexNames::HASHED == type) {
- newlyCreated = new HashAccessMethod( state.release() );
- }
- else if (IndexNames::GEO_2DSPHERE == type) {
- newlyCreated = new S2AccessMethod( state.release() );
- }
- else if (IndexNames::TEXT == type) {
- newlyCreated = new FTSAccessMethod( state.release() );
- }
- else if (IndexNames::GEO_HAYSTACK == type) {
- newlyCreated = new HaystackAccessMethod( state.release() );
- }
- else if ("" == type) {
- newlyCreated = new BtreeAccessMethod( state.release() );
- }
- else if (IndexNames::GEO_2D == type) {
- newlyCreated = new TwoDAccessMethod( state.release() );
- }
- else {
- log() << "Can't find index for keypattern " << desc->keyPattern();
- verify(0);
- return NULL;
- }
+ if (IndexNames::HASHED == type)
+ return new HashAccessMethod( state );
- _accessMethodCache[idxNo] = newlyCreated;
+ if (IndexNames::GEO_2DSPHERE == type)
+ return new S2AccessMethod( state );
- return newlyCreated;
- }
+ if (IndexNames::TEXT == type)
+ return new FTSAccessMethod( state );
- BtreeInMemoryState* IndexCatalog::createInMemory( const IndexDescriptor* descriptor ) {
- int idxNo = _details->findIndexByName( descriptor->indexName(), true );
- verify( idxNo >= 0 );
+ if (IndexNames::GEO_HAYSTACK == type)
+ return new HaystackAccessMethod( state );
- Database* db = _collection->_database;
- NamespaceDetails* nsd = db->namespaceIndex().details( descriptor->indexNamespace() );
- if ( !nsd ) {
- // have to create!
- db->namespaceIndex().add_ns( descriptor->indexNamespace(), DiskLoc(), false );
- nsd = db->namespaceIndex().details( descriptor->indexNamespace() );
- db->_addNamespaceToCatalog( descriptor->indexNamespace(), NULL );
- verify(nsd);
- }
+ if ("" == type)
+ return new BtreeAccessMethod( state );
- RecordStore* rs = new RecordStore( descriptor->indexNamespace() );
- rs->init( nsd, _collection->getExtentManager(), false );
+ if (IndexNames::GEO_2D == type)
+ return new TwoDAccessMethod( state );
- return new BtreeInMemoryState( _collection,
- descriptor,
- rs,
- &_details->idx( idxNo ) );
+ log() << "Can't find index for keypattern " << desc->keyPattern();
+ verify(0);
+ return NULL;
}
IndexDetails* IndexCatalog::_getIndexDetails( const IndexDescriptor* descriptor ) const {
- verify( descriptor->getIndexNumber() >= 0 );
- IndexDetails* id = &_details->idx( descriptor->getIndexNumber() );
- DEV verify( id->indexName() == descriptor->indexName() );
- return id;
+ int idxNo = _details->_catalogFindIndexByName( descriptor->indexName(), true );
+ verify( idxNo >= 0 );
+ return &_details->idx( idxNo );
}
// ---------------------------
- Status IndexCatalog::_indexRecord( int idxNo, const BSONObj& obj, const DiskLoc &loc ) {
- IndexDescriptor* desc = _getDescriptor( idxNo );
- verify(desc);
- IndexAccessMethod* iam = getIndex( desc );
- verify(iam);
-
+ Status IndexCatalog::_indexRecord( IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const DiskLoc &loc ) {
InsertDeleteOptions options;
options.logIfError = false;
- options.dupsAllowed =
- ignoreUniqueIndex( desc ) ||
- ( !KeyPattern::isIdKeyPattern(desc->keyPattern()) && !desc->unique() );
+
+ bool isUnique =
+ KeyPattern::isIdKeyPattern(index->descriptor()->keyPattern()) ||
+ index->descriptor()->unique();
+
+ options.dupsAllowed = ignoreUniqueIndex( index->descriptor() ) || !isUnique;
int64_t inserted;
- return iam->insert(obj, loc, options, &inserted);
+ return index->accessMethod()->insert(obj, loc, options, &inserted);
}
- Status IndexCatalog::_unindexRecord( int idxNo, const BSONObj& obj, const DiskLoc &loc, bool logIfError ) {
- IndexDescriptor* desc = _getDescriptor( idxNo );
- verify( desc );
- IndexAccessMethod* iam = getIndex( desc );
- verify( iam );
-
+ Status IndexCatalog::_unindexRecord( IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const DiskLoc &loc,
+ bool logIfError ) {
InsertDeleteOptions options;
options.logIfError = logIfError;
int64_t removed;
- Status status = iam->remove(obj, loc, options, &removed);
+ Status status = index->accessMethod()->remove(obj, loc, options, &removed);
if ( !status.isOK() ) {
problem() << "Couldn't unindex record " << obj.toString()
@@ -1064,22 +1151,35 @@ namespace mongo {
void IndexCatalog::indexRecord( const BSONObj& obj, const DiskLoc &loc ) {
- for ( int i = 0; i < numIndexesTotal(); i++ ) {
+ for ( IndexCatalogEntryContainer::const_iterator i = _entries.begin();
+ i != _entries.end();
+ ++i ) {
+
+ IndexCatalogEntry* entry = *i;
+
try {
- Status s = _indexRecord( i, obj, loc );
+ Status s = _indexRecord( entry, obj, loc );
uassert(s.location(), s.reason(), s.isOK() );
}
catch ( AssertionException& ae ) {
LOG(2) << "IndexCatalog::indexRecord failed: " << ae;
- for ( int j = 0; j <= i; j++ ) {
+ for ( IndexCatalogEntryContainer::const_iterator j = _entries.begin();
+ j != _entries.end();
+ ++j ) {
+
+ IndexCatalogEntry* toDelete = *j;
+
try {
- _unindexRecord( j, obj, loc, false );
+ _unindexRecord( toDelete, obj, loc, false );
}
catch ( DBException& e ) {
LOG(1) << "IndexCatalog::indexRecord rollback failed: " << e;
}
+
+ if ( toDelete == entry )
+ break;
}
throw;
@@ -1089,12 +1189,15 @@ namespace mongo {
}
void IndexCatalog::unindexRecord( const BSONObj& obj, const DiskLoc& loc, bool noWarn ) {
- int numIndices = numIndexesTotal();
+ for ( IndexCatalogEntryContainer::const_iterator i = _entries.begin();
+ i != _entries.end();
+ ++i ) {
+
+ IndexCatalogEntry* entry = *i;
- for (int i = 0; i < numIndices; i++) {
- // If i >= d->nIndexes, it's a background index, and we DO NOT want to log anything.
- bool logIfError = ( i < numIndexesTotal() ) ? !noWarn : false;
- _unindexRecord( i, obj, loc, logIfError );
+ // If it's a background index, we DO NOT want to log anything.
+ bool logIfError = entry->state()->isReady() ? !noWarn : false;
+ _unindexRecord( entry, obj, loc, logIfError );
}
}
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 6f4c53217ce..30b396b23a3 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -34,6 +34,7 @@
#include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/catalog/index_catalog_internal.h"
namespace mongo {
@@ -56,6 +57,9 @@ namespace mongo {
IndexCatalog( Collection* collection, NamespaceDetails* details );
~IndexCatalog();
+ // must be called before used
+ Status init();
+
bool ok() const;
// ---- accessors -----
@@ -103,20 +107,25 @@ namespace mongo {
class IndexIterator {
public:
- bool more() const { return _i < _n; }
- IndexDescriptor* next() { return _catalog->_getDescriptor( _i++ ); }
+ bool more();
+ IndexDescriptor* next();
+
+ // returns the access method for the last return IndexDescriptor
+ IndexAccessMethod* accessMethod( IndexDescriptor* desc );
private:
- IndexIterator( const IndexCatalog* cat, bool includeUnfinishedIndexes ) {
- _catalog = cat;
- if ( includeUnfinishedIndexes )
- _n = _catalog->numIndexesTotal();
- else
- _n = _catalog->numIndexesReady();
- _i = 0;
- }
- int _i;
- int _n;
+ IndexIterator( const IndexCatalog* cat, bool includeUnfinishedIndexes );
+
+ void _advance();
+
+ bool _includeUnfinishedIndexes;
const IndexCatalog* _catalog;
+ IndexCatalogEntryContainer::const_iterator _iterator;
+
+ bool _start;
+
+ IndexCatalogEntry* _prev;
+ IndexCatalogEntry* _next;
+
friend class IndexCatalog;
};
@@ -135,14 +144,12 @@ namespace mongo {
Status dropAllIndexes( bool includingIdIndex );
Status dropIndex( IndexDescriptor* desc );
- Status dropIndex( int idxNo );
/**
- * will drop an uncompleted index and return spec
- * after this, the index can be rebuilt
- * @return the info for a single index to retry
+ * will drop all uncompleted indexes and return specs
+ * after this, the indexes can be rebuilt
*/
- BSONObj prepOneUnfinishedIndex();
+ vector<BSONObj> getAndClearUnfinishedIndexes();
// ---- modify single index
@@ -152,26 +159,29 @@ namespace mongo {
*/
void updateTTLSetting( const IndexDescriptor* idx, long long newExpireSeconds );
- void markMultikey( const IndexDescriptor* idx, bool isMultikey = true );
+ bool isMultikey( const IndexDescriptor* idex );
// --- these probably become private?
class IndexBuildBlock {
public:
- IndexBuildBlock( IndexCatalog* catalog, const StringData& indexName, const DiskLoc& loc );
+ IndexBuildBlock( IndexCatalog* catalog,
+ const StringData& indexName,
+ const StringData& indexNamespace,
+ const DiskLoc& locInSystemIndexes );
~IndexBuildBlock();
- IndexDetails* indexDetails() { return _indexDetails; }
-
void success();
private:
IndexCatalog* _catalog;
string _ns;
string _indexName;
+ string _indexNamespace;
NamespaceDetails* _nsd; // for the collection, not index
- IndexDetails* _indexDetails;
+
+ bool _inProgress;
};
// ----- data modifiers ------
@@ -203,10 +213,9 @@ namespace mongo {
private:
- BtreeInMemoryState* createInMemory( const IndexDescriptor* descriptor );
-
- void _deleteCacheEntry( unsigned i );
- void _fixDescriptorCacheNumbers();
+ // creates a new thing, no caching
+ IndexAccessMethod* _createAccessMethod( const IndexDescriptor* desc,
+ BtreeInMemoryState* state );
Status _upgradeDatabaseMinorVersionIfNeeded( const string& newPluginName );
@@ -221,33 +230,36 @@ namespace mongo {
*/
string _getAccessMethodName(const BSONObj& keyPattern);
- // throws
- // never returns NULL
- IndexDescriptor* _getDescriptor( int idxNo ) const;
-
IndexDetails* _getIndexDetails( const IndexDescriptor* descriptor ) const;
void _checkMagic() const;
+ void _checkUnifished() const;
- Status _indexRecord( int idxNo, const BSONObj& obj, const DiskLoc &loc );
- Status _unindexRecord( int idxNo, const BSONObj& obj, const DiskLoc &loc, bool logIfError );
+ Status _indexRecord( IndexCatalogEntry* index, const BSONObj& obj, const DiskLoc &loc );
+ Status _unindexRecord( IndexCatalogEntry* index, const BSONObj& obj, const DiskLoc &loc,
+ bool logIfError );
/**
* this does no sanity checks
*/
- Status _dropIndex( int idxNo );
+ Status _dropIndex( IndexCatalogEntry* entry );
+
+ // just does diskc hanges
+ // doesn't change memory state, etc...
+ void _deleteIndexFromDisk( const string& indexName,
+ const string& indexNamespace,
+ int idxNo );
int _magic;
Collection* _collection;
NamespaceDetails* _details;
- // these are caches, not source of truth
- // they should be treated as such
- mutable std::vector<IndexDescriptor*> _descriptorCache; // XXX-ERH mutable here is temp
- std::vector<IndexAccessMethod*> _accessMethodCache;
- std::vector<BtreeAccessMethod*> _forcedBtreeAccessMethodCache;
+ IndexCatalogEntryContainer _entries;
+
+ std::vector<BSONObj> _leftOverIndexes;
static const BSONObj _idObj; // { _id : 1 }
+
};
}
diff --git a/src/mongo/db/catalog/index_catalog_internal.cpp b/src/mongo/db/catalog/index_catalog_internal.cpp
new file mode 100644
index 00000000000..ce2eb5cd2f7
--- /dev/null
+++ b/src/mongo/db/catalog/index_catalog_internal.cpp
@@ -0,0 +1,94 @@
+// index_catalog_internal.h
+
+/**
+* Copyright (C) 2013 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*
+* As a special exception, the copyright holders give permission to link the
+* code of portions of this program with the OpenSSL library under certain
+* conditions as described in each individual source file and distribute
+* linked combinations including the program with the OpenSSL library. You
+* must comply with the GNU Affero General Public License in all respects for
+* all of the code used other than as permitted herein. If you modify file(s)
+* with this exception, you may extend this exception to your version of the
+* file(s), but you are not obligated to do so. If you do not wish to do so,
+* delete this exception statement from your version. If you delete this
+* exception statement from all source files in the program, then also delete
+* it in the license file.
+*/
+
+#include "mongo/db/catalog/index_catalog_internal.h"
+
+#include "mongo/db/index/index_access_method.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/structure/btree/state.h"
+
+namespace mongo {
+
+ IndexCatalogEntry::IndexCatalogEntry( IndexDescriptor* descriptor,
+ BtreeInMemoryState* state,
+ IndexAccessMethod* accessMethod )
+ : _descriptor( descriptor ),
+ _state( state ),
+ _accessMethod( accessMethod ),
+ _forcedBtreeIndex( 0 ) {
+ }
+
+ IndexCatalogEntry::~IndexCatalogEntry() {
+ delete _accessMethod;
+ delete _state;
+ delete _descriptor;
+ }
+
+ const IndexCatalogEntry* IndexCatalogEntryContainer::find( const IndexDescriptor* desc ) const {
+ for ( const_iterator i = begin(); i != end(); ++i ) {
+ const IndexCatalogEntry* e = *i;
+ if ( e->descriptor() == desc )
+ return e;
+ }
+ return NULL;
+ }
+
+ IndexCatalogEntry* IndexCatalogEntryContainer::find( const IndexDescriptor* desc ) {
+ for ( iterator i = begin(); i != end(); ++i ) {
+ IndexCatalogEntry* e = *i;
+ if ( e->descriptor() == desc )
+ return e;
+ }
+ return NULL;
+ }
+
+ IndexCatalogEntry* IndexCatalogEntryContainer::find( const string& name ) {
+ for ( iterator i = begin(); i != end(); ++i ) {
+ IndexCatalogEntry* e = *i;
+ if ( e->descriptor()->indexName() == name )
+ return e;
+ }
+ return NULL;
+ }
+
+ bool IndexCatalogEntryContainer::remove( const IndexDescriptor* desc ) {
+ for ( std::vector<IndexCatalogEntry*>::iterator i = _entries.mutableVector().begin();
+ i != _entries.mutableVector().end();
+ ++i ) {
+ IndexCatalogEntry* e = *i;
+ if ( e->descriptor() != desc )
+ continue;
+ _entries.mutableVector().erase( i );
+ return true;
+ }
+ return false;
+ }
+
+}
diff --git a/src/mongo/db/catalog/index_catalog_internal.h b/src/mongo/db/catalog/index_catalog_internal.h
new file mode 100644
index 00000000000..895dc390801
--- /dev/null
+++ b/src/mongo/db/catalog/index_catalog_internal.h
@@ -0,0 +1,105 @@
+// index_catalog_internal.h
+
+/**
+* Copyright (C) 2013 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*
+* As a special exception, the copyright holders give permission to link the
+* code of portions of this program with the OpenSSL library under certain
+* conditions as described in each individual source file and distribute
+* linked combinations including the program with the OpenSSL library. You
+* must comply with the GNU Affero General Public License in all respects for
+* all of the code used other than as permitted herein. If you modify file(s)
+* with this exception, you may extend this exception to your version of the
+* file(s), but you are not obligated to do so. If you do not wish to do so,
+* delete this exception statement from your version. If you delete this
+* exception statement from all source files in the program, then also delete
+* it in the license file.
+*/
+
+#pragma once
+
+#include <string>
+
+#include "mongo/base/owned_pointer_vector.h"
+
+namespace mongo {
+
+ class IndexDescriptor;
+ class BtreeInMemoryState;
+ class IndexAccessMethod;
+
+ class IndexCatalogEntry {
+ public:
+ IndexCatalogEntry( IndexDescriptor* descriptor,
+ BtreeInMemoryState* state,
+ IndexAccessMethod* accessMethod );
+
+ ~IndexCatalogEntry();
+
+ IndexDescriptor* descriptor() { return _descriptor; }
+ BtreeInMemoryState* state() { return _state; }
+ IndexAccessMethod* accessMethod() { return _accessMethod; }
+
+ const IndexDescriptor* descriptor() const { return _descriptor; }
+ const BtreeInMemoryState* state() const { return _state; }
+ const IndexAccessMethod* accessMethod() const { return _accessMethod; }
+
+ IndexAccessMethod* forcedBtreeIndex() { return _forcedBtreeIndex; }
+ void setForcedBtreeIndex( IndexAccessMethod* iam ) { _forcedBtreeIndex = iam; }
+
+ private:
+ IndexDescriptor* _descriptor; // owned here
+ BtreeInMemoryState* _state; // owned here
+ IndexAccessMethod* _accessMethod; // owned here
+ IndexAccessMethod* _forcedBtreeIndex; // owned here
+ };
+
+ class IndexCatalogEntryContainer {
+ public:
+
+ typedef std::vector<IndexCatalogEntry*>::const_iterator const_iterator;
+ typedef std::vector<IndexCatalogEntry*>::const_iterator iterator;
+
+ const_iterator begin() const { return _entries.vector().begin(); }
+ const_iterator end() const { return _entries.vector().end(); }
+
+ iterator begin() { return _entries.vector().begin(); }
+ iterator end() { return _entries.vector().end(); }
+
+ // TODO: these have to be SUPER SUPER FAST
+ // maybe even some pointer trickery is in order
+ const IndexCatalogEntry* find( const IndexDescriptor* desc ) const;
+ IndexCatalogEntry* find( const IndexDescriptor* desc );
+
+ IndexCatalogEntry* find( const std::string& name );
+
+
+ unsigned size() const { return _entries.size(); }
+ // -----------------
+
+ bool remove( const IndexDescriptor* desc );
+
+ // pass ownership to EntryContainer
+ void add( IndexCatalogEntry* entry ) { _entries.mutableVector().push_back( entry ); }
+
+ // TODO: should the findIndexBy* methods be done here
+ // and proxied in IndexCatatalog
+ //IndexCatalogEntry* findIndexByName();
+
+ private:
+ OwnedPointerVector<IndexCatalogEntry> _entries;
+ };
+
+}
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 78e0e33cf35..65bbd6ab9f8 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -88,7 +88,8 @@ namespace mongo {
auto_ptr<Runner> runner;
if ( desc ) {
- runner.reset(InternalPlanner::indexScan(desc,
+ runner.reset(InternalPlanner::indexScan(collection,
+ desc,
BSONObj(),
BSONObj(),
false,
diff --git a/src/mongo/db/database.cpp b/src/mongo/db/database.cpp
index 32e37b40ee7..ccbfb407b02 100644
--- a/src/mongo/db/database.cpp
+++ b/src/mongo/db/database.cpp
@@ -432,7 +432,7 @@ namespace mongo {
if ( !newIndexSpecLoc.isOK() )
return newIndexSpecLoc.getStatus();
- int indexI = details->findIndexByName( oldIndexSpec.getStringField( "name" ) );
+ int indexI = details->_catalogFindIndexByName( oldIndexSpec.getStringField( "name" ) );
IndexDetails &indexDetails = details->idx(indexI);
string oldIndexNs = indexDetails.indexNamespace();
indexDetails.info = newIndexSpecLoc.getValue();
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 6988e65cbf2..17cbf3dda9f 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -912,7 +912,7 @@ namespace mongo {
min = Helpers::toKeyFormat( kp.extendRangeBound( min, false ) );
max = Helpers::toKeyFormat( kp.extendRangeBound( max, false ) );
- runner.reset(InternalPlanner::indexScan(idx, min, max, false));
+ runner.reset(InternalPlanner::indexScan(collection, idx, min, max, false));
}
long long avgObjSize = collection->details()->dataSize() / collection->numRecords();
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 8368e560b2a..c1ad434acac 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -354,7 +354,7 @@ namespace mongo {
IndexDescriptor* desc =
collection->getIndexCatalog()->findIndexByKeyPattern( indexKeyPattern.toBSON() );
- auto_ptr<Runner> runner(InternalPlanner::indexScan(desc, min, max,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, desc, min, max,
maxInclusive,
InternalPlanner::FORWARD,
InternalPlanner::IXSCAN_FETCH));
@@ -492,7 +492,7 @@ namespace mongo {
bool isLargeChunk = false;
long long docCount = 0;
- auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, min, max, false));
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max, false));
// we can afford to yield here because any change to the base data that we might miss is
// already being queued and will be migrated in the 'transferMods' stage
runner->setYieldPolicy(Runner::YIELD_AUTO);
diff --git a/src/mongo/db/index/btree_access_method.cpp b/src/mongo/db/index/btree_access_method.cpp
index f88d5249f5e..f3317725bdd 100644
--- a/src/mongo/db/index/btree_access_method.cpp
+++ b/src/mongo/db/index/btree_access_method.cpp
@@ -62,7 +62,7 @@ namespace mongo {
for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
try {
- _interface->bt_insert(_btreeState.get(),
+ _interface->bt_insert(_btreeState,
_btreeState->head(),
loc,
*i,
@@ -70,7 +70,7 @@ namespace mongo {
true);
++*numInserted;
} catch (AssertionException& e) {
- if (10287 == e.getCode() && _descriptor->isBackgroundIndex()) {
+ if (10287 == e.getCode() && !_btreeState->isReady()) {
// This is the duplicate key exception. We ignore it for some reason in BG
// indexing.
DEV log() << "info: key already in index during bg indexing (ok)\n";
@@ -101,7 +101,7 @@ namespace mongo {
bool ret = false;
try {
- ret = _interface->unindex(_btreeState.get(),
+ ret = _interface->unindex(_btreeState,
_btreeState->head(),
key,
loc);
@@ -168,7 +168,7 @@ namespace mongo {
int unusedPos;
bool unusedFound;
DiskLoc unusedDiskLoc;
- _interface->locate(_btreeState.get(),
+ _interface->locate(_btreeState,
_btreeState->head(),
*i,
unusedPos,
@@ -182,12 +182,12 @@ namespace mongo {
DiskLoc BtreeBasedAccessMethod::findSingle( const BSONObj& key ) {
if ( 0 == _descriptor->version() ) {
- return _btreeState->getHeadBucket<V0>()->findSingle( _btreeState.get(),
+ return _btreeState->getHeadBucket<V0>()->findSingle( _btreeState,
_btreeState->head(),
key );
}
if ( 1 == _descriptor->version() ) {
- return _btreeState->getHeadBucket<V1>()->findSingle( _btreeState.get(),
+ return _btreeState->getHeadBucket<V1>()->findSingle( _btreeState,
_btreeState->head(),
key );
}
@@ -196,7 +196,7 @@ namespace mongo {
Status BtreeBasedAccessMethod::validate(int64_t* numKeys) {
- *numKeys = _interface->fullValidate(_btreeState.get(),
+ *numKeys = _interface->fullValidate(_btreeState,
_btreeState->head(),
_descriptor->keyPattern());
return Status::OK();
@@ -223,12 +223,12 @@ namespace mongo {
if (checkForDups) {
for (vector<BSONObj*>::iterator i = data->added.begin(); i != data->added.end(); i++) {
- if (_interface->wouldCreateDup(_btreeState.get(),
+ if (_interface->wouldCreateDup(_btreeState,
_btreeState->head(),
**i, record)) {
status->_isValid = false;
return Status(ErrorCodes::DuplicateKey,
- _interface->dupKeyError(_btreeState.get(),
+ _interface->dupKeyError(_btreeState,
_btreeState->head(),
**i));
}
@@ -253,7 +253,7 @@ namespace mongo {
}
for (size_t i = 0; i < data->added.size(); ++i) {
- _interface->bt_insert(_btreeState.get(),
+ _interface->bt_insert(_btreeState,
_btreeState->head(),
data->loc,
*data->added[i],
@@ -262,7 +262,7 @@ namespace mongo {
}
for (size_t i = 0; i < data->removed.size(); ++i) {
- _interface->unindex(_btreeState.get(),
+ _interface->unindex(_btreeState,
_btreeState->head(),
*data->removed[i],
data->loc);
@@ -304,7 +304,7 @@ namespace mongo {
}
Status BtreeAccessMethod::newCursor(IndexCursor** out) const {
- *out = new BtreeIndexCursor(_btreeState.get(), _interface);
+ *out = new BtreeIndexCursor(_btreeState, _interface);
return Status::OK();
}
diff --git a/src/mongo/db/index/btree_access_method_internal.h b/src/mongo/db/index/btree_access_method_internal.h
index 1adc108774f..dc9b41e41db 100644
--- a/src/mongo/db/index/btree_access_method_internal.h
+++ b/src/mongo/db/index/btree_access_method_internal.h
@@ -92,7 +92,7 @@ namespace mongo {
virtual void getKeys(const BSONObj &obj, BSONObjSet *keys) = 0;
- scoped_ptr<BtreeInMemoryState> _btreeState; // OWNED HERE
+ BtreeInMemoryState* _btreeState; // owned by IndexCatalogEntry
const IndexDescriptor* _descriptor;
// There are 2 types of Btree disk formats. We put them both behind one interface.
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 592a94f87c5..d67be2e506b 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -182,7 +182,8 @@ namespace mongo {
unordered_set<DiskLoc, DiskLoc::Hasher> thisPass;
- scoped_ptr<Runner> runner(InternalPlanner::indexScan(_descriptor, key, key, true));
+ scoped_ptr<Runner> runner(InternalPlanner::indexScan(_btreeState->collection(),
+ _descriptor, key, key, true));
Runner::RunnerState state;
DiskLoc loc;
while (Runner::RUNNER_ADVANCED == (state = runner->getNext(NULL, &loc))) {
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 309123478de..59bb26800ae 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -54,9 +54,9 @@ namespace mongo {
* OnDiskIndexData is a pointer to the memory mapped per-index data.
* infoObj is a copy of the index-describing BSONObj contained in the OnDiskIndexData.
*/
- IndexDescriptor(Collection* collection, int indexNumber,BSONObj infoObj)
+ IndexDescriptor(Collection* collection, BSONObj infoObj)
: _magic(123987),
- _collection(collection), _indexNumber(indexNumber),
+ _collection(collection),
_infoObj(infoObj.getOwned()),
_numFields(infoObj.getObjectField("key").nFields()),
_keyPattern(infoObj.getObjectField("key").getOwned()),
@@ -124,7 +124,7 @@ namespace mongo {
bool isSparse() const { return _sparse; }
// Is this index multikey?
- bool isMultikey() const { _checkOk(); return _collection->details()->isMultikey(_indexNumber); }
+ bool isMultikey() const { _checkOk(); return _collection->getIndexCatalog()->isMultikey( this ); }
bool isIdIndex() const { _checkOk(); return _isIdIndex; }
@@ -146,14 +146,6 @@ namespace mongo {
// Return the info object.
const BSONObj& infoObj() const { _checkOk(); return _infoObj; }
- // Is this index being created in the background?
- bool isBackgroundIndex() const {
- return _indexNumber >= _collection->details()->getCompletedIndexCount();
- }
-
- // this is the collection over which the index is over
- Collection* getIndexedCollection() const { return _collection; }
-
// this is the owner of this IndexDescriptor
IndexCatalog* getIndexCatalog() const { return _collection->getIndexCatalog(); }
@@ -166,17 +158,11 @@ namespace mongo {
verify(0);
}
- int getIndexNumber() const { return _indexNumber; }
-
int _magic;
// Related catalog information of the parent collection
Collection* _collection;
- // What # index are we in the catalog represented by _namespaceDetails? Needed for setting
- // and getting multikey.
- int _indexNumber;
-
// The BSONObj describing the index. Accessed through the various members above.
const BSONObj _infoObj;
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index b02f7a1910f..4422fd91b1a 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -90,46 +90,39 @@ namespace mongo {
IndexCatalog* indexCatalog = collection->getIndexCatalog();
- if ( indexCatalog->numIndexesInProgress() == 0 ) {
+ vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes();
+
+ // The indexes have now been removed from system.indexes, so the only record is
+ // in-memory. If there is a journal commit between now and when insert() rewrites
+ // the entry and the db crashes before the new system.indexes entry is journalled,
+ // the index will be lost forever. Thus, we're assuming no journaling will happen
+ // between now and the entry being re-written.
+
+ if ( indexesToBuild.size() == 0 ) {
continue;
}
- log() << "found interrupted index build(s) on " << *it << endl;
+ log() << "found " << indexesToBuild.size()
+ << " interrupted index build(s) on " << *it;
if (firstTime) {
log() << "note: restart the server with --noIndexBuildRetry to skip index rebuilds";
firstTime = false;
}
- // If the indexBuildRetry flag isn't set, just clear the inProg flag
if (!serverGlobalParams.indexBuildRetry) {
- // If we crash between unsetting the inProg flag and cleaning up the index, the
- // index space will be lost.
- while ( indexCatalog->numIndexesInProgress() > 0 ) {
- // ignoring return as we're just destroying these
- indexCatalog->prepOneUnfinishedIndex();
- }
+ log() << " not rebuilding interrupted indexes";
continue;
}
- // We go from right to left building these indexes, so that indexBuildInProgress-- has
- // the correct effect of "popping" an index off the list.
- while ( indexCatalog->numIndexesInProgress() > 0 ) {
- // First, clean up the in progress index build. Save the system.indexes entry so that we
- // can add it again afterwards.
- BSONObj indexObj = indexCatalog->prepOneUnfinishedIndex();
+ // TODO: these can/should/must be done in parallel
+ for ( size_t i = 0; i < indexesToBuild.size(); i++ ) {
+ BSONObj indexObj = indexesToBuild[i];
- // The index has now been removed from system.indexes, so the only record of it is
- // in-memory. If there is a journal commit between now and when insert() rewrites
- // the entry and the db crashes before the new system.indexes entry is journalled,
- // the index will be lost forever. Thus, we're assuming no journaling will happen
- // between now and the entry being re-written.
+ log() << "going to rebuild: " << indexObj;
Status status = indexCatalog->createIndex( indexObj, false );
- if ( status.code() == ErrorCodes::IndexAlreadyExists ) {
- // no-op
- }
- else if ( !status.isOK() ) {
+ if ( !status.isOK() ) {
log() << "building index failed: " << status.toString() << " index: " << indexObj;
}
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 60013891a8e..51decb5d43c 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -1240,7 +1240,7 @@ namespace {
return;
}
#endif
- tryToOutputFatal( "dbexit: really exiting now" );
+ tryToOutputFatal( "dbexit: really exiting now\n" );
if ( c ) c->shutdown();
::_exit(rc);
}
diff --git a/src/mongo/db/namespace_details.cpp b/src/mongo/db/namespace_details.cpp
index 23a2685b8b2..1aee576390c 100644
--- a/src/mongo/db/namespace_details.cpp
+++ b/src/mongo/db/namespace_details.cpp
@@ -704,9 +704,8 @@ namespace mongo {
}
}
- // @return offset in indexes[]
- int NamespaceDetails::findIndexByName(const StringData& name,
- bool includeBackgroundInProgress) {
+ int NamespaceDetails::_catalogFindIndexByName(const StringData& name,
+ bool includeBackgroundInProgress) {
IndexIterator i = ii(includeBackgroundInProgress);
while( i.more() ) {
if ( name == i.next().info.obj().getStringField("name") )
diff --git a/src/mongo/db/namespace_details.h b/src/mongo/db/namespace_details.h
index 8fc6b1800be..29854891bff 100644
--- a/src/mongo/db/namespace_details.h
+++ b/src/mongo/db/namespace_details.h
@@ -42,6 +42,7 @@
#include "mongo/platform/unordered_map.h"
namespace mongo {
+ class BtreeInMemoryState;
class Database;
class IndexCatalog;
@@ -335,9 +336,6 @@ namespace mongo {
}
}
- // @return offset in indexes[]
- int findIndexByName(const StringData& name, bool includeBackgroundInProgress = false);
-
const int systemFlags() const { return _systemFlags; }
bool isSystemFlagSet( int flag ) const { return _systemFlags & flag; }
void setSystemFlag( int flag );
@@ -405,6 +403,10 @@ namespace mongo {
NamespaceDetails *writingWithExtra();
private:
+ // @return offset in indexes[]
+ int _catalogFindIndexByName( const StringData& name,
+ bool includeBackgroundInProgress = false);
+
void _removeIndexFromMe( int idx );
/**
@@ -419,8 +421,10 @@ namespace mongo {
DiskLoc __stdAlloc(int len, bool willBeAt);
void compact(); // combine adjacent deleted records
+ friend class Database;
friend class NamespaceIndex;
friend class IndexCatalog;
+ friend class BtreeInMemoryState;
struct ExtraOld {
// note we could use this field for more chaining later, so don't waste it:
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index fe96e1a9930..7fdf03fba98 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -86,13 +86,14 @@ namespace mongo {
/**
* Return an index scan. Caller owns returned pointer.
*/
- static Runner* indexScan(const IndexDescriptor* descriptor,
+ static Runner* indexScan(const Collection* collection,
+ const IndexDescriptor* descriptor,
const BSONObj& startKey, const BSONObj& endKey,
bool endKeyInclusive, Direction direction = FORWARD,
int options = 0) {
verify(descriptor);
- const NamespaceString& ns = descriptor->getIndexedCollection()->ns();
+ const NamespaceString& ns = collection->ns();
IndexScanParams params;
params.descriptor = descriptor;
diff --git a/src/mongo/db/structure/btree/state.cpp b/src/mongo/db/structure/btree/state.cpp
index a869293d332..3a8bfbb7daa 100644
--- a/src/mongo/db/structure/btree/state.cpp
+++ b/src/mongo/db/structure/btree/state.cpp
@@ -39,23 +39,102 @@ namespace mongo {
BtreeInMemoryState::BtreeInMemoryState( Collection* collection,
const IndexDescriptor* descriptor,
- RecordStore* recordStore,
- IndexDetails* details )
+ RecordStore* recordStore )
: _collection( collection ),
_descriptor( descriptor ),
_recordStore( recordStore ),
- _indexDetails( details ),
_ordering( Ordering::make( descriptor->keyPattern() ) ) {
+ _isReady = false;
+ _isMultikeySet = false;
+ _head.Null();
}
- const DiskLoc& BtreeInMemoryState::head() const { return _indexDetails->head; }
+ int BtreeInMemoryState::_indexNo() const {
+ NamespaceDetails* nsd = _collection->details();
+ int idxNo = nsd->_catalogFindIndexByName( _descriptor->indexName(), true );
+ fassert( 17333, idxNo >= 0 );
+ return idxNo;
+ }
+
+ const DiskLoc& BtreeInMemoryState::head() const {
+ if ( _head.isNull() ) {
+ _head = _catalogFindHeadFromDisk();
+ return _head;
+ }
+
+ DEV {
+ if ( _head != _catalogFindHeadFromDisk() ) {
+ log() << "_head: " << _head
+ << " _catalogFindHeadFromDisk(): " << _catalogFindHeadFromDisk();
+ }
+ verify( _head == _catalogFindHeadFromDisk() );
+ }
+
+ return _head;
+ }
+
+ DiskLoc BtreeInMemoryState::_catalogFindHeadFromDisk() const {
+ NamespaceDetails* nsd = _collection->details();
+ int idxNo = _indexNo();
+ return nsd->idx( idxNo ).head;
+ }
void BtreeInMemoryState::setHead( DiskLoc newHead ) {
- _indexDetails->head.writing() = newHead;
+ NamespaceDetails* nsd = _collection->details();
+ int idxNo = _indexNo();
+ IndexDetails& id = nsd->idx( idxNo );
+ id.head.writing() = newHead;
+ _head = newHead;
}
void BtreeInMemoryState::setMultikey() {
- _collection->getIndexCatalog()->markMultikey( _descriptor, true );
+ NamespaceDetails* nsd = _collection->details();
+ int idxNo = _indexNo();
+ if ( nsd->setIndexIsMultikey( idxNo, true ) )
+ _collection->infoCache()->clearQueryCache();
+
+ _isMultikeySet = true;
+ _isMultikey = true;
+ }
+
+ bool BtreeInMemoryState::isMultikey() const {
+ if ( _isMultikeySet ) {
+ DEV {
+ NamespaceDetails* nsd = _collection->details();
+ int idxNo = _indexNo();
+ verify( _isMultikey == nsd->isMultikey( idxNo ) );
+ }
+ return _isMultikey;
+ }
+
+ NamespaceDetails* nsd = _collection->details();
+ int idxNo = _indexNo();
+ _isMultikey = nsd->isMultikey( idxNo );
+ _isMultikeySet = true;
+ return _isMultikey;
+ }
+
+
+ bool BtreeInMemoryState::isReady() const {
+ DEV _debugCheckVerifyReady();
+ return _isReady;
}
+ void BtreeInMemoryState::setIsReady( bool isReady ) {
+ _isReady = isReady;
+ DEV _debugCheckVerifyReady();
+ if ( isReady ) {
+ // get caches ready
+ head();
+ isMultikey();
+
+ fassert( 17339, _head == _catalogFindHeadFromDisk() );
+ fassert( 17340, _isMultikey == _collection->details()->isMultikey( _indexNo() ) );
+ }
+ }
+
+ void BtreeInMemoryState::_debugCheckVerifyReady() const {
+ bool real = _indexNo() < _collection->getIndexCatalog()->numIndexesReady();
+ verify( real == _isReady );
+ }
}
diff --git a/src/mongo/db/structure/btree/state.h b/src/mongo/db/structure/btree/state.h
index 156e1976edd..be190060e05 100644
--- a/src/mongo/db/structure/btree/state.h
+++ b/src/mongo/db/structure/btree/state.h
@@ -38,6 +38,7 @@
namespace mongo {
class Collection;
+ class IndexCatalog;
class IndexDetails;
class IndexDescriptor;
class RecordStore;
@@ -55,8 +56,7 @@ namespace mongo {
public:
BtreeInMemoryState( Collection* collection,
const IndexDescriptor* descriptor,
- RecordStore* recordStore,
- IndexDetails* details );
+ RecordStore* recordStore );
const Collection* collection() const { return _collection; }
@@ -72,8 +72,12 @@ namespace mongo {
void setHead( DiskLoc newHead );
+ bool isMultikey() const;
+
void setMultikey();
+ // if this ready is ready for queries
+ bool isReady() const;
// ------------
@@ -84,6 +88,17 @@ namespace mongo {
const BtreeBucket<V>* getBucket( const DiskLoc& loc ) const;
private:
+
+ int _indexNo() const;
+
+ void setIsReady( bool isReady );
+
+ void _debugCheckVerifyReady() const;
+
+ DiskLoc _catalogFindHeadFromDisk() const;
+
+ // ---------
+
// the collection this index is over, not storage for the index
Collection* _collection; // not-owned here
@@ -92,10 +107,16 @@ namespace mongo {
// the record store for this index (where its buckets go)
scoped_ptr<RecordStore> _recordStore; // OWNED HERE
- IndexDetails* _indexDetails; // TODO: remove
-
Ordering _ordering;
+ bool _isReady; // cache of NamespaceDetails
+
+ mutable DiskLoc _head; // cache of IndexDetails or Null
+
+ mutable bool _isMultikeySet;
+ mutable bool _isMultikey; // cache of NamespaceDetails
+
+ friend class IndexCatalog;
};
}
diff --git a/src/mongo/db/structure/collection.cpp b/src/mongo/db/structure/collection.cpp
index e27c033e70d..5daf53172bf 100644
--- a/src/mongo/db/structure/collection.cpp
+++ b/src/mongo/db/structure/collection.cpp
@@ -61,6 +61,7 @@ namespace mongo {
&database->getExtentManager(),
_ns.coll() == "system.indexes" );
_magic = 1357924;
+ _indexCatalog.init();
}
Collection::~Collection() {
@@ -104,6 +105,21 @@ namespace mongo {
return new FlatIterator( this, start, dir );
}
+ int64_t Collection::countTableScan( const MatchExpression* expression ) {
+ scoped_ptr<CollectionIterator> iterator( getIterator( DiskLoc(),
+ false,
+ CollectionScanParams::FORWARD ) );
+ int64_t count = 0;
+ while ( !iterator->isEOF() ) {
+ DiskLoc loc = iterator->getNext();
+ BSONObj obj = docFor( loc );
+ if ( expression->matchesBSON( obj ) )
+ count++;
+ }
+
+ return count;
+ }
+
BSONObj Collection::docFor( const DiskLoc& loc ) {
Record* rec = getExtentManager()->recordFor( loc );
return BSONObj::make( rec->accessed() );
diff --git a/src/mongo/db/structure/collection.h b/src/mongo/db/structure/collection.h
index 807b742457a..a59ae419b97 100644
--- a/src/mongo/db/structure/collection.h
+++ b/src/mongo/db/structure/collection.h
@@ -91,10 +91,21 @@ namespace mongo {
BSONObj docFor( const DiskLoc& loc );
// ---- things that should move to a CollectionAccessMethod like thing
-
+ /**
+ * canonical to get all would be
+ * getIterator( DiskLoc(), false, CollectionScanParams::FORWARD )
+ */
CollectionIterator* getIterator( const DiskLoc& start, bool tailable,
const CollectionScanParams::Direction& dir) const;
+
+ /**
+ * does a table scan to do a count
+ * this should only be used at a very low level
+ * does no yielding, indexes, etc...
+ */
+ int64_t countTableScan( const MatchExpression* expression );
+
void deleteDocument( const DiskLoc& loc,
bool cappedOK = false,
bool noWarn = false,
diff --git a/src/mongo/dbtests/btreebuildertests.cpp b/src/mongo/dbtests/btreebuildertests.cpp
index be9ab6fb30f..3ac5f049396 100644
--- a/src/mongo/dbtests/btreebuildertests.cpp
+++ b/src/mongo/dbtests/btreebuildertests.cpp
@@ -42,6 +42,8 @@ namespace BtreeBuilderTests {
static const char* const _ns = "unittests.btreebuilder";
DBDirectClient _client;
+ // QUERY_MIGRATION
+#if 0
/**
* Test fixture for a write locked test using collection _ns. Includes functionality to
* partially construct a new IndexDetails in a manner that supports proper cleanup in
@@ -85,6 +87,7 @@ namespace BtreeBuilderTests {
private:
Client::WriteContext _ctx;
};
+#endif
// QUERY_MIGRATION
#if 0
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 1bac1a89a38..6b94a713994 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -66,6 +66,8 @@ namespace IndexUpdateTests {
return _ctx.ctx().db()->getCollection( _ns );
}
protected:
+ // QUERY_MIGRATION
+#if 0
/** @return IndexDetails for a new index on a:1, with the info field populated. */
IndexDescriptor* addIndexWithInfo() {
BSONObj indexInfo = BSON( "v" << 1 <<
@@ -88,11 +90,13 @@ namespace IndexUpdateTests {
return collection()->getIndexCatalog()->findIndexByName( "a_1" );
}
-
+#endif
Client::WriteContext _ctx;
};
/** addKeysToPhaseOne() adds keys from a collection's documents to an external sorter. */
+ // QUERY_MIGRATION
+#if 0
class AddKeysToPhaseOne : public IndexBuildBase {
public:
void run() {
@@ -170,6 +174,7 @@ namespace IndexUpdateTests {
private:
bool _mayInterrupt;
};
+#endif
// QUERY_MIGRATION
#if 0
@@ -549,7 +554,8 @@ namespace IndexUpdateTests {
BSON( "ns" << _ns << "name" << "a_1" ) ) );
}
};
-
+ // QUERY_MIGRATION
+#if 0
class IndexBuildInProgressTest : public IndexBuildBase {
public:
void run() {
@@ -603,6 +609,7 @@ namespace IndexUpdateTests {
return new IndexCatalog::IndexBuildBlock( _ctx.ctx().db()->getCollection( _ns )->getIndexCatalog(), name, infoLoc );
}
};
+#endif
/**
* Fixture class that has a basic compound index.
@@ -774,9 +781,9 @@ namespace IndexUpdateTests {
}
void setupTests() {
- add<AddKeysToPhaseOne>();
- add<InterruptAddKeysToPhaseOne>( false );
- add<InterruptAddKeysToPhaseOne>( true );
+ //add<AddKeysToPhaseOne>();
+ //add<InterruptAddKeysToPhaseOne>( false );
+ //add<InterruptAddKeysToPhaseOne>( true );
// QUERY_MIGRATION
//add<BuildBottomUp>();
//add<InterruptBuildBottomUp>( false );
@@ -790,7 +797,7 @@ namespace IndexUpdateTests {
add<InsertBuildIdIndexInterruptDisallowed>();
add<DirectClientEnsureIndexInterruptDisallowed>();
add<HelpersEnsureIndexInterruptDisallowed>();
- add<IndexBuildInProgressTest>();
+ //add<IndexBuildInProgressTest>();
add<SameSpecDifferentOption>();
add<SameSpecSameOptions>();
add<DifferentSpecSameName>();
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index c88146a7255..3feded8bae0 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -66,7 +66,7 @@ namespace NamespaceTests {
BSONObj bobj = builder.done();
- _index.reset( new IndexDescriptor( NULL, -1, bobj ) );
+ _index.reset( new IndexDescriptor( NULL, bobj ) );
_keyPattern = key().getOwned();
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index e5073baafac..d4c5bf4123f 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -409,7 +409,7 @@ namespace mongo {
BSONObj min = Helpers::toKeyFormat( kp.extendRangeBound( _min, false ) );
BSONObj max = Helpers::toKeyFormat( kp.extendRangeBound( _max, false ) );
- auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, min, max, false));
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max, false));
// we can afford to yield here because any change to the base data that we might miss is
// already being queued and will be migrated in the 'transferMods' stage
runner->setYieldPolicy(Runner::YIELD_AUTO);
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 184f9ab3bc4..0cbeba0573f 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -144,7 +144,7 @@ namespace mongo {
max = Helpers::toKeyFormat( kp.extendRangeBound( max, false ) );
}
- auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, min, max,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max,
false, InternalPlanner::FORWARD));
runner->setYieldPolicy(Runner::YIELD_AUTO);
@@ -376,7 +376,7 @@ namespace mongo {
long long currCount = 0;
long long numChunks = 0;
- auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, min, max,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx, min, max,
false, InternalPlanner::FORWARD));
BSONObj currKey;
@@ -435,7 +435,7 @@ namespace mongo {
currCount = 0;
log() << "splitVector doing another cycle because of force, keyCount now: " << keyCount << endl;
- runner.reset(InternalPlanner::indexScan(idx, min, max,
+ runner.reset(InternalPlanner::indexScan(collection, idx, min, max,
false, InternalPlanner::FORWARD));
runner->setYieldPolicy(Runner::YIELD_AUTO);
@@ -861,7 +861,8 @@ namespace mongo {
BSONObj newmin = Helpers::toKeyFormat( kp.extendRangeBound( chunk.min, false) );
BSONObj newmax = Helpers::toKeyFormat( kp.extendRangeBound( chunk.max, false) );
- auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, newmin, newmax, false));
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(collection, idx,
+ newmin, newmax, false));
// check if exactly one document found
if (Runner::RUNNER_ADVANCED == runner->getNext(NULL, NULL)) {
diff --git a/src/mongo/util/stack_introspect.cpp b/src/mongo/util/stack_introspect.cpp
index c14838236cd..76d9e06231d 100644
--- a/src/mongo/util/stack_introspect.cpp
+++ b/src/mongo/util/stack_introspect.cpp
@@ -94,6 +94,16 @@ namespace mongo {
return EXEMPT;
}
+ if ( name.find( "_debugCheck" ) != string::npos ) {
+ // use this for generic debugging paths
+ return EXEMPT;
+ }
+
+ if ( name.find( "_catalog" ) != string::npos ) {
+ // internal catalog methods are exempt
+ return EXEMPT;
+ }
+
// For sharding dbtests - page fault exceptions cause sporadic errors unrelated to tests
if ( name.find( "ScopedDistributedLock" ) != string::npos ) {
return EXEMPT;
@@ -102,6 +112,7 @@ namespace mongo {
if ( name.find( "CollectionInfo" ) != string::npos ) {
return EXEMPT;
}
+
// End sharding dbtest exemptions
return method == clazz ? YES : NO;