author     Eliot Horowitz <eliot@10gen.com>  2013-06-14 15:40:10 -0400
committer  Eliot Horowitz <eliot@10gen.com>  2013-06-14 15:40:43 -0400
commit     fcc103ba403fd35d96675f217c0e56d73eb14469 (patch)
tree       27fd1fc361d3e915f4a32bdddc76aa47c6154abc /src/mongo
parent     f9667a3f90c57086bc9d03f4350b76e2a02cd6b3 (diff)
download   mongo-fcc103ba403fd35d96675f217c0e56d73eb14469.tar.gz
SERVER-6405: first step of cleaning NamespaceDetails
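The cleanup below applies one pattern throughout: the on-disk fields of NamespaceDetails (firstExtent, deletedList, stats, nIndexes, capExtent, ...) become private, underscore-prefixed members, and call sites go through accessors. Because the struct is memory-mapped from the .ns file, the fields are only renamed in place and the 496-byte layout is untouched. A minimal sketch of the pattern (hypothetical simplified class, not the real declaration):

    class NamespaceDetails {
    public:
        // const/mutable accessor pairs replace direct field access
        const DiskLoc& firstExtent() const { return _firstExtent; }
        DiskLoc& firstExtent() { return _firstExtent; }
        long long dataSize() const { return _stats.datasize; }
        long long numRecords() const { return _stats.nrecords; }
    private:
        DiskLoc _firstExtent;   // was public: DiskLoc firstExtent;
        struct Stats { long long datasize; long long nrecords; } _stats; // was public: stats
    };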
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/cap.cpp | 91
-rw-r--r--  src/mongo/db/cloner.cpp | 2
-rw-r--r--  src/mongo/db/commands/storage_details.cpp | 6
-rw-r--r--  src/mongo/db/compact.cpp | 32
-rw-r--r--  src/mongo/db/cursor.cpp | 34
-rw-r--r--  src/mongo/db/dbcommands.cpp | 38
-rw-r--r--  src/mongo/db/dbcommands_admin.cpp | 55
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 6
-rw-r--r--  src/mongo/db/index.cpp | 6
-rw-r--r--  src/mongo/db/index.h | 3
-rw-r--r--  src/mongo/db/index/btree_based_builder.cpp | 4
-rw-r--r--  src/mongo/db/index/index_descriptor.h | 2
-rw-r--r--  src/mongo/db/index_rebuilder.cpp | 29
-rw-r--r--  src/mongo/db/index_rebuilder.h | 5
-rw-r--r--  src/mongo/db/index_update.cpp | 91
-rw-r--r--  src/mongo/db/namespace_details-inl.h | 2
-rw-r--r--  src/mongo/db/namespace_details.cpp | 236
-rw-r--r--  src/mongo/db/namespace_details.h | 156
-rw-r--r--  src/mongo/db/ops/count.cpp | 2
-rw-r--r--  src/mongo/db/pdfile.cpp | 151
-rw-r--r--  src/mongo/db/query_optimizer_internal.cpp | 4
-rw-r--r--  src/mongo/db/queryutil.cpp | 8
-rw-r--r--  src/mongo/db/repl/finding_start_cursor.cpp | 10
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 2
-rw-r--r--  src/mongo/dbtests/btreetests.inl | 150
-rw-r--r--  src/mongo/dbtests/clienttests.cpp | 6
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp | 24
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp | 40
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp | 82
-rw-r--r--  src/mongo/dbtests/queryoptimizertests.cpp | 2
-rw-r--r--  src/mongo/dbtests/queryutiltests.cpp | 2
-rw-r--r--  src/mongo/s/d_migrate.cpp | 4
-rw-r--r--  src/mongo/s/d_split.cpp | 8
-rw-r--r--  src/mongo/tools/dump.cpp | 26
-rw-r--r--  src/mongo/util/touch_pages.cpp | 8
36 files changed, 748 insertions, 581 deletions
diff --git a/src/mongo/db/cap.cpp b/src/mongo/db/cap.cpp
index 8d0c4f9a35b..fda329f0748 100644
--- a/src/mongo/db/cap.cpp
+++ b/src/mongo/db/cap.cpp
@@ -114,34 +114,34 @@ namespace mongo {
void NamespaceDetails::cappedCheckMigrate() {
// migrate old NamespaceDetails format
verify( isCapped() );
- if ( capExtent.a() == 0 && capExtent.getOfs() == 0 ) {
+ if ( _capExtent.a() == 0 && _capExtent.getOfs() == 0 ) {
//capFirstNewRecord = DiskLoc();
- capFirstNewRecord.writing().setInvalid();
+ _capFirstNewRecord.writing().setInvalid();
// put all the DeletedRecords in cappedListOfAllDeletedRecords()
for ( int i = 1; i < Buckets; ++i ) {
- DiskLoc first = deletedList[ i ];
+ DiskLoc first = _deletedList[ i ];
if ( first.isNull() )
continue;
DiskLoc last = first;
for (; !last.drec()->nextDeleted().isNull(); last = last.drec()->nextDeleted() );
last.drec()->nextDeleted().writing() = cappedListOfAllDeletedRecords();
cappedListOfAllDeletedRecords().writing() = first;
- deletedList[i].writing() = DiskLoc();
+ _deletedList[i].writing() = DiskLoc();
}
// NOTE cappedLastDelRecLastExtent() set to DiskLoc() in above
// Last, in case we're killed before getting here
- capExtent.writing() = firstExtent;
+ _capExtent.writing() = _firstExtent;
}
}
bool NamespaceDetails::inCapExtent( const DiskLoc &dl ) const {
verify( !dl.isNull() );
// We could have a rec or drec, doesn't matter.
- bool res = dl.drec()->myExtentLoc(dl) == capExtent;
+ bool res = dl.drec()->myExtentLoc(dl) == _capExtent;
DEV {
// old implementation. this check is temp to test works the same. new impl should be a little faster.
- verify( res == (dl.drec()->myExtent( dl ) == capExtent.ext()) );
+ verify( res == (dl.drec()->myExtent( dl ) == _capExtent.ext()) );
}
return res;
}
@@ -157,7 +157,7 @@ namespace mongo {
void NamespaceDetails::advanceCapExtent( const char *ns ) {
// We want cappedLastDelRecLastExtent() to be the last DeletedRecord of the prev cap extent
// (or DiskLoc() if new capExtent == firstExtent)
- if ( capExtent == lastExtent )
+ if ( _capExtent == _lastExtent )
getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
else {
DiskLoc i = cappedFirstDeletedInCurExtent();
@@ -165,13 +165,14 @@ namespace mongo {
getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = i;
}
- getDur().writingDiskLoc( capExtent ) = theCapExtent()->xnext.isNull() ? firstExtent : theCapExtent()->xnext;
+ getDur().writingDiskLoc( _capExtent ) =
+ theCapExtent()->xnext.isNull() ? _firstExtent : theCapExtent()->xnext;
/* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
//dassert( theCapExtent()->ns == ns );
theCapExtent()->assertOk();
- getDur().writingDiskLoc( capFirstNewRecord ) = DiskLoc();
+ getDur().writingDiskLoc( _capFirstNewRecord ) = DiskLoc();
}
DiskLoc NamespaceDetails::__capAlloc( int len ) {
@@ -229,24 +230,24 @@ namespace mongo {
theCapExtent()->assertOk();
DiskLoc firstEmptyExtent;
while ( 1 ) {
- if ( stats.nrecords < maxCappedDocs() ) {
+ if ( _stats.nrecords < maxCappedDocs() ) {
loc = __capAlloc( len );
if ( !loc.isNull() )
break;
}
// If on first iteration through extents, don't delete anything.
- if ( !capFirstNewRecord.isValid() ) {
+ if ( !_capFirstNewRecord.isValid() ) {
advanceCapExtent( ns );
- if ( capExtent != firstExtent )
- capFirstNewRecord.writing().setInvalid();
+ if ( _capExtent != _firstExtent )
+ _capFirstNewRecord.writing().setInvalid();
// else signal done with first iteration through extents.
continue;
}
- if ( !capFirstNewRecord.isNull() &&
- theCapExtent()->firstRecord == capFirstNewRecord ) {
+ if ( !_capFirstNewRecord.isNull() &&
+ theCapExtent()->firstRecord == _capFirstNewRecord ) {
// We've deleted all records that were allocated on the previous
// iteration through this extent.
advanceCapExtent( ns );
@@ -255,9 +256,9 @@ namespace mongo {
if ( theCapExtent()->firstRecord.isNull() ) {
if ( firstEmptyExtent.isNull() )
- firstEmptyExtent = capExtent;
+ firstEmptyExtent = _capExtent;
advanceCapExtent( ns );
- if ( firstEmptyExtent == capExtent ) {
+ if ( firstEmptyExtent == _capExtent ) {
maybeComplain( ns, len );
return DiskLoc();
}
@@ -273,22 +274,22 @@ namespace mongo {
<< ", len: " << len
<< ", maxPasses: " << maxPasses
<< ", _maxDocsInCapped: " << _maxDocsInCapped
- << ", nrecords: " << stats.nrecords
- << ", datasize: " << stats.datasize;
+ << ", nrecords: " << _stats.nrecords
+ << ", datasize: " << _stats.datasize;
msgasserted(10345, sb.str());
}
}
// Remember first record allocated on this iteration through capExtent.
- if ( capFirstNewRecord.isValid() && capFirstNewRecord.isNull() )
- getDur().writingDiskLoc(capFirstNewRecord) = loc;
+ if ( _capFirstNewRecord.isValid() && _capFirstNewRecord.isNull() )
+ getDur().writingDiskLoc(_capFirstNewRecord) = loc;
return loc;
}
void NamespaceDetails::dumpExtents() {
cout << "dumpExtents:" << endl;
- for ( DiskLoc i = firstExtent; !i.isNull(); i = i.ext()->xnext ) {
+ for ( DiskLoc i = _firstExtent; !i.isNull(); i = i.ext()->xnext ) {
Extent *e = i.ext();
stringstream ss;
e->dump(ss);
@@ -297,16 +298,16 @@ namespace mongo {
}
void NamespaceDetails::cappedDumpDelInfo() {
- cout << "dl[0]: " << deletedList[0].toString() << endl;
- for( DiskLoc z = deletedList[0]; !z.isNull(); z = z.drec()->nextDeleted() ) {
+ cout << "dl[0]: " << _deletedList[0].toString() << endl;
+ for( DiskLoc z = _deletedList[0]; !z.isNull(); z = z.drec()->nextDeleted() ) {
cout << " drec:" << z.toString() << " dreclen:" << hex << z.drec()->lengthWithHeaders() <<
" ext:" << z.drec()->myExtent(z)->myLoc.toString() << endl;
}
- cout << "dl[1]: " << deletedList[1].toString() << endl;
+ cout << "dl[1]: " << _deletedList[1].toString() << endl;
}
void NamespaceDetails::cappedTruncateLastDelUpdate() {
- if ( capExtent == firstExtent ) {
+ if ( _capExtent == _firstExtent ) {
// Only one extent of the collection is in use, so there
// is no deleted record in a previous extent, so nullify
// cappedLastDelRecLastExtent().
@@ -361,7 +362,7 @@ namespace mongo {
// TODO The algorithm used in this function cannot generate an
// empty collection, but we could call emptyCappedCollection() in
// this case instead of asserting.
- uassert( 13415, "emptying the collection is not allowed", stats.nrecords > 1 );
+ uassert( 13415, "emptying the collection is not allowed", _stats.nrecords > 1 );
// Delete the newest record, and coalesce the new deleted
// record with existing deleted records.
@@ -382,7 +383,7 @@ namespace mongo {
// NOTE Because we didn't delete the last document, and
// capLooped() is false, capExtent is not the first extent
// so xprev will be nonnull.
- capExtent.writing() = theCapExtent()->xprev;
+ _capExtent.writing() = theCapExtent()->xprev;
theCapExtent()->assertOk();
// update cappedLastDelRecLastExtent()
@@ -397,7 +398,7 @@ namespace mongo {
// NOTE In this comparison, curr and potentially capFirstNewRecord
// may point to invalid data, but we can still compare the
// references themselves.
- if ( curr == capFirstNewRecord ) {
+ if ( curr == _capFirstNewRecord ) {
// Set 'capExtent' to the first nonempty extent prior to the
// initial capExtent. There must be such an extent because we
@@ -406,19 +407,19 @@ namespace mongo {
// In this case we will keep the initial capExtent and specify
// that all records contained within are on the fresh rather than
// stale side of the extent.
- DiskLoc newCapExtent = capExtent;
+ DiskLoc newCapExtent = _capExtent;
do {
// Find the previous extent, looping if necessary.
- newCapExtent = ( newCapExtent == firstExtent ) ? lastExtent : newCapExtent.ext()->xprev;
+ newCapExtent = ( newCapExtent == _firstExtent ) ? _lastExtent : newCapExtent.ext()->xprev;
newCapExtent.ext()->assertOk();
}
while ( newCapExtent.ext()->firstRecord.isNull() );
- capExtent.writing() = newCapExtent;
+ _capExtent.writing() = newCapExtent;
// Place all documents in the new capExtent on the fresh side
// of the capExtent by setting capFirstNewRecord to the first
// document in the new capExtent.
- capFirstNewRecord.writing() = theCapExtent()->firstRecord;
+ _capFirstNewRecord.writing() = theCapExtent()->firstRecord;
// update cappedLastDelRecLastExtent()
cappedTruncateLastDelUpdate();
@@ -429,14 +430,14 @@ namespace mongo {
void NamespaceDetails::emptyCappedCollection( const char *ns ) {
DEV verify( this == nsdetails(ns) );
massert( 13424, "collection must be capped", isCapped() );
- massert( 13425, "background index build in progress", !indexBuildsInProgress );
+ massert( 13425, "background index build in progress", !_indexBuildsInProgress );
vector<BSONObj> indexes = Helpers::findAll( Namespace( ns ).getSisterNS( "system.indexes" ) , BSON( "ns" << ns ) );
for ( unsigned i=0; i<indexes.size(); i++ ) {
indexes[i] = indexes[i].copy();
}
- if ( nIndexes ) {
+ if ( _nIndexes ) {
string errmsg;
BSONObjBuilder note;
bool res = dropIndexes( this , ns , "*" , errmsg , note , true );
@@ -455,27 +456,27 @@ namespace mongo {
t->cappedListOfAllDeletedRecords() = DiskLoc();
// preserve firstExtent/lastExtent
- t->capExtent = firstExtent;
- t->stats.datasize = stats.nrecords = 0;
+ t->_capExtent = _firstExtent;
+ t->_stats.datasize = _stats.nrecords = 0;
// lastExtentSize preserve
// nIndexes preserve 0
// capped preserve true
// max preserve
t->_paddingFactor = 1.0;
t->_systemFlags = 0;
- t->capFirstNewRecord = DiskLoc();
- t->capFirstNewRecord.setInvalid();
+ t->_capFirstNewRecord = DiskLoc();
+ t->_capFirstNewRecord.setInvalid();
t->cappedLastDelRecLastExtent().setInvalid();
// dataFileVersion preserve
// indexFileVersion preserve
- t->multiKeyIndexBits = 0;
- t->reservedA = 0;
- t->extraOffset = 0;
+ t->_multiKeyIndexBits = 0;
+ t->_reservedA = 0;
+ t->_extraOffset = 0;
// indexBuildInProgress preserve 0
- memset(t->reserved, 0, sizeof(t->reserved));
+ memset(t->_reserved, 0, sizeof(t->_reserved));
// Reset all existing extents and recreate the deleted list.
- for( DiskLoc ext = firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
+ for( DiskLoc ext = _firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
DiskLoc prev = ext.ext()->xprev;
DiskLoc next = ext.ext()->xnext;
DiskLoc empty = ext.ext()->reuse( ns, true );
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 38948b3e5de..6ea03d86f44 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -859,7 +859,7 @@ namespace mongo {
indexesInProg = stopIndexBuilds(dbname, cmdObj);
capped = nsd->isCapped();
if ( capped )
- for( DiskLoc i = nsd->firstExtent; !i.isNull(); i = i.ext()->xnext )
+ for( DiskLoc i = nsd->firstExtent(); !i.isNull(); i = i.ext()->xnext )
size += i.ext()->length;
}
diff --git a/src/mongo/db/commands/storage_details.cpp b/src/mongo/db/commands/storage_details.cpp
index 9197ce2e6df..b86ec1dff5e 100644
--- a/src/mongo/db/commands/storage_details.cpp
+++ b/src/mongo/db/commands/storage_details.cpp
@@ -368,7 +368,7 @@ namespace {
*/
const Extent* getNthExtent(int extentNum, const NamespaceDetails* nsd) {
int curExtent = 0;
- for (Extent* ex = DataFileMgr::getExtent(nsd->firstExtent);
+ for (Extent* ex = DataFileMgr::getExtent(nsd->firstExtent());
ex != NULL;
ex = ex->getNextExtent()) {
@@ -564,7 +564,7 @@ namespace {
if (processingDeletedRecords) {
for (int bucketNum = 0; bucketNum < mongo::Buckets; bucketNum++) {
- DiskLoc dl = nsd->deletedList[bucketNum];
+ DiskLoc dl = nsd->deletedListEntry(bucketNum);
while (!dl.isNull()) {
DeletedRecord* dr = dl.drec();
processDeletedRecord(dl, dr, ex, params, bucketNum, sliceData,
@@ -714,7 +714,7 @@ namespace {
success = analyzeExtent(nsd, ex, subCommand, globalParams, errmsg, outputBuilder);
}
else {
- const DiskLoc dl = nsd->firstExtent;
+ const DiskLoc dl = nsd->firstExtent();
if (dl.isNull()) {
errmsg = "no extents in namespace";
return false;
diff --git a/src/mongo/db/compact.cpp b/src/mongo/db/compact.cpp
index 2dd42009ae1..ccce11779ce 100644
--- a/src/mongo/db/compact.cpp
+++ b/src/mongo/db/compact.cpp
@@ -146,19 +146,16 @@ namespace mongo {
}
} // if !L.isNull()
- verify( d->firstExtent == diskloc );
- verify( d->lastExtent != diskloc );
+ verify( d->firstExtent() == diskloc );
+ verify( d->lastExtent() != diskloc );
DiskLoc newFirst = e->xnext;
- d->firstExtent.writing() = newFirst;
+ d->firstExtent().writing() = newFirst;
newFirst.ext()->xprev.writing().Null();
getDur().writing(e)->markEmpty();
freeExtents( diskloc, diskloc );
+
// update datasize/record count for this namespace's extent
- {
- NamespaceDetails::Stats *s = getDur().writing(&d->stats);
- s->datasize += datasize;
- s->nrecords += nrecords;
- }
+ d->incrementStats( datasize, nrecords );
getDur().commitIfNeeded();
@@ -180,7 +177,7 @@ namespace mongo {
getDur().commitIfNeeded();
list<DiskLoc> extents;
- for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext )
+ for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
extents.push_back(L);
log() << "compact " << extents.size() << " extents" << endl;
@@ -191,7 +188,8 @@ namespace mongo {
// same data, but might perform a little different after compact?
NamespaceDetailsTransient::get(ns).clearQueryCache();
- int nidx = d->nIndexes;
+ verify( d->getCompletedIndexCount() == d->getTotalIndexCount() );
+ int nidx = d->getCompletedIndexCount();
scoped_array<BSONObj> indexSpecs( new BSONObj[nidx] );
{
NamespaceDetails::IndexIterator ii = d->ii();
@@ -219,12 +217,10 @@ namespace mongo {
}
log() << "compact orphan deleted lists" << endl;
- for( int i = 0; i < Buckets; i++ ) {
- d->deletedList[i].writing().Null();
- }
+ d->orphanDeletedList();
// Start over from scratch with our extent sizing and growth
- d->lastExtentSize=0;
+ d->setLastExtentSize( 0 );
// before dropping indexes, at least make sure we can allocate one extent!
uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull());
@@ -245,11 +241,7 @@ namespace mongo {
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
- {
- NamespaceDetails::Stats *s = getDur().writing(&d->stats);
- s->datasize = 0;
- s->nrecords = 0;
- }
+ d->setStats( 0, 0 );
for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
skipped += compactExtent(ns, d, *i, n++, nidx, validate, pf, pb);
@@ -260,7 +252,7 @@ namespace mongo {
result.append("invalidObjects", skipped);
}
- verify( d->firstExtent.ext()->xprev.isNull() );
+ verify( d->firstExtent().ext()->xprev.isNull() );
// indexes will do their own progress meter?
pm.finished();
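The compact.cpp hunks above show the stats consolidation: open-coded journaled writes to the Stats struct are replaced by helpers this commit defines on NamespaceDetails (see namespace_details.cpp below). A before/after sketch of the call-site pattern:

    // before: every call site journaled the struct by hand
    NamespaceDetails::Stats* s = getDur().writing(&d->stats);
    s->datasize += datasize;
    s->nrecords += nrecords;

    // after: one journaled update behind the helper
    d->incrementStats(datasize, nrecords);   // internally getDur().writing(&_stats)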
diff --git a/src/mongo/db/cursor.cpp b/src/mongo/db/cursor.cpp
index 1ce5da1b36a..fd714d5b326 100644
--- a/src/mongo/db/cursor.cpp
+++ b/src/mongo/db/cursor.cpp
@@ -95,9 +95,9 @@ namespace mongo {
if ( !nsd->capLooped() )
start = nsd->firstRecord();
else {
- start = nsd->capExtent.ext()->firstRecord;
- if ( !start.isNull() && start == nsd->capFirstNewRecord ) {
- start = nsd->capExtent.ext()->lastRecord;
+ start = nsd->capExtent().ext()->firstRecord;
+ if ( !start.isNull() && start == nsd->capFirstNewRecord() ) {
+ start = nsd->capExtent().ext()->lastRecord;
start = nextLoop( nsd, start );
}
}
@@ -114,16 +114,16 @@ namespace mongo {
DiskLoc i = prev;
// Last record
- if ( i == nsd->capExtent.ext()->lastRecord )
+ if ( i == nsd->capExtent().ext()->lastRecord )
return DiskLoc();
i = nextLoop( nsd, i );
// If we become capFirstNewRecord from same extent, advance to next extent.
- if ( i == nsd->capFirstNewRecord &&
- i != nsd->capExtent.ext()->firstRecord )
- i = nextLoop( nsd, nsd->capExtent.ext()->lastRecord );
+ if ( i == nsd->capFirstNewRecord() &&
+ i != nsd->capExtent().ext()->firstRecord )
+ i = nextLoop( nsd, nsd->capExtent().ext()->lastRecord );
// If we have just gotten to beginning of capExtent, skip to capFirstNewRecord
- if ( i == nsd->capExtent.ext()->firstRecord )
- i = nsd->capFirstNewRecord;
+ if ( i == nsd->capExtent().ext()->firstRecord )
+ i = nsd->capFirstNewRecord();
return i;
}
@@ -137,7 +137,7 @@ namespace mongo {
start = nsd->lastRecord();
}
else {
- start = nsd->capExtent.ext()->lastRecord;
+ start = nsd->capExtent().ext()->lastRecord;
}
}
curr = start;
@@ -152,26 +152,26 @@ namespace mongo {
DiskLoc i = prev;
// Last record
- if ( nsd->capFirstNewRecord == nsd->capExtent.ext()->firstRecord ) {
- if ( i == nextLoop( nsd, nsd->capExtent.ext()->lastRecord ) ) {
+ if ( nsd->capFirstNewRecord() == nsd->capExtent().ext()->firstRecord ) {
+ if ( i == nextLoop( nsd, nsd->capExtent().ext()->lastRecord ) ) {
return DiskLoc();
}
}
else {
- if ( i == nsd->capExtent.ext()->firstRecord ) {
+ if ( i == nsd->capExtent().ext()->firstRecord ) {
return DiskLoc();
}
}
// If we are capFirstNewRecord, advance to prev extent, otherwise just get prev.
- if ( i == nsd->capFirstNewRecord )
- i = prevLoop( nsd, nsd->capExtent.ext()->firstRecord );
+ if ( i == nsd->capFirstNewRecord() )
+ i = prevLoop( nsd, nsd->capExtent().ext()->firstRecord );
else
i = prevLoop( nsd, i );
// If we just became last in cap extent, advance past capFirstNewRecord
// (We know capExtent.ext()->firstRecord != capFirstNewRecord, since would
// have returned DiskLoc() earlier otherwise.)
- if ( i == nsd->capExtent.ext()->lastRecord )
- i = reverse()->next( nsd->capFirstNewRecord );
+ if ( i == nsd->capExtent().ext()->lastRecord )
+ i = reverse()->next( nsd->capFirstNewRecord() );
return i;
}
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 9e6e76e5a95..e60372f2edf 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -1202,7 +1202,7 @@ namespace mongo {
Client::Context ctx( ns );
NamespaceDetails *d = nsdetails(ns);
- if ( ! d || d->stats.nrecords == 0 ) {
+ if ( ! d || d->numRecords() == 0 ) {
result.appendNumber( "size" , 0 );
result.appendNumber( "numObjects" , 0 );
result.append( "millis" , timer.millis() );
@@ -1214,8 +1214,8 @@ namespace mongo {
shared_ptr<Cursor> c;
if ( min.isEmpty() && max.isEmpty() ) {
if ( estimate ) {
- result.appendNumber( "size" , d->stats.datasize );
- result.appendNumber( "numObjects" , d->stats.nrecords );
+ result.appendNumber( "size" , d->dataSize() );
+ result.appendNumber( "numObjects" , d->numRecords() );
result.append( "millis" , timer.millis() );
return 1;
}
@@ -1246,7 +1246,7 @@ namespace mongo {
c.reset( BtreeCursor::make( d, *idx, min, max, false, 1 ) );
}
- long long avgObjSize = d->stats.datasize / d->stats.nrecords;
+ long long avgObjSize = d->dataSize() / d->numRecords();
long long maxSize = jsobj["maxSize"].numberLong();
long long maxObjects = jsobj["maxObjects"].numberLong();
@@ -1304,9 +1304,9 @@ namespace mongo {
log() << "error: have index [" << collNS << "] but no NamespaceDetails" << endl;
continue;
}
- totalSize += mine->stats.datasize;
+ totalSize += mine->dataSize();
if ( details )
- details->appendNumber( d.indexName() , mine->stats.datasize / scale );
+ details->appendNumber( d.indexName() , mine->dataSize() / scale );
}
return totalSize;
}
@@ -1355,19 +1355,19 @@ namespace mongo {
bool verbose = jsobj["verbose"].trueValue();
- long long size = nsd->stats.datasize / scale;
- result.appendNumber( "count" , nsd->stats.nrecords );
+ long long size = nsd->dataSize() / scale;
+ result.appendNumber( "count" , nsd->numRecords() );
result.appendNumber( "size" , size );
- if( nsd->stats.nrecords )
- result.append ( "avgObjSize" , double(size) / double(nsd->stats.nrecords) );
+ if( nsd->numRecords() )
+ result.append ( "avgObjSize" , double(size) / double(nsd->numRecords()) );
int numExtents;
BSONArrayBuilder extents;
result.appendNumber( "storageSize" , nsd->storageSize( &numExtents , verbose ? &extents : 0 ) / scale );
result.append( "numExtents" , numExtents );
- result.append( "nindexes" , nsd->nIndexes );
- result.append( "lastExtentSize" , nsd->lastExtentSize / scale );
+ result.append( "nindexes" , nsd->getCompletedIndexCount() );
+ result.append( "lastExtentSize" , nsd->lastExtentSize() / scale );
result.append( "paddingFactor" , nsd->paddingFactor() );
result.append( "systemFlags" , nsd->systemFlags() );
result.append( "userFlags" , nsd->userFlags() );
@@ -1552,17 +1552,17 @@ namespace mongo {
}
ncollections += 1;
- objects += nsd->stats.nrecords;
- size += nsd->stats.datasize;
+ objects += nsd->numRecords();
+ size += nsd->dataSize();
int temp;
storageSize += nsd->storageSize( &temp );
numExtents += temp;
- indexes += nsd->nIndexes;
+ indexes += nsd->getCompletedIndexCount();
indexSize += getIndexSizeForCollection(dbname, ns);
}
-
+
result.append ( "db" , dbname );
result.appendNumber( "collections" , ncollections );
result.appendNumber( "objects" , objects );
@@ -1628,9 +1628,9 @@ namespace mongo {
string toNs = dbname + "." + to;
NamespaceDetails *nsd = nsdetails( fromNs );
massert( 10301 , "source collection " + fromNs + " does not exist", nsd );
- long long excessSize = nsd->stats.datasize - size * 2; // datasize and extentSize can't be compared exactly, so add some padding to 'size'
- DiskLoc extent = nsd->firstExtent;
- for( ; excessSize > extent.ext()->length && extent != nsd->lastExtent; extent = extent.ext()->xnext ) {
+ long long excessSize = nsd->dataSize() - size * 2; // datasize and extentSize can't be compared exactly, so add some padding to 'size'
+ DiskLoc extent = nsd->firstExtent();
+ for( ; excessSize > extent.ext()->length && extent != nsd->lastExtent(); extent = extent.ext()->xnext ) {
excessSize -= extent.ext()->length;
LOG( 2 ) << "cloneCollectionAsCapped skipping extent of size " << extent.ext()->length << endl;
LOG( 6 ) << "excessSize: " << excessSize << endl;
diff --git a/src/mongo/db/dbcommands_admin.cpp b/src/mongo/db/dbcommands_admin.cpp
index 09a4242db2f..7fd831dd35f 100644
--- a/src/mongo/db/dbcommands_admin.cpp
+++ b/src/mongo/db/dbcommands_admin.cpp
@@ -82,8 +82,7 @@ namespace mongo {
return 0;
}
- for ( int i = 0; i < Buckets; i++ )
- d->deletedList[i].Null();
+ d->orphanDeletedList();
result.append("ns", dropns.c_str());
return 1;
@@ -228,18 +227,18 @@ namespace mongo {
result.appendNumber("max", d->maxCappedDocs());
}
- result.append("firstExtent", str::stream() << d->firstExtent.toString()
- << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
- result.append( "lastExtent", str::stream() << d->lastExtent.toString()
- << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString());
+ result.append( "firstExtent", str::stream() << d->firstExtent().toString()
+ << " ns:" << d->firstExtent().ext()->nsDiagnostic.toString());
+ result.append( "lastExtent", str::stream() << d->lastExtent().toString()
+ << " ns:" << d->lastExtent().ext()->nsDiagnostic.toString());
BSONArrayBuilder extentData;
int extentCount = 0;
try {
- d->firstExtent.ext()->assertOk();
- d->lastExtent.ext()->assertOk();
+ d->firstExtent().ext()->assertOk();
+ d->lastExtent().ext()->assertOk();
- DiskLoc extentDiskLoc = d->firstExtent;
+ DiskLoc extentDiskLoc = d->firstExtent();
while (!extentDiskLoc.isNull()) {
Extent* thisExtent = extentDiskLoc.ext();
if (full) {
@@ -258,9 +257,9 @@ namespace mongo {
errors << sb.str();
valid = false;
}
- if (nextDiskLoc.isNull() && extentDiskLoc != d->lastExtent) {
+ if (nextDiskLoc.isNull() && extentDiskLoc != d->lastExtent()) {
StringBuilder sb;
- sb << "'lastExtent' pointer " << d->lastExtent.toString()
+ sb << "'lastExtent' pointer " << d->lastExtent().toString()
<< " does not point to last extent in list " << extentDiskLoc.toString();
errors << sb.str();
valid = false;
@@ -282,42 +281,42 @@ namespace mongo {
if ( full )
result.appendArray( "extents" , extentData.arr() );
- result.appendNumber("datasize", d->stats.datasize);
- result.appendNumber("nrecords", d->stats.nrecords);
- result.appendNumber("lastExtentSize", d->lastExtentSize);
+ result.appendNumber("datasize", d->dataSize());
+ result.appendNumber("nrecords", d->numRecords());
+ result.appendNumber("lastExtentSize", d->lastExtentSize());
result.appendNumber("padding", d->paddingFactor());
try {
bool testingLastExtent = false;
try {
- if (d->firstExtent.isNull()) {
+ if (d->firstExtent().isNull()) {
errors << "'firstExtent' pointer is null";
valid=false;
}
else {
- result.append("firstExtentDetails", d->firstExtent.ext()->dump());
- if (!d->firstExtent.ext()->xprev.isNull()) {
+ result.append("firstExtentDetails", d->firstExtent().ext()->dump());
+ if (!d->firstExtent().ext()->xprev.isNull()) {
StringBuilder sb;
- sb << "'xprev' pointer in 'firstExtent' " << d->firstExtent.toString()
- << " is " << d->firstExtent.ext()->xprev.toString()
+ sb << "'xprev' pointer in 'firstExtent' " << d->firstExtent().toString()
+ << " is " << d->firstExtent().ext()->xprev.toString()
<< ", should be null";
errors << sb.str();
valid=false;
}
}
testingLastExtent = true;
- if (d->lastExtent.isNull()) {
+ if (d->lastExtent().isNull()) {
errors << "'lastExtent' pointer is null";
valid=false;
}
else {
- if (d->firstExtent != d->lastExtent) {
- result.append("lastExtentDetails", d->lastExtent.ext()->dump());
- if (!d->lastExtent.ext()->xnext.isNull()) {
+ if (d->firstExtent() != d->lastExtent()) {
+ result.append("lastExtentDetails", d->lastExtent().ext()->dump());
+ if (!d->lastExtent().ext()->xnext.isNull()) {
StringBuilder sb;
- sb << "'xnext' pointer in 'lastExtent' " << d->lastExtent.toString()
- << " is " << d->lastExtent.ext()->xnext.toString()
+ sb << "'xnext' pointer in 'lastExtent' " << d->lastExtent().toString()
+ << " is " << d->lastExtent().ext()->xnext.toString()
<< ", should be null";
errors << sb.str();
valid = false;
@@ -434,7 +433,7 @@ namespace mongo {
BSONArrayBuilder deletedListArray;
for ( int i = 0; i < Buckets; i++ ) {
- deletedListArray << d->deletedList[i].isNull();
+ deletedListArray << d->deletedListEntry(i).isNull();
}
int ndel = 0;
@@ -442,7 +441,7 @@ namespace mongo {
BSONArrayBuilder delBucketSizes;
int incorrect = 0;
for ( int i = 0; i < Buckets; i++ ) {
- DiskLoc loc = d->deletedList[i];
+ DiskLoc loc = d->deletedListEntry(i);
try {
int k = 0;
while ( !loc.isNull() ) {
@@ -493,7 +492,7 @@ namespace mongo {
int idxn = 0;
try {
- result.append("nIndexes", d->nIndexes);
+ result.append("nIndexes", d->getCompletedIndexCount());
BSONObjBuilder indexes; // not using subObjStart to be exception safe
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 65680c009b8..d9d13b4495f 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -55,7 +55,7 @@ namespace mongo {
}
}
- if( d->nIndexes >= NamespaceDetails::NIndexesMax ) {
+ if( d->getCompletedIndexCount() >= NamespaceDetails::NIndexesMax ) {
problem() << "Helper::ensureIndex fails, MaxIndexes exceeded " << ns << '\n';
return;
}
@@ -473,10 +473,10 @@ namespace mongo {
// sizes will vary
long long avgDocsWhenFull;
long long avgDocSizeBytes;
- const long long totalDocsInNS = details->stats.nrecords;
+ const long long totalDocsInNS = details->numRecords();
if ( totalDocsInNS > 0 ) {
// TODO: Figure out what's up here
- avgDocSizeBytes = details->stats.datasize / totalDocsInNS;
+ avgDocSizeBytes = details->dataSize() / totalDocsInNS;
avgDocsWhenFull = maxChunkSizeBytes / avgDocSizeBytes;
avgDocsWhenFull = std::min( kMaxDocsPerChunk + 1,
130 * avgDocsWhenFull / 100 /* slack */);
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
index 57fd1c4ca42..63ed35fba51 100644
--- a/src/mongo/db/index.cpp
+++ b/src/mongo/db/index.cpp
@@ -55,7 +55,7 @@ namespace mongo {
call. repair database is the clean solution, but this gives one a lighter weight
partial option. see dropIndexes()
*/
- void assureSysIndexesEmptied(const char *ns, IndexDetails *idIndex) {
+ int assureSysIndexesEmptied(const char *ns, IndexDetails *idIndex) {
string system_indexes = cc().database()->name + ".system.indexes";
BSONObjBuilder b;
b.append("ns", ns);
@@ -67,6 +67,7 @@ namespace mongo {
if( n ) {
log() << "info: assureSysIndexesEmptied cleaned up " << n << " entries" << endl;
}
+ return n;
}
int IndexDetails::keyPatternOffset( const string& key ) const {
@@ -83,6 +84,7 @@ namespace mongo {
/* delete this index. does NOT clean up the system catalog
(system.indexes or system.namespaces) -- only NamespaceIndex.
TODO: above comment is wrong; also, document durability assumptions
*/
void IndexDetails::kill_idx() {
string ns = indexNamespace(); // e.g. foo.coll.$ts_1
@@ -224,7 +226,7 @@ namespace mongo {
return false;
}
- if ( sourceCollection->nIndexes >= NamespaceDetails::NIndexesMax ) {
+ if ( sourceCollection->getTotalIndexCount() >= NamespaceDetails::NIndexesMax ) {
stringstream ss;
ss << "add index fails, too many indexes for " << sourceNS << " key:" << key.toString();
string s = ss.str();
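assureSysIndexesEmptied() now returns how many orphaned system.indexes entries it deleted, so dropIndexes() (in index_update.cpp below) can assert that the catalog was already clean after the per-index drops:

    // the new caller contract, as used in dropIndexes():
    int n = assureSysIndexesEmptied(ns, idIndex);   // entries removed
    verify( 0 == n );   // a correct drop leaves nothing to clean up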
diff --git a/src/mongo/db/index.h b/src/mongo/db/index.h
index 8ee862e887f..75009743f43 100644
--- a/src/mongo/db/index.h
+++ b/src/mongo/db/index.h
@@ -176,7 +176,8 @@ namespace mongo {
class NamespaceDetails;
// changedId should be initialized to false
- void assureSysIndexesEmptied(const char *ns, IndexDetails *exceptForIdIndex);
+ // @return how many things were deleted
+ int assureSysIndexesEmptied(const char *ns, IndexDetails *exceptForIdIndex);
int removeFromSysIndexes(const char *ns, const char *idxName);
/**
diff --git a/src/mongo/db/index/btree_based_builder.cpp b/src/mongo/db/index/btree_based_builder.cpp
index 2f3d7d5f172..42618de14e9 100644
--- a/src/mongo/db/index/btree_based_builder.cpp
+++ b/src/mongo/db/index/btree_based_builder.cpp
@@ -188,10 +188,10 @@ namespace mongo {
/* get and sort all the keys ----- */
ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort",
"Index: (1/3) External Sort Progress",
- d->stats.nrecords,
+ d->numRecords(),
10));
SortPhaseOne phase1;
- addKeysToPhaseOne(d, ns, idx, order, &phase1, d->stats.nrecords, pm.get(),
+ addKeysToPhaseOne(d, ns, idx, order, &phase1, d->numRecords(), pm.get(),
mayInterrupt, idxNo );
pm.finished();
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 14595983408..7f64f07dc5f 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -140,7 +140,7 @@ namespace mongo {
// Is this index being created in the background?
bool isBackgroundIndex() {
- return _indexNumber >= _namespaceDetails->nIndexes;
+ return _indexNumber >= _namespaceDetails->getCompletedIndexCount();
}
private:
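isBackgroundIndex() relies on the slot layout of the index array: completed indexes occupy the first getCompletedIndexCount() slots, and in-progress background builds sit immediately after them, up to getTotalIndexCount(). A sketch of the assumed layout:

    // index slot layout inside NamespaceDetails:
    //   [0, getCompletedIndexCount())                     completed indexes
    //   [getCompletedIndexCount(), getTotalIndexCount())  background builds in progress
    bool isBackground = indexNumber >= nsd->getCompletedIndexCount();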
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 8a4f47d1999..6c16b073b20 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -67,7 +67,7 @@ namespace mongo {
Client::WriteContext ctx(ns);
NamespaceDetails* nsd = nsdetails(ns);
- if (!nsd || !nsd->indexBuildsInProgress) {
+ if ( nsd == NULL || nsd->getIndexBuildsInProgress() == 0 ) {
continue;
}
@@ -82,40 +82,23 @@ namespace mongo {
if (!cmdLine.indexBuildRetry) {
// If we crash between unsetting the inProg flag and cleaning up the index, the
// index space will be lost.
- int inProg = nsd->indexBuildsInProgress;
-
- getDur().writingInt(nsd->indexBuildsInProgress) = 0;
-
- for (int i = 0; i < inProg; i++) {
- nsd->idx(nsd->nIndexes+i).kill_idx();
- }
-
+ nsd->blowAwayInProgressIndexEntries();
continue;
}
// We go from right to left building these indexes, so that indexBuildInProgress-- has
// the correct effect of "popping" an index off the list.
- while (nsd->indexBuildsInProgress > 0) {
- retryIndexBuild(dbName, nsd, nsd->nIndexes+nsd->indexBuildsInProgress-1);
+ while ( nsd->getTotalIndexCount() > nsd->getCompletedIndexCount() ) {
+ retryIndexBuild(dbName, nsd);
}
}
}
void IndexRebuilder::retryIndexBuild(const std::string& dbName,
- NamespaceDetails* nsd,
- const int index) {
- // details.info is always a valid system.indexes entry because DataFileMgr::insert journals
- // creating the index doc and then insert_makeIndex durably assigns its DiskLoc to info.
- // indexBuildsInProgress is set after that, so if it is set, info must be set.
- IndexDetails& details = nsd->idx(index);
-
+ NamespaceDetails* nsd ) {
// First, clean up the in progress index build. Save the system.indexes entry so that we
// can add it again afterwards.
- BSONObj indexObj = details.info.obj().getOwned();
-
- // Clean up the in-progress index build
- getDur().writingInt(nsd->indexBuildsInProgress) -= 1;
- details.kill_idx();
+ BSONObj indexObj = nsd->prepOneUnfinishedIndex();
// The index has now been removed from system.indexes, so the only record of it is in-
// memory. If there is a journal commit between now and when insert() rewrites the entry and
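retryIndexBuild() no longer takes an index offset: unfinished builds always sit at the tail of the index array, so prepOneUnfinishedIndex() pops the last one, killing the partial index while saving its spec for re-insertion. The caller then simply drains the tail:

    // pop unfinished builds right to left until completed == total
    while ( nsd->getTotalIndexCount() > nsd->getCompletedIndexCount() ) {
        retryIndexBuild(dbName, nsd);   // calls nsd->prepOneUnfinishedIndex()
    }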
diff --git a/src/mongo/db/index_rebuilder.h b/src/mongo/db/index_rebuilder.h
index 4abc2d133ee..2935630da01 100644
--- a/src/mongo/db/index_rebuilder.h
+++ b/src/mongo/db/index_rebuilder.h
@@ -43,9 +43,8 @@ namespace mongo {
* @param nsd the namespace details of the namespace building the index
* @param index the offset into nsd's index array of the partially-built index
*/
- void retryIndexBuild(const std::string& dbName,
- NamespaceDetails* nsd,
- const int index);
+ void retryIndexBuild(const std::string& dbName,
+ NamespaceDetails* nsd );
};
extern IndexRebuilder indexRebuilder;
diff --git a/src/mongo/db/index_update.cpp b/src/mongo/db/index_update.cpp
index 24a387a3e0c..84bdd44c10f 100644
--- a/src/mongo/db/index_update.cpp
+++ b/src/mongo/db/index_update.cpp
@@ -30,7 +30,6 @@
#include "mongo/db/repl/is_master.h"
#include "mongo/db/repl/rs.h"
#include "mongo/util/processinfo.h"
-#include "mongo/util/startup_test.h"
namespace mongo {
@@ -63,7 +62,7 @@ namespace mongo {
for (int i = 0; i < numIndices; i++) {
// If i >= d->nIndexes, it's a background index, and we DO NOT want to log anything.
- bool logIfError = (i < nsd->nIndexes) ? !noWarn : false;
+ bool logIfError = (i < nsd->getCompletedIndexCount()) ? !noWarn : false;
_unindexRecord(nsd, i, obj, dl, logIfError);
}
}
@@ -129,7 +128,7 @@ namespace mongo {
ProgressMeter& progress = cc().curop()->setMessage("bg index build",
"Background Index Build Progress",
- d->stats.nrecords);
+ d->numRecords());
unsigned long long n = 0;
unsigned long long numDropped = 0;
@@ -199,7 +198,7 @@ namespace mongo {
getDur().commitIfNeeded();
if ( cc->yieldSometimes( ClientCursor::WillNeed ) ) {
- progress.setTotalWhileRunning( d->stats.nrecords );
+ progress.setTotalWhileRunning( d->numRecords() );
// Recalculate idxNo if we yielded
idxNo = IndexBuildsInProgress::get(ns, idxName);
@@ -317,51 +316,44 @@ namespace mongo {
theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize(), mayInterrupt, true);
}
- /* remove bit from a bit array - actually remove its slot, not a clear
- note: this function does not work with x == 63 -- that is ok
- but keep in mind in the future if max indexes were extended to
- exactly 64 it would be a problem
- */
- unsigned long long removeBit(unsigned long long b, int x) {
- unsigned long long tmp = b;
- return
- (tmp & ((((unsigned long long) 1) << x)-1)) |
- ((tmp >> (x+1)) << x);
- }
-
bool dropIndexes(NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool mayDeleteIdIndex) {
BackgroundOperation::assertNoBgOpInProgForNs(ns);
- d = d->writingWithExtra();
- d->aboutToDeleteAnIndex();
-
/* there may be pointers pointing at keys in the btree(s). kill them. */
ClientCursor::invalidate(ns);
// delete a specific index or all?
if ( *name == '*' && name[1] == 0 ) {
- LOG(4) << " d->nIndexes was " << d->nIndexes << std::endl;
- anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
+ // this should be covered by assertNoBgOpInProgForNs above, but being paranoid
+ verify( d->getCompletedIndexCount() == d->getTotalIndexCount() );
+
+ LOG(4) << " d->nIndexes was " << d->getCompletedIndexCount() << std::endl;
+ anObjBuilder.appendNumber("nIndexesWas", d->getCompletedIndexCount() );
IndexDetails *idIndex = 0;
- if( d->nIndexes ) {
- for ( int i = 0; i < d->nIndexes; i++ ) {
+
+ while ( 1 ) {
+ bool didAnything = false;
+
+ for ( int i = 0; i < d->getCompletedIndexCount(); i++ ) {
+
if ( !mayDeleteIdIndex && d->idx(i).isIdIndex() ) {
idIndex = &d->idx(i);
+ continue;
}
- else {
- d->idx(i).kill_idx();
- }
+ didAnything = true;
+ d->removeIndex( i );
+ break;
}
- d->nIndexes = 0;
+
+ if ( !didAnything )
+ break;
}
+
if ( idIndex ) {
- d->getNextIndexDetails(ns) = *idIndex;
- d->addIndex(ns);
- wassert( d->nIndexes == 1 );
+ verify( d->getCompletedIndexCount() == 1 );
}
- /* assuming here that id index is not multikey: */
- d->multiKeyIndexBits = 0;
- assureSysIndexesEmptied(ns, idIndex);
+
+ verify( 0 == assureSysIndexesEmptied(ns, idIndex) );
anObjBuilder.append("msg", mayDeleteIdIndex ?
"indexes dropped for collection" :
"non-_id indexes dropped for collection");
@@ -370,23 +362,16 @@ namespace mongo {
// delete just one index
int x = d->findIndexByName(name);
if ( x >= 0 ) {
- LOG(4) << " d->nIndexes was " << d->nIndexes << endl;
- anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
-
- /* note it is important we remove the IndexDetails with this
- call, otherwise, on recreate, the old one would be reused, and its
- IndexDetails::info ptr would be bad info.
- */
- IndexDetails *id = &d->idx(x);
- if ( !mayDeleteIdIndex && id->isIdIndex() ) {
+
+ if ( !mayDeleteIdIndex && d->idx(x).isIdIndex() ) {
errmsg = "may not delete _id index";
return false;
}
- id->kill_idx();
- d->multiKeyIndexBits = removeBit(d->multiKeyIndexBits, x);
- d->nIndexes--;
- for ( int i = x; i < d->nIndexes; i++ )
- d->idx(i) = d->idx(i+1);
+
+ LOG(4) << " d->nIndexes was " << d->getCompletedIndexCount() << endl;
+ anObjBuilder.appendNumber("nIndexesWas", d->getCompletedIndexCount() );
+
+ d->removeIndex( x );
}
else {
int n = removeFromSysIndexes(ns, name); // just in case an orphaned listing there - i.e. should have been repaired but wasn't
@@ -401,16 +386,4 @@ namespace mongo {
return true;
}
- class IndexUpdateTest : public StartupTest {
- public:
- void run() {
- verify( removeBit(1, 0) == 0 );
- verify( removeBit(2, 0) == 1 );
- verify( removeBit(2, 1) == 0 );
- verify( removeBit(255, 1) == 127 );
- verify( removeBit(21, 2) == 9 );
- verify( removeBit(0x4000000000000001ULL, 62) == 1 );
- }
- } iu_unittest;
-
} // namespace mongo
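removeBit() and its StartupTest are not deleted outright; they move into namespace_details.cpp as removeAndSlideBit() (below), since sliding the multikey bits is now internal to NamespaceDetails::removeIndex(). The bit trick, worked through on one of the test values:

    // delete bit x and slide the higher bits down one slot, mirroring the
    // idx(i) = idx(i+1) compaction of the index array:
    //   removeAndSlideBit(0b10101, 2)
    //     low  = b & ((1ULL << 2) - 1)   = 0b001
    //     high = (b >> 3) << 2           = 0b1000
    //     low | high                     = 0b1001 == 9   (matches the test below)
    unsigned long long removeAndSlideBit(unsigned long long b, int x) {
        return (b & ((1ULL << x) - 1)) | ((b >> (x + 1)) << x);
    }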
diff --git a/src/mongo/db/namespace_details-inl.h b/src/mongo/db/namespace_details-inl.h
index 9e8ccbb37e6..a0b8a1d1a7b 100644
--- a/src/mongo/db/namespace_details-inl.h
+++ b/src/mongo/db/namespace_details-inl.h
@@ -98,7 +98,7 @@ namespace mongo {
bool includeBackgroundInProgress) {
d = _d;
i = 0;
- n = includeBackgroundInProgress ? d->getTotalIndexCount() : d->nIndexes;
+ n = includeBackgroundInProgress ? d->getTotalIndexCount() : d->_nIndexes;
}
}
diff --git a/src/mongo/db/namespace_details.cpp b/src/mongo/db/namespace_details.cpp
index e5da0d6feb6..857267ec2ae 100644
--- a/src/mongo/db/namespace_details.cpp
+++ b/src/mongo/db/namespace_details.cpp
@@ -31,6 +31,8 @@
#include "mongo/db/pdfile.h"
#include "mongo/scripting/engine.h"
#include "mongo/util/hashtab.h"
+#include "mongo/util/startup_test.h"
+
namespace mongo {
@@ -49,29 +51,29 @@ namespace mongo {
NamespaceDetails::NamespaceDetails( const DiskLoc &loc, bool capped ) {
/* be sure to initialize new fields here -- doesn't default to zeroes the way we use it */
- firstExtent = lastExtent = capExtent = loc;
- stats.datasize = stats.nrecords = 0;
- lastExtentSize = 0;
- nIndexes = 0;
+ _firstExtent = _lastExtent = _capExtent = loc;
+ _stats.datasize = _stats.nrecords = 0;
+ _lastExtentSize = 0;
+ _nIndexes = 0;
_isCapped = capped;
_maxDocsInCapped = 0x7fffffff; // no limit (value is for pre-v2.3.2 compatibility)
_paddingFactor = 1.0;
_systemFlags = 0;
_userFlags = 0;
- capFirstNewRecord = DiskLoc();
+ _capFirstNewRecord = DiskLoc();
// Signal that we are on first allocation iteration through extents.
- capFirstNewRecord.setInvalid();
+ _capFirstNewRecord.setInvalid();
// For capped case, signal that we are doing initial extent allocation.
if ( capped )
cappedLastDelRecLastExtent().setInvalid();
- verify( sizeof(dataFileVersion) == 2 );
- dataFileVersion = 0;
- indexFileVersion = 0;
- multiKeyIndexBits = 0;
- reservedA = 0;
- extraOffset = 0;
- indexBuildsInProgress = 0;
- memset(reserved, 0, sizeof(reserved));
+ verify( sizeof(_dataFileVersion) == 2 );
+ _dataFileVersion = 0;
+ _indexFileVersion = 0;
+ _multiKeyIndexBits = 0;
+ _reservedA = 0;
+ _extraOffset = 0;
+ _indexBuildsInProgress = 0;
+ memset(_reserved, 0, sizeof(_reserved));
}
bool NamespaceIndex::exists() const {
@@ -220,7 +222,7 @@ namespace mongo {
}
else {
int b = bucket(d->lengthWithHeaders());
- DiskLoc& list = deletedList[b];
+ DiskLoc& list = _deletedList[b];
DiskLoc oldHead = list;
getDur().writingDiskLoc(list) = dloc;
d->nextDeleted() = oldHead;
@@ -341,8 +343,8 @@ namespace mongo {
DiskLoc bestmatch;
int bestmatchlen = 0x7fffffff;
int b = bucket(len);
- DiskLoc cur = deletedList[b];
- prev = &deletedList[b];
+ DiskLoc cur = _deletedList[b];
+ prev = &_deletedList[b];
int extra = 5; // look for a better fit, a little.
int chain = 0;
while ( 1 ) {
@@ -368,8 +370,8 @@ namespace mongo {
// out of space. alloc a new extent.
return DiskLoc();
}
- cur = deletedList[b];
- prev = &deletedList[b];
+ cur = _deletedList[b];
+ prev = &_deletedList[b];
continue;
}
DeletedRecord *r = cur.drec();
@@ -415,7 +417,7 @@ namespace mongo {
void NamespaceDetails::dumpDeleted(set<DiskLoc> *extents) {
for ( int i = 0; i < Buckets; i++ ) {
- DiskLoc dl = deletedList[i];
+ DiskLoc dl = _deletedList[i];
while ( !dl.isNull() ) {
DeletedRecord *r = dl.drec();
DiskLoc extLoc(dl.a(), r->extentOfs());
@@ -432,7 +434,7 @@ namespace mongo {
}
DiskLoc NamespaceDetails::firstRecord( const DiskLoc &startExtent ) const {
- for (DiskLoc i = startExtent.isNull() ? firstExtent : startExtent;
+ for (DiskLoc i = startExtent.isNull() ? _firstExtent : startExtent;
!i.isNull(); i = i.ext()->xnext ) {
if ( !i.ext()->firstRecord.isNull() )
return i.ext()->firstRecord;
@@ -441,7 +443,7 @@ namespace mongo {
}
DiskLoc NamespaceDetails::lastRecord( const DiskLoc &startExtent ) const {
- for (DiskLoc i = startExtent.isNull() ? lastExtent : startExtent;
+ for (DiskLoc i = startExtent.isNull() ? _lastExtent : startExtent;
!i.isNull(); i = i.ext()->xprev ) {
if ( !i.ext()->lastRecord.isNull() )
return i.ext()->lastRecord;
@@ -454,16 +456,16 @@ namespace mongo {
if ( ++n_complaints_cap < 8 ) {
out() << "couldn't make room for new record (len: " << len << ") in capped ns " << ns << '\n';
int i = 0;
- for ( DiskLoc e = firstExtent; !e.isNull(); e = e.ext()->xnext, ++i ) {
+ for ( DiskLoc e = _firstExtent; !e.isNull(); e = e.ext()->xnext, ++i ) {
out() << " Extent " << i;
- if ( e == capExtent )
+ if ( e == _capExtent )
out() << " (capExtent)";
out() << '\n';
out() << " magic: " << hex << e.ext()->magic << dec << " extent->ns: " << e.ext()->nsDiagnostic.toString() << '\n';
out() << " fr: " << e.ext()->firstRecord.toString() <<
" lr: " << e.ext()->lastRecord.toString() << " extent->len: " << e.ext()->length << '\n';
}
- verify( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
+ verify( len * 5 > _lastExtentSize ); // assume it is unusually large record; if not, something is broken
}
}
@@ -526,8 +528,8 @@ namespace mongo {
Extra *e = ni->newExtra(ns, i, this);
long ofs = e->ofsFrom(this);
if( i == 0 ) {
- verify( extraOffset == 0 );
- *getDur().writing(&extraOffset) = ofs;
+ verify( _extraOffset == 0 );
+ *getDur().writing(&_extraOffset) = ofs;
verify( extra() == e );
}
else {
@@ -545,21 +547,21 @@ namespace mongo {
if (multikey) {
// Shortcut if the bit is already set correctly
- if (multiKeyIndexBits & mask) {
+ if (_multiKeyIndexBits & mask) {
return;
}
- *getDur().writing(&multiKeyIndexBits) |= mask;
+ *getDur().writing(&_multiKeyIndexBits) |= mask;
}
else {
// Shortcut if the bit is already set correctly
- if (!(multiKeyIndexBits & mask)) {
+ if (!(_multiKeyIndexBits & mask)) {
return;
}
// Invert mask: all 1's except a 0 at the ith bit
mask = ~mask;
- *getDur().writing(&multiKeyIndexBits) &= mask;
+ *getDur().writing(&_multiKeyIndexBits) &= mask;
}
NamespaceDetailsTransient::get(thisns).clearQueryCache();
@@ -579,13 +581,13 @@ namespace mongo {
/* you MUST call when adding an index. see pdfile.cpp */
void NamespaceDetails::addIndex(const char* thisns) {
- (*getDur().writing(&nIndexes))++;
NamespaceDetailsTransient::get(thisns).addedIndex();
+ (*getDur().writing(&_nIndexes))++;
}
// must be called when renaming a NS to fix up extra
void NamespaceDetails::copyingFrom(const char *thisns, NamespaceDetails *src) {
- extraOffset = 0; // we are a copy -- the old value is wrong. fixing it up below.
+ _extraOffset = 0; // we are a copy -- the old value is wrong. fixing it up below.
Extra *se = src->extra();
int n = NIndexesBase;
if( se ) {
@@ -599,7 +601,7 @@ namespace mongo {
e->setNext( nxt->ofsFrom(this) );
e = nxt;
}
- verify( extraOffset );
+ verify( _extraOffset );
}
}
@@ -619,7 +621,7 @@ namespace mongo {
}
long long NamespaceDetails::storageSize( int * numExtents , BSONArrayBuilder * extentInfo ) const {
- Extent * e = firstExtent.ext();
+ Extent * e = _firstExtent.ext();
verify( e );
long long total = 0;
@@ -799,7 +801,47 @@ namespace mongo {
void NamespaceDetails::clearSystemFlag( int flag ) {
getDur().writingInt(_systemFlags) &= ~flag;
}
-
+
+ void NamespaceDetails::setLastExtentSize( int newMax ) {
+ if ( _lastExtentSize == newMax )
+ return;
+ getDur().writingInt(_lastExtentSize) = newMax;
+ }
+
+ void NamespaceDetails::incrementStats( long long dataSizeIncrement,
+ long long numRecordsIncrement ) {
+
+ // durability todo : this could be a bit annoying / slow to record constantly
+ Stats* s = getDur().writing( &_stats );
+ s->datasize += dataSizeIncrement;
+ s->nrecords += numRecordsIncrement;
+ }
+
+ void NamespaceDetails::setStats( long long dataSize,
+ long long numRecords ) {
+ Stats* s = getDur().writing( &_stats );
+ s->datasize = dataSize;
+ s->nrecords = numRecords;
+ }
+
+
+ void NamespaceDetails::setFirstExtent( DiskLoc newFirstExtent ) {
+ getDur().writingDiskLoc( _firstExtent ) = newFirstExtent;
+ }
+
+ void NamespaceDetails::setLastExtent( DiskLoc newLastExtent ) {
+ getDur().writingDiskLoc( _lastExtent ) = newLastExtent;
+ }
+
+ void NamespaceDetails::setFirstExtentInvalid() {
+ getDur().writingDiskLoc( _firstExtent ).setInvalid();
+ }
+
+ void NamespaceDetails::setLastExtentInvalid() {
+ getDur().writingDiskLoc( _lastExtent ).setInvalid();
+ }
+
+
/**
* keeping things in sync this way is a bit of a hack
* and the fact that we have to pass in ns again
@@ -859,7 +901,6 @@ namespace mongo {
}
verify( _paddingFactor >= 1 );
-
if ( isUserFlagSet( Flag_UsePowerOf2Sizes ) ) {
// quantize to the nearest bucketSize (or nearest 1mb boundary for large sizes).
return quantizePowerOf2AllocationSpace(minRecordSize);
@@ -869,6 +910,115 @@ namespace mongo {
return static_cast<int>(minRecordSize * _paddingFactor);
}
+ NamespaceDetails::IndexBuildBlock::IndexBuildBlock( const string& ns, const string& indexName )
+ : _ns( ns ), _indexName( indexName ) {
+
+ NamespaceDetails* nsd = nsdetails( _ns );
+ verify( nsd );
+ getDur().writingInt( nsd->_indexBuildsInProgress ) += 1;
+ }
+
+ NamespaceDetails::IndexBuildBlock::~IndexBuildBlock() {
+ NamespaceDetails* nsd = nsdetails( _ns );
+ if ( nsd ) {
+ getDur().writingInt( nsd->_indexBuildsInProgress ) -= 1;
+ }
+ }
+
+ /* remove bit from a bit array - actually remove its slot, not a clear
+ note: this function does not work with x == 63 -- that is ok
+ but keep in mind in the future if max indexes were extended to
+ exactly 64 it would be a problem
+ */
+ unsigned long long removeAndSlideBit(unsigned long long b, int x) {
+ unsigned long long tmp = b;
+ return
+ (tmp & ((((unsigned long long) 1) << x)-1)) |
+ ((tmp >> (x+1)) << x);
+ }
+
+ void NamespaceDetails::removeIndex( int idxNumber ) {
+ verify( idxNumber >= 0 );
+ verify( idxNumber < _nIndexes );
+ verify( _indexBuildsInProgress == 0 );
+
+ /* note it is important we remove the IndexDetails with this
+ call, otherwise, on recreate, the old one would be reused, and its
+ IndexDetails::info ptr would be bad info.
+ */
+
+ aboutToDeleteAnIndex();
+
+ _removeIndex( idxNumber );
+ }
+
+ void NamespaceDetails::_removeIndex( int idxNumber ) {
+
+ // TODO: don't do this whole thing, do it piece meal for readability
+ NamespaceDetails* d = writingWithExtra();
+
+ IndexDetails *id = &d->idx(idxNumber);
+ id->kill_idx();
+
+ // fix the _multiKeyIndexBits, by moving all bits above me down one
+ d->_multiKeyIndexBits = removeAndSlideBit(d->_multiKeyIndexBits, idxNumber);
+
+ if ( idxNumber >= _nIndexes )
+ d->_indexBuildsInProgress--;
+ else
+ d->_nIndexes--;
+
+ for ( int i = idxNumber; i < getTotalIndexCount(); i++ )
+ d->idx(i) = d->idx(i+1);
+
+ d->idx( getTotalIndexCount() ) = IndexDetails();
+ }
+
+ BSONObj NamespaceDetails::prepOneUnfinishedIndex() {
+ verify( _indexBuildsInProgress > 0 );
+
+ // details.info is always a valid system.indexes entry because DataFileMgr::insert journals
+ // creating the index doc and then insert_makeIndex durably assigns its DiskLoc to info.
+ // indexBuildsInProgress is set after that, so if it is set, info must be set.
+ int offset = getTotalIndexCount() - 1;
+
+ BSONObj info = idx(offset).info.obj().getOwned();
+
+ _removeIndex( offset );
+
+ return info;
+ }
+
+ void NamespaceDetails::blowAwayInProgressIndexEntries() {
+ int inProg = _indexBuildsInProgress;
+
+ getDur().writingInt(_indexBuildsInProgress) = 0;
+
+ for (int i = 0; i < inProg; i++) {
+ idx( _nIndexes + i ).kill_idx();
+ }
+
+ }
+
+ void NamespaceDetails::swapIndex( const char* ns, int a, int b ) {
+
+ // flip main meta data
+ IndexDetails temp = idx(a);
+ *getDur().writing(&idx(a)) = idx(b);
+ *getDur().writing(&idx(b)) = temp;
+
+ // flip multi key bits
+ bool tempMultikey = isMultikey(a);
+ setIndexIsMultikey( ns, a, isMultikey(b) );
+ setIndexIsMultikey( ns, b, tempMultikey );
+ }
+
+ void NamespaceDetails::orphanDeletedList() {
+ for( int i = 0; i < Buckets; i++ ) {
+ _deletedList[i].writing().Null();
+ }
+ }
+
/* ------------------------------------------------------------------------- */
/* add a new namespace to the system catalog (<dbname>.system.namespaces).
@@ -991,4 +1141,16 @@ namespace mongo {
return false;
}
+ class IndexUpdateTest : public StartupTest {
+ public:
+ void run() {
+ verify( removeAndSlideBit(1, 0) == 0 );
+ verify( removeAndSlideBit(2, 0) == 1 );
+ verify( removeAndSlideBit(2, 1) == 0 );
+ verify( removeAndSlideBit(255, 1) == 127 );
+ verify( removeAndSlideBit(21, 2) == 9 );
+ verify( removeAndSlideBit(0x4000000000000001ULL, 62) == 1 );
+ }
+ } iu_unittest;
+
} // namespace mongo
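The new IndexBuildBlock is an RAII guard around the now-private _indexBuildsInProgress counter; the count is journaled up in the constructor and back down in the destructor, so it is restored even if a build throws. A hedged usage sketch (no call site is added in this commit):

    {
        NamespaceDetails::IndexBuildBlock guard( ns, indexName );
        // ... build the index; getTotalIndexCount() now includes the new slot ...
    }   // destructor: getDur().writingInt(_indexBuildsInProgress) -= 1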
diff --git a/src/mongo/db/namespace_details.h b/src/mongo/db/namespace_details.h
index c35324ac6c6..255b280d364 100644
--- a/src/mongo/db/namespace_details.h
+++ b/src/mongo/db/namespace_details.h
@@ -55,9 +55,13 @@ namespace mongo {
public:
enum { NIndexesMax = 64, NIndexesExtra = 30, NIndexesBase = 10 };
+ private:
+
/*-------- data fields, as present on disk : */
- DiskLoc firstExtent;
- DiskLoc lastExtent;
+
+ DiskLoc _firstExtent;
+ DiskLoc _lastExtent;
+
/* NOTE: capped collections v1 override the meaning of deletedList.
deletedList[0] points to a list of free records (DeletedRecord's) for all extents in
the capped namespace.
@@ -65,16 +69,18 @@ namespace mongo {
changes, this value is updated. !deletedList[1].isValid() when this value is not
yet computed.
*/
- DiskLoc deletedList[Buckets];
+ DiskLoc _deletedList[Buckets];
+
// ofs 168 (8 byte aligned)
struct Stats {
// datasize and nrecords MUST Be adjacent code assumes!
long long datasize; // this includes padding, but not record headers
long long nrecords;
- } stats;
- int lastExtentSize;
- int nIndexes;
- private:
+ } _stats;
+
+ int _lastExtentSize;
+ int _nIndexes;
+
// ofs 192
IndexDetails _indexes[NIndexesBase];
@@ -85,21 +91,22 @@ namespace mongo {
double _paddingFactor; // 1.0 = no padding.
// ofs 386 (16)
int _systemFlags; // things that the system sets/cares about
- public:
- DiskLoc capExtent; // the "current" extent we're writing too for a capped collection
- DiskLoc capFirstNewRecord;
- unsigned short dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
- unsigned short indexFileVersion;
- unsigned long long multiKeyIndexBits;
- private:
+
+ DiskLoc _capExtent; // the "current" extent we're writing to for a capped collection
+ DiskLoc _capFirstNewRecord;
+
+ unsigned short _dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ unsigned short _indexFileVersion;
+ unsigned long long _multiKeyIndexBits;
+
// ofs 400 (16)
- unsigned long long reservedA;
- long long extraOffset; // where the $extra info is located (bytes relative to this)
- public:
- int indexBuildsInProgress; // Number of indexes currently being built
- private:
+ unsigned long long _reservedA;
+ long long _extraOffset; // where the $extra info is located (bytes relative to this)
+
+ int _indexBuildsInProgress; // Number of indexes currently being built
+
int _userFlags;
- char reserved[72];
+ char _reserved[72];
/*-------- end data 496 bytes */
public:
explicit NamespaceDetails( const DiskLoc &loc, bool _capped );
@@ -130,8 +137,8 @@ namespace mongo {
}
};
Extra* extra() {
- if( extraOffset == 0 ) return 0;
- return (Extra *) (((char *) this) + extraOffset);
+ if( _extraOffset == 0 ) return 0;
+ return (Extra *) (((char *) this) + _extraOffset);
}
/* add extra space for indexes when more than 10 */
Extra* allocExtra(const char *ns, int nindexessofar);
@@ -143,8 +150,15 @@ namespace mongo {
/* dump info on all extents for this namespace. for debugging. */
void dumpExtents();
+ public:
+ const DiskLoc& capExtent() const { return _capExtent; }
+ const DiskLoc capFirstNewRecord() const { return _capFirstNewRecord; }
+
+ DiskLoc& capExtent() { return _capExtent; }
+ DiskLoc& capFirstNewRecord() { return _capFirstNewRecord; }
+
private:
- Extent *theCapExtent() const { return capExtent.ext(); }
+ Extent *theCapExtent() const { return _capExtent.ext(); }
void advanceCapExtent( const char *ns );
DiskLoc __capAlloc(int len);
DiskLoc cappedAlloc(const char *ns, int len);
@@ -153,19 +167,51 @@ namespace mongo {
public:
+ const DiskLoc& firstExtent() const { return _firstExtent; }
+ const DiskLoc& lastExtent() const { return _lastExtent; }
+
+ DiskLoc& firstExtent() { return _firstExtent; }
+ DiskLoc& lastExtent() { return _lastExtent; }
+
+ void setFirstExtent( DiskLoc newFirstExtent );
+ void setLastExtent( DiskLoc newLastExtent );
+
+ void setFirstExtentInvalid();
+ void setLastExtentInvalid();
+
+
+ long long dataSize() const { return _stats.datasize; }
+ long long numRecords() const { return _stats.nrecords; }
+
+ void incrementStats( long long dataSizeIncrement,
+ long long numRecordsIncrement );
+
+ void setStats( long long dataSize,
+ long long numRecords );
+
+
bool isCapped() const { return _isCapped; }
long long maxCappedDocs() const;
void setMaxCappedDocs( long long max );
+
+ int lastExtentSize() const { return _lastExtentSize; }
+ void setLastExtentSize( int newSize );
+
+ const DiskLoc& deletedListEntry( int bucket ) const { return _deletedList[bucket]; }
+ DiskLoc& deletedListEntry( int bucket ) { return _deletedList[bucket]; }
+
+ void orphanDeletedList();
+
/**
* @param max in and out, will be adjusted
* @return if the value is valid at all
*/
static bool validMaxCappedDocs( long long* max );
- DiskLoc& cappedListOfAllDeletedRecords() { return deletedList[0]; }
- DiskLoc& cappedLastDelRecLastExtent() { return deletedList[1]; }
+ DiskLoc& cappedListOfAllDeletedRecords() { return _deletedList[0]; }
+ DiskLoc& cappedLastDelRecLastExtent() { return _deletedList[1]; }
void cappedDumpDelInfo();
- bool capLooped() const { return _isCapped && capFirstNewRecord.isValid(); }
+ bool capLooped() const { return _isCapped && _capFirstNewRecord.isValid(); }
bool inCapExtent( const DiskLoc &dl ) const;
void cappedCheckMigrate();
/**
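The stats accessors and mutators above replace direct writes to the old public stats struct at the call sites below (see pdfile.cpp); a short sketch, assuming d is a NamespaceDetails* and r a Record* as in the insert path:

    d->incrementStats( r->netLength(), 1 );    // one record added
    d->incrementStats( -r->netLength(), -1 );  // one record removed
    long long total = d->numRecords();         // cached count, no scan needed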
@@ -181,7 +227,11 @@ namespace mongo {
/* when a background index build is in progress, we don't count the index in nIndexes until
complete, yet still need to use it in _indexRecord() - thus we use this function for that.
*/
- int getTotalIndexCount() const { return nIndexes + indexBuildsInProgress; }
+ int getTotalIndexCount() const { return _nIndexes + _indexBuildsInProgress; }
+
+ int getCompletedIndexCount() const { return _nIndexes; }
+
+ int getIndexBuildsInProgress() const { return _indexBuildsInProgress; }
/* NOTE: be careful with flags. are we manipulating them in read locks? if so,
this isn't thread safe. TODO
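From the definitions above, the three counters satisfy getTotalIndexCount() == getCompletedIndexCount() + getIndexBuildsInProgress(); a sketch of iterating only fully built indexes (d assumed to be a NamespaceDetails*):

    for ( int i = 0; i < d->getCompletedIndexCount(); i++ ) {
        IndexDetails& idx = d->idx( i );  // excludes in-progress builds
        // ... use idx ...
    }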
@@ -219,7 +269,7 @@ namespace mongo {
for a single document. see multikey in docs.
for these, we have to do some dedup work on queries.
*/
- bool isMultikey(int i) const { return (multiKeyIndexBits & (((unsigned long long) 1) << i)) != 0; }
+ bool isMultikey(int i) const { return (_multiKeyIndexBits & (((unsigned long long) 1) << i)) != 0; }
void setIndexIsMultikey(const char *thisns, int i, bool multikey = true);
/**
@@ -280,7 +330,7 @@ namespace mongo {
can push this down considerably. further tweaking will be a good idea but
this should be an adequate starting point.
*/
- double N = min(nIndexes,7) + 3;
+ double N = min(_nIndexes,7) + 3;
double x = _paddingFactor + (0.001 * N);
if ( x <= 2.0 ) {
setPaddingFactor( x );
@@ -310,6 +360,26 @@ namespace mongo {
const IndexDetails* findIndexByPrefix( const BSONObj &keyPattern ,
bool requireSingleKey );
+ void removeIndex( int idx );
+
+ /**
+ * removes the index entries between getCompletedIndexCount() and getTotalIndexCount();
+ * this should only be used for crash recovery
+ */
+ void blowAwayInProgressIndexEntries();
+
+ /**
+ * @return the info for the index to retry
+ */
+ BSONObj prepOneUnfinishedIndex();
+
+ /**
+ * swaps all metadata for two indexes
+ * a and b are the two index ids whose contents will be swapped
+ * the caller must hold a lock on the entire collection to do this
+ */
+ void swapIndex( const char* ns, int a, int b );
+
/* Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
* The specified index must already contain an expireAfterSeconds field, and the value in
* that field and newExpireSecs must both be numeric.
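One plausible crash-recovery sequence built from the helpers above (the surrounding retry machinery is assumed here and not shown in this hunk; the exact ordering is an assumption):

    // capture the spec of the unfinished index to retry, then drop any
    // remaining in-progress entries before rebuilding from the spec
    BSONObj spec = d->prepOneUnfinishedIndex();
    d->blowAwayInProgressIndexEntries();
    // ... rebuild the index from 'spec' via the normal build path ...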
@@ -324,10 +394,9 @@ namespace mongo {
const int userFlags() const { return _userFlags; }
bool isUserFlagSet( int flag ) const { return _userFlags & flag; }
-
-
+
/**
- * these methods only modify NamespaceDetails and do not
+ * these methods only modify NamespaceDetails and do not
* sync changes back to system.namespaces
* a typical call might
if ( nsd->setUserFlag( 4 ) ) {
@@ -335,7 +404,7 @@ namespace mongo {
}
* these methods all return true iff something was modified
*/
-
+
bool setUserFlag( int flag );
bool clearUserFlag( int flag );
bool replaceUserFlags( int flags );
@@ -354,7 +423,7 @@ namespace mongo {
return -1;
}
- bool haveIdIndex() {
+ bool haveIdIndex() {
return isSystemFlagSet( NamespaceDetails::Flag_HaveIdIndex ) || findIdIndex() >= 0;
}
@@ -401,9 +470,9 @@ namespace mongo {
long long storageSize( int * numExtents = 0 , BSONArrayBuilder * extentInfo = 0 ) const;
int averageObjectSize() {
- if ( stats.nrecords == 0 )
+ if ( _stats.nrecords == 0 )
return 5;
- return (int) (stats.datasize / stats.nrecords);
+ return (int) (_stats.datasize / _stats.nrecords);
}
NamespaceDetails *writingWithoutExtra() {
@@ -412,7 +481,20 @@ namespace mongo {
/** Make all linked Extra objects writeable as well */
NamespaceDetails *writingWithExtra();
+ class IndexBuildBlock {
+ public:
+ IndexBuildBlock( const string& ns, const string& indexName );
+ ~IndexBuildBlock();
+
+ private:
+ string _ns;
+ string _indexName;
+ };
+
private:
+
+ void _removeIndex( int idx );
+
DiskLoc _alloc(const char *ns, int len);
void maybeComplain( const char *ns, int len ) const;
DiskLoc __stdAlloc(int len, bool willBeAt);
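IndexBuildBlock acts as an RAII guard around a build (see insert_makeIndex below, where it replaces the manual indexBuildsInProgress bookkeeping); a minimal usage sketch, assuming the constructor and destructor adjust the in-progress counter:

    {
        NamespaceDetails::IndexBuildBlock guard( ns, idxName );
        buildTheIndex();  // hypothetical build step; if it throws, the
                          // destructor still unwinds the bookkeeping
    }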
@@ -586,7 +668,7 @@ namespace mongo {
bool find(const char *ns, DiskLoc& loc) {
NamespaceDetails *l = details(ns);
if ( l ) {
- loc = l->firstExtent;
+ loc = l->_firstExtent;
return true;
}
return false;
diff --git a/src/mongo/db/ops/count.cpp b/src/mongo/db/ops/count.cpp
index f9288a4f021..a22a6f67d41 100644
--- a/src/mongo/db/ops/count.cpp
+++ b/src/mongo/db/ops/count.cpp
@@ -65,7 +65,7 @@ namespace mongo {
// count of all objects
if ( query.isEmpty() ) {
- return applySkipLimit( d->stats.nrecords , cmd );
+ return applySkipLimit( d->numRecords(), cmd );
}
long long count = 0;
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 022711248d9..7bd927aa894 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -516,22 +516,20 @@ namespace mongo {
NamespaceIndex *ni = nsindex(ns);
NamespaceDetails *details = ni->details(ns);
if ( details ) {
- verify( !details->lastExtent.isNull() );
- verify( !details->firstExtent.isNull() );
- getDur().writingDiskLoc(e->xprev) = details->lastExtent;
- getDur().writingDiskLoc(details->lastExtent.ext()->xnext) = eloc;
+ verify( !details->lastExtent().isNull() );
+ verify( !details->firstExtent().isNull() );
+ getDur().writingDiskLoc(e->xprev) = details->lastExtent();
+ getDur().writingDiskLoc(details->lastExtent().ext()->xnext) = eloc;
verify( !eloc.isNull() );
- getDur().writingDiskLoc(details->lastExtent) = eloc;
+ details->setLastExtent( eloc );
}
else {
ni->add_ns(ns, eloc, capped);
details = ni->details(ns);
}
- {
- NamespaceDetails *dw = details->writingWithoutExtra();
- dw->lastExtentSize = e->length;
- }
+ details->setLastExtentSize( e->length );
+
details->addDeletedRec(emptyLoc.drec(), emptyLoc);
}
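The diff shows only call sites for the new extent setters; a plausible body, reconstructed from the direct write it replaces (assumed, not part of this patch):

    void NamespaceDetails::setLastExtent( DiskLoc loc ) {
        // the same durable write the caller above used to perform inline
        getDur().writingDiskLoc( _lastExtent ) = loc;
    }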
@@ -603,7 +601,7 @@ namespace mongo {
int bestDiff = 0x7fffffff;
{
Timer t;
- DiskLoc L = f->firstExtent;
+ DiskLoc L = f->firstExtent();
while( !L.isNull() ) {
Extent * e = L.ext();
if( e->length >= low && e->length <= high ) {
@@ -647,10 +645,10 @@ namespace mongo {
e->xprev.ext()->xnext.writing() = e->xnext;
if( !e->xnext.isNull() )
e->xnext.ext()->xprev.writing() = e->xprev;
- if( f->firstExtent == e->myLoc )
- f->firstExtent.writing() = e->xnext;
- if( f->lastExtent == e->myLoc )
- f->lastExtent.writing() = e->xprev;
+ if( f->firstExtent() == e->myLoc )
+ f->setFirstExtent( e->xnext );
+ if( f->lastExtent() == e->myLoc )
+ f->setLastExtent( e->xprev );
// use it
OCCASIONALLY if( n > 512 ) log() << "warning: newExtent " << n << " scanned" << endl;
@@ -859,7 +857,7 @@ namespace mongo {
if ( ! d )
return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));
- DiskLoc loc = d->firstExtent;
+ DiskLoc loc = d->firstExtent();
Extent *e = getExtent(loc);
DEBUGGING {
@@ -917,7 +915,7 @@ namespace mongo {
if ( !d->isCapped() ) {
if ( !startLoc.isNull() )
return shared_ptr<Cursor>(new ReverseCursor( startLoc ));
- Extent *e = d->lastExtent.ext();
+ Extent *e = d->lastExtent().ext();
while ( e->lastRecord.isNull() && !e->xprev.isNull() ) {
OCCASIONALLY out() << " findTableScan: extent empty, skipping ahead" << endl;
e = e->getPrevExtent();
@@ -937,7 +935,7 @@ namespace mongo {
log() << " freeExtents==0" << endl;
return;
}
- DiskLoc a = freeExtents->firstExtent;
+ DiskLoc a = freeExtents->firstExtent();
while( !a.isNull() ) {
Extent *e = a.ext();
log() << " extent " << a.toString() << " len:" << e->length << " prev:" << e->xprev.toString() << endl;
@@ -969,16 +967,16 @@ namespace mongo {
freeExtents = nsdetails(s);
massert( 10361 , "can't create .$freelist", freeExtents);
}
- if( freeExtents->firstExtent.isNull() ) {
- freeExtents->firstExtent.writing() = firstExt;
- freeExtents->lastExtent.writing() = lastExt;
+ if( freeExtents->firstExtent().isNull() ) {
+ freeExtents->setFirstExtent( firstExt );
+ freeExtents->setLastExtent( lastExt );
}
else {
- DiskLoc a = freeExtents->firstExtent;
+ DiskLoc a = freeExtents->firstExtent();
verify( a.ext()->xprev.isNull() );
getDur().writingDiskLoc( a.ext()->xprev ) = lastExt;
getDur().writingDiskLoc( lastExt.ext()->xnext ) = a;
- getDur().writingDiskLoc( freeExtents->firstExtent ) = firstExt;
+ freeExtents->setFirstExtent( firstExt );
}
//printFreeList();
@@ -1013,10 +1011,10 @@ namespace mongo {
}
// free extents
- if( !d->firstExtent.isNull() ) {
- freeExtents(d->firstExtent, d->lastExtent);
- getDur().writingDiskLoc( d->firstExtent ).setInvalid();
- getDur().writingDiskLoc( d->lastExtent ).setInvalid();
+ if( !d->firstExtent().isNull() ) {
+ freeExtents(d->firstExtent(), d->lastExtent());
+ d->setFirstExtentInvalid();
+ d->setLastExtentInvalid();
}
// remove from the catalog hashtable
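setFirstExtentInvalid() and setLastExtentInvalid() likewise wrap the invalidation writes removed above; assumed shape:

    void NamespaceDetails::setFirstExtentInvalid() {
        getDur().writingDiskLoc( _firstExtent ).setInvalid();
    }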
@@ -1031,7 +1029,7 @@ namespace mongo {
BackgroundOperation::assertNoBgOpInProgForNs(name.c_str());
- if ( d->nIndexes != 0 ) {
+ if ( d->getTotalIndexCount() > 0 ) {
try {
verify( dropIndexes(d, name.c_str(), "*", errmsg, result, true) );
}
@@ -1041,7 +1039,7 @@ namespace mongo {
ss << " cause: " << e.what();
uasserted(12503,ss.str());
}
- verify( d->nIndexes == 0 );
+ verify( d->getTotalIndexCount() == 0 );
}
LOG(1) << "\t dropIndexes done" << endl;
result.append("ns", name.c_str());
@@ -1082,11 +1080,7 @@ namespace mongo {
/* add to the free list */
{
- {
- NamespaceDetails::Stats *s = getDur().writing(&d->stats);
- s->datasize -= todelete->netLength();
- s->nrecords--;
- }
+ d->incrementStats( -1 * todelete->netLength(), -1 );
if (NamespaceString(ns).coll == "system.indexes") {
/* temp: if in system.indexes, don't reuse, and zero out: we want to be
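A plausible body for incrementStats, reconstructed from the inline code it replaces here and in the insert path below (assumed, not part of this patch):

    void NamespaceDetails::incrementStats( long long dataSizeIncrement,
                                           long long numRecordsIncrement ) {
        // same durable write the old call sites performed directly
        Stats* s = getDur().writing( &_stats );
        s->datasize += dataSizeIncrement;
        s->nrecords += numRecordsIncrement;
    }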
@@ -1336,7 +1330,7 @@ namespace mongo {
// dummy data here, keeping pointers to the btree nodes holding the dummy data and then
// updating the dummy data with the DiskLoc of the real record.
void checkNoIndexConflicts( NamespaceDetails *d, const BSONObj &obj ) {
- for ( int idxNo = 0; idxNo < d->nIndexes; idxNo++ ) {
+ for ( int idxNo = 0; idxNo < d->getCompletedIndexCount(); idxNo++ ) {
if( d->idx(idxNo).unique() ) {
IndexDetails& idx = d->idx(idxNo);
if (ignoreUniqueIndex(idx))
@@ -1379,13 +1373,13 @@ namespace mongo {
DiskLoc loc;
if ( ! d->isCapped() ) { // size capped doesn't grow
LOG(1) << "allocating new extent for " << ns << " padding:" << d->paddingFactor() << " lenWHdr: " << lenWHdr << endl;
- cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false, !god);
+ cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize()), false, !god);
loc = d->alloc(ns, lenWHdr);
if ( loc.isNull() ) {
- log() << "warning: alloc() failed after allocating new extent. lenWHdr: " << lenWHdr << " last extent size:" << d->lastExtentSize << "; trying again" << endl;
- for ( int z=0; z<10 && lenWHdr > d->lastExtentSize; z++ ) {
+ log() << "warning: alloc() failed after allocating new extent. lenWHdr: " << lenWHdr << " last extent size:" << d->lastExtentSize() << "; trying again" << endl;
+ for ( int z=0; z<10 && lenWHdr > d->lastExtentSize(); z++ ) {
log() << "try #" << z << endl;
- cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false, !god);
+ cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize()), false, !god);
loc = d->alloc(ns, lenWHdr);
if ( ! loc.isNull() )
break;
@@ -1445,6 +1439,10 @@ namespace mongo {
return d;
}
+
+ /**
+ * @param loc the location in system.indexes where the index spec is stored
+ */
void NOINLINE_DECL insert_makeIndex(NamespaceDetails* tableToIndex,
const string& tabletoidxns,
const DiskLoc& loc,
@@ -1454,12 +1452,11 @@ namespace mongo {
NamespaceString(tabletoidxns).coll != "system.indexes");
BSONObj info = loc.obj();
-
- // The total number of indexes right before we write to the collection
- int oldNIndexes = -1;
- int idxNo = tableToIndex->getTotalIndexCount();
std::string idxName = info["name"].valuestr();
+
+ int idxNo = -1;
+
// Set curop description before setting indexBuildInProg, so that there's something
// commands can find and kill as soon as indexBuildInProg is set. Only set this if it's a
// killable index, so we don't overwrite commands in currentOp.
@@ -1469,12 +1466,13 @@ namespace mongo {
try {
IndexDetails& idx = tableToIndex->getNextIndexDetails(tabletoidxns.c_str());
+ NamespaceDetails::IndexBuildBlock indexBuildBlock( tabletoidxns, idxName );
+
// It's important that this is outside the inner try/catch so that we never try to call
// kill_idx on a half-formed disk loc (if this asserts).
getDur().writingDiskLoc(idx.info) = loc;
try {
- getDur().writingInt(tableToIndex->indexBuildsInProgress) += 1;
buildAnIndex(tabletoidxns, tableToIndex, idx, mayInterrupt);
}
catch (DBException& e) {
@@ -1506,33 +1504,19 @@ namespace mongo {
idxNo = IndexBuildsInProgress::get(tabletoidxns.c_str(), idxName);
// Make sure the newly created index is relocated to nIndexes, if it isn't already there
- if (idxNo != tableToIndex->nIndexes) {
+ if ( idxNo != tableToIndex->getCompletedIndexCount() ) {
log() << "switching indexes at position " << idxNo << " and "
- << tableToIndex->nIndexes << endl;
- // We cannot use idx here, as it may point to a different index entry if it was
- // flipped during building
- IndexDetails temp = tableToIndex->idx(idxNo);
- *getDur().writing(&tableToIndex->idx(idxNo)) =
- tableToIndex->idx(tableToIndex->nIndexes);
- *getDur().writing(&tableToIndex->idx(tableToIndex->nIndexes)) = temp;
+ << tableToIndex->getCompletedIndexCount() << endl;
- // We also have to flip multikey entries
- bool tempMultikey = tableToIndex->isMultikey(idxNo);
- tableToIndex->setIndexIsMultikey(tabletoidxns.c_str(), idxNo,
- tableToIndex->isMultikey(tableToIndex->nIndexes));
- tableToIndex->setIndexIsMultikey(tabletoidxns.c_str(), tableToIndex->nIndexes,
- tempMultikey);
+ tableToIndex->swapIndex( tabletoidxns.c_str(),
+ idxNo,
+ tableToIndex->getCompletedIndexCount() );
- idxNo = tableToIndex->nIndexes;
+ idxNo = tableToIndex->getCompletedIndexCount();
}
- // Store the current total of indexes in case something goes wrong actually adding the
- // index
- oldNIndexes = tableToIndex->getTotalIndexCount();
-
// clear transient info caches so they refresh; increments nIndexes
tableToIndex->addIndex(tabletoidxns.c_str());
- getDur().writingInt(tableToIndex->indexBuildsInProgress) -= 1;
IndexLegacy::postBuildHook(tableToIndex, idx);
}
@@ -1542,16 +1526,11 @@ namespace mongo {
// metadata is consistent on any exception. It may leak like a sieve if the index
// successfully finished building and addIndex or kill_idx threw.
- // Check if nIndexes was incremented
- if (oldNIndexes != -1 && oldNIndexes != tableToIndex->nIndexes) {
- getDur().writingInt(tableToIndex->nIndexes) = oldNIndexes;
- }
-
// Move any other in prog indexes "back" one. It is important that idxNo is set
// correctly so that the correct index is removed
- IndexBuildsInProgress::remove(tabletoidxns.c_str(), idxNo);
- getDur().writingInt(tableToIndex->indexBuildsInProgress) -= 1;
-
+ if ( idxNo >= 0 ) {
+ IndexBuildsInProgress::remove(tabletoidxns.c_str(), idxNo);
+ }
throw;
}
}
@@ -1563,7 +1542,9 @@ namespace mongo {
NamespaceDetails* nsd = nsdetails(ns);
// Go through unfinished index builds and try to find this index
- for (int i=nsd->nIndexes; i<nsd->nIndexes+nsd->indexBuildsInProgress; i++) {
+ for ( int i=nsd->getCompletedIndexCount();
+ i < nsd->getTotalIndexCount();
+ i++ ) {
if (indexName == nsd->idx(i).indexName()) {
return i;
}
@@ -1659,11 +1640,11 @@ namespace mongo {
int lenWHdr = d->getRecordAllocationSize( len + Record::HeaderSize );
fassert( 16440, lenWHdr >= ( len + Record::HeaderSize ) );
-
+
// If the collection is capped, check if the new object will violate a unique index
// constraint before allocating space.
- if (d->nIndexes &&
- d->isCapped() &&
+ if (d->getCompletedIndexCount() &&
+ d->isCapped() &&
!god) {
checkNoIndexConflicts( d, BSONObj( reinterpret_cast<const char *>( obuf ) ) );
}
@@ -1696,12 +1677,7 @@ namespace mongo {
addRecordToRecListInExtent(r, loc);
- /* durability todo : this could be a bit annoying / slow to record constantly */
- {
- NamespaceDetails::Stats *s = getDur().writing(&d->stats);
- s->datasize += r->netLength();
- s->nrecords++;
- }
+ d->incrementStats( r->netLength(), 1 );
// we don't bother resetting query optimizer stats for the god tables - also god is true when adding a btree bucket
if ( !god )
@@ -1712,11 +1688,12 @@ namespace mongo {
}
/* add this record to our indexes */
- if (d->nIndexes) {
+ if ( d->getTotalIndexCount() > 0 ) {
try {
BSONObj obj(r->data());
indexRecord(ns, d, obj, loc);
- } catch( AssertionException& e ) {
+ }
+ catch( AssertionException& e ) {
// should be a dup key error on _id index
if( tableToIndex || d->isCapped() ) {
massert( 12583, "unexpected index insertion failure on capped collection", !d->isCapped() );
@@ -1784,13 +1761,7 @@ namespace mongo {
e->lastRecord.writing() = loc;
}
- /* todo: don't update for oplog? seems wasteful. */
- {
- NamespaceDetails::Stats *s = getDur().writing(&d->stats);
- s->datasize += r->netLength();
- s->nrecords++;
- }
-
+ d->incrementStats( r->netLength(), 1 );
return r;
}
diff --git a/src/mongo/db/query_optimizer_internal.cpp b/src/mongo/db/query_optimizer_internal.cpp
index c94212c1deb..3191772ec51 100644
--- a/src/mongo/db/query_optimizer_internal.cpp
+++ b/src/mongo/db/query_optimizer_internal.cpp
@@ -432,7 +432,7 @@ namespace mongo {
vector<shared_ptr<QueryPlan> > plans;
shared_ptr<QueryPlan> optimalPlan;
shared_ptr<QueryPlan> specialPlan;
- for( int i = 0; i < d->nIndexes; ++i ) {
+ for( int i = 0; i < d->getCompletedIndexCount(); ++i ) {
if ( !QueryUtilIndexed::indexUseful( _qps.frsp(), d, i, _qps.order() ) ) {
continue;
@@ -1607,7 +1607,7 @@ namespace mongo {
}
else {
bool useful = false;
- for( int j = 0; j < d->nIndexes; ++j ) {
+ for( int j = 0; j < d->getCompletedIndexCount(); ++j ) {
if ( indexUseful( *i, d, j, BSONObj() ) ) {
useful = true;
break;
diff --git a/src/mongo/db/queryutil.cpp b/src/mongo/db/queryutil.cpp
index 899c03d73eb..e5cb55aa560 100644
--- a/src/mongo/db/queryutil.cpp
+++ b/src/mongo/db/queryutil.cpp
@@ -1455,11 +1455,13 @@ namespace mongo {
"multiKey" << _multiKey.toString()
).jsonString();
}
-
+
void FieldRangeSetPair::assertValidIndex( const NamespaceDetails *d, int idxNo ) const {
- massert( 14048, "FieldRangeSetPair invalid index specified", idxNo >= 0 && idxNo < d->nIndexes );
+ massert( 14048,
+ "FieldRangeSetPair invalid index specified",
+ idxNo >= 0 && idxNo < d->getCompletedIndexCount() );
}
-
+
const FieldRangeSet &FieldRangeSetPair::frsForIndex( const NamespaceDetails* nsd, int idxNo ) const {
assertValidIndexOrNoIndex( nsd, idxNo );
if ( idxNo < 0 ) {
diff --git a/src/mongo/db/repl/finding_start_cursor.cpp b/src/mongo/db/repl/finding_start_cursor.cpp
index 570be6357b2..7da987890a5 100644
--- a/src/mongo/db/repl/finding_start_cursor.cpp
+++ b/src/mongo/db/repl/finding_start_cursor.cpp
@@ -105,26 +105,26 @@ namespace mongo {
DiskLoc FindingStartCursor::extentFirstLoc( const DiskLoc &rec ) {
Extent *e = rec.rec()->myExtent( rec );
- if ( !_qp.nsd()->capLooped() || ( e->myLoc != _qp.nsd()->capExtent ) )
+ if ( !_qp.nsd()->capLooped() || ( e->myLoc != _qp.nsd()->capExtent() ) )
return e->firstRecord;
// Likely we are on the fresh side of capExtent, so return first fresh record.
// If we are on the stale side of capExtent, then the collection is small and it
// doesn't matter if we start the extent scan with capFirstNewRecord.
- return _qp.nsd()->capFirstNewRecord;
+ return _qp.nsd()->capFirstNewRecord();
}
-
+
DiskLoc FindingStartCursor::prevExtentFirstLoc( const DiskLoc& rec ) const {
Extent *e = rec.rec()->myExtent( rec );
if ( _qp.nsd()->capLooped() ) {
while( true ) {
// Advance e to preceding extent (looping to lastExtent if necessary).
if ( e->xprev.isNull() ) {
- e = _qp.nsd()->lastExtent.ext();
+ e = _qp.nsd()->lastExtent().ext();
}
else {
e = e->xprev.ext();
}
- if ( e->myLoc == _qp.nsd()->capExtent ) {
+ if ( e->myLoc == _qp.nsd()->capExtent() ) {
// Reached the extent containing the oldest data in the collection.
return DiskLoc();
}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 87f010d5ef3..01695a05755 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -114,7 +114,7 @@ namespace mongo {
NamespaceDetails *d = nsdetails(rsoplog);
// temp
- if( d && d->stats.nrecords == 0 )
+ if( d && d->numRecords() == 0 )
return; // already empty, ok.
LOG(1) << "replSet empty oplog" << rsLog;
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 3bdbe8101d6..f4b8804e669 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -522,7 +522,7 @@ namespace mongo {
}
}
// did we just empty the collection? if so let's check if it even exists on the source.
- if( nsd->stats.nrecords == 0 ) {
+ if( nsd->numRecords() == 0 ) {
try {
string sys = cc().database()->name + ".system.namespaces";
bo o = them->findOne(sys, QUERY("name"<<d.ns));
diff --git a/src/mongo/dbtests/btreetests.inl b/src/mongo/dbtests/btreetests.inl
index d66dfc41e0a..718ea5aad1b 100644
--- a/src/mongo/dbtests/btreetests.inl
+++ b/src/mongo/dbtests/btreetests.inl
@@ -422,10 +422,10 @@
}
// dump();
string ns = id().indexNamespace();
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
int expectedCount = 10 - unindexKeys();
// dump();
- ASSERT_EQUALS( 1, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 1, nsdetails( ns )->numRecords() );
long long unused = 0;
ASSERT_EQUALS( expectedCount, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
@@ -476,10 +476,10 @@
}
// dump();
string ns = id().indexNamespace();
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
BSONObj k = key( 'a' + 17 );
unindex( k );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
long long unused = 0;
ASSERT_EQUALS( 17, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
@@ -620,13 +620,13 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 7, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "bb" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 5, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{b:{a:null},d:{c:null},f:{e:null},_:{g:null}}", id() );
}
};
@@ -638,13 +638,13 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 7, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "bb" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 5, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}", id() );
}
};
@@ -657,12 +657,12 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "c" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{d:{b:{a:null}}}", id() );
}
};
@@ -674,14 +674,14 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 7, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "bb" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
// child does not currently replace parent in this case
- ASSERT_EQUALS( 6, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 6, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
}
};
@@ -693,14 +693,14 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 7, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "ff" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
// child does not currently replace parent in this case
- ASSERT_EQUALS( 6, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 6, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
}
};
@@ -712,13 +712,13 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 7, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "bb" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 7, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{d:{b:{a:null},cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
}
};
@@ -730,13 +730,13 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 5, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "g" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 5, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{c:{b:{a:null}},d:null,_:{f:{e:null}}}", id() );
}
};
@@ -748,13 +748,13 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 7, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "ee" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 6, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{c:{b:{a:null}},_:{e:{d:null},f:null,h:{g:null}}}", id() );
}
};
@@ -766,13 +766,13 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 7, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "ee" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 6, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{f:{b:{a:null},c:null,e:{d:null}},ff:null,_:{h:{g:null}}}", id() );
}
};
@@ -784,13 +784,13 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 7, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 7, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "ee" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 6, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{c:{b:{a:null}},cc:null,_:{e:{d:null},f:null,h:{g:null}}}", id() );
}
};
@@ -802,13 +802,13 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 6, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "c" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
// height is not currently reduced in this case
ArtificialTree::checkStructure( "{j:{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}}", id() );
}
@@ -821,13 +821,13 @@
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 6, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "c" );
verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}", id() );
}
};
@@ -838,12 +838,12 @@
ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}", id() );
string ns = id().indexNamespace();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 6, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "c" );
verify( unindex( k ) );
long long keyCount = bt()->fullValidate( dl(), order(), 0, true );
ASSERT_EQUALS( 7, keyCount );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
// no recursion currently in this case
ArtificialTree::checkStructure( "{h:{b:{a:null},d:null,e:null,f:null},_:{i:null}}", id() );
}
@@ -894,7 +894,7 @@
long long unused = 0;
ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
BSONObj k = bigKey( *i );
unindex( k );
// dump();
@@ -908,10 +908,10 @@
ASSERT_EQUALS( 0, unused );
validate();
if ( !merge() ) {
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
}
else {
- ASSERT_EQUALS( 1, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 1, nsdetails( ns )->numRecords() );
}
}
protected:
@@ -1011,13 +1011,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},b:{$20:null,$30:null,$40:null,$50:null,a:null},_:{c:null}}", id() );
ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x40 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},b:{$10:null,$20:null,$30:null,$50:null,a:null},_:{c:null}}", id() );
}
};
@@ -1028,13 +1028,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null},b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x3 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$20:{$1:null,$2:null,$4:null,$10:null},b:{$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
}
};
@@ -1045,13 +1045,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},$9:{$8:null},$11:{$10:null},$13:{$12:null},_:{$14:null}},b:{$30:null,$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
ASSERT_EQUALS( 23, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 14, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 14, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x30 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 14, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 14, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$9:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},_:{$8:null}},b:{$11:{$10:null},$13:{$12:null},$20:{$14:null},$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
}
};
@@ -1062,13 +1062,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:null,_:{$14:null}},b:{$30:{$25:null},$40:{$35:null},$50:{$45:null},$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
ASSERT_EQUALS( 25, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 15, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 15, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x5 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 24, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 15, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 15, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},$30:{$25:null},$40:{$35:null},_:{$45:null}},b:{$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
}
};
@@ -1079,13 +1079,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x40 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
}
};
@@ -1148,7 +1148,7 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
// force parent pack
ArtificialTree::is( dl() )->forcePack();
BSONObj k = BSON( "" << bigNumString( 0x40 ) );
@@ -1156,7 +1156,7 @@
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
}
};
@@ -1167,13 +1167,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$10$10:{$1:null,$2:null,$3:null,$4:null},$100:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null,$500:null,$600:null,$700:null,$800:null,$900:null,_:{c:null}}", id() );
ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x3 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 21, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 6, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 6, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$500:{$30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},$100:{$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null},_:{$600:null,$700:null,$800:null,$900:null,_:{c:null}}}", id() );
}
};
@@ -1291,13 +1291,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$30:null}}", id() );
ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x12 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$5:{$1:null,$2:null,$3:null,$4:null},$20:{$6:null,$10:null,$11:null,$13:null,$14:null},_:{$30:null}}", id() );
}
};
@@ -1308,13 +1308,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$10:{$1:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x12 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$10:{$1:null},$31:{$11:null,$13:null,$14:null,$20:null},_:{$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
}
};
@@ -1325,13 +1325,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{$10:{$5:{$1:null,$2:null},$8:{$6:null,$7:null}},_:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
ASSERT_EQUALS( 15, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 5, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 5, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x7 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},_:{$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
}
};
@@ -1398,13 +1398,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{b:{a:null}}", id() );
ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 2, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 2, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "a" );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 1, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 1, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{b:null}", id() );
}
};
@@ -1415,13 +1415,13 @@
string ns = id().indexNamespace();
ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 2, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 2, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "b" );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 1, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 1, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{a:null,c:null,d:null}", id() );
}
};
@@ -1434,14 +1434,14 @@
long long unused = 0;
ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 2, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 2, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "c" );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 1, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 1, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
}
};
@@ -1455,7 +1455,7 @@
long long unused = 0;
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 1, unused );
- ASSERT_EQUALS( 2, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 2, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "c" );
// dump();
ASSERT( unindex( k ) );
@@ -1463,7 +1463,7 @@
unused = 0;
ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 1, unused );
- ASSERT_EQUALS( 1, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 1, nsdetails( ns )->numRecords() );
// doesn't discriminate between used and unused
ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
}
@@ -1477,7 +1477,7 @@
long long unused = 0;
ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 2, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 2, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "a" );
// dump();
ASSERT( unindex( k ) );
@@ -1485,7 +1485,7 @@
unused = 0;
ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 1, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 1, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{b:null}", id() );
}
};
@@ -1498,7 +1498,7 @@
long long unused = 0;
ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 5, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 5, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "y" );
// dump();
ASSERT( unindex( k ) );
@@ -1506,7 +1506,7 @@
unused = 0;
ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{a:null,e:{c:{b:null},d:null},z:null}", id() );
}
};
@@ -1519,7 +1519,7 @@
long long unused = 0;
ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "a" );
// dump();
ASSERT( unindex( k ) );
@@ -1527,7 +1527,7 @@
unused = 0;
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 2, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 2, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{c:null,_:{e:null,f:null}}", id() );
}
};
@@ -1540,14 +1540,14 @@
long long unused = 0;
ASSERT_EQUALS( 5, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "d" );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 1, unused );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{a:null,d:{c:{b:null}},e:null}", id() );
ASSERT( bt()->keyNode( 1 ).recordLoc.getOfs() & 1 ); // check 'unused' key
}
@@ -1561,14 +1561,14 @@
long long unused = 0;
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << "a" );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 1, unused );
- ASSERT_EQUALS( 3, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{a:null,_:{c:null,_:{d:null}}}", id() );
ASSERT( bt()->keyNode( 0 ).recordLoc.getOfs() & 1 ); // check 'unused' key
}
@@ -1582,14 +1582,14 @@
long long unused = 0;
ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x30, 0x10 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$60:{$10:null,$20:null,$27:{$23:null,$25:null},$40:null,$50:null},_:{$70:null,$80:null,$90:null,$100:null}}", id() );
}
};
@@ -1602,14 +1602,14 @@
long long unused = 0;
ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
BSONObj k = BSON( "" << bigNumString( 0x100, 0x10 ) );
// dump();
ASSERT( unindex( k ) );
// dump();
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
- ASSERT_EQUALS( 4, nsdetails( ns )->stats.nrecords );
+ ASSERT_EQUALS( 4, nsdetails( ns )->numRecords() );
ArtificialTree::checkStructure( "{$80:{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{$90:null,$97:{$93:null,$95:null}}}", id() );
}
};
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index cf32f78cb2e..609b9d07f1f 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -116,7 +116,7 @@ namespace ClientTests {
db.insert(ns(), BSON("x" << 1 << "y" << 2));
db.insert(ns(), BSON("x" << 2 << "y" << 2));
- ASSERT_EQUALS(1, nsdetails(ns())->nIndexes);
+ ASSERT_EQUALS(1, nsdetails(ns())->getCompletedIndexCount());
// _id index
ASSERT_EQUALS(1U, db.count("test.system.indexes"));
// test.buildindex
@@ -126,13 +126,13 @@ namespace ClientTests {
db.ensureIndex(ns(), BSON("y" << 1), true);
- ASSERT_EQUALS(1, nsdetails(ns())->nIndexes);
+ ASSERT_EQUALS(1, nsdetails(ns())->getCompletedIndexCount());
ASSERT_EQUALS(1U, db.count("test.system.indexes"));
ASSERT_EQUALS(3U, db.count("test.system.namespaces"));
db.ensureIndex(ns(), BSON("x" << 1), true);
- ASSERT_EQUALS(2, nsdetails(ns())->nIndexes);
+ ASSERT_EQUALS(2, nsdetails(ns())->getCompletedIndexCount());
ASSERT_EQUALS(2U, db.count("test.system.indexes"));
ASSERT_EQUALS(4U, db.count("test.system.namespaces"));
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index bba3eb36bf2..be152d0ccca 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -527,39 +527,41 @@ namespace IndexUpdateTests {
public:
void run() {
// _id_ is at 0, so nIndexes == 1
- halfAddIndex("a");
- halfAddIndex("b");
- halfAddIndex("c");
- halfAddIndex("d");
+ NamespaceDetails::IndexBuildBlock* a = halfAddIndex("a");
+ NamespaceDetails::IndexBuildBlock* b = halfAddIndex("b");
+ NamespaceDetails::IndexBuildBlock* c = halfAddIndex("c");
+ NamespaceDetails::IndexBuildBlock* d = halfAddIndex("d");
int offset = IndexBuildsInProgress::get(_ns, "b_1");
ASSERT_EQUALS(2, offset);
IndexBuildsInProgress::remove(_ns, offset);
- nsdetails(_ns)->indexBuildsInProgress--;
+ delete b;
ASSERT_EQUALS(2, IndexBuildsInProgress::get(_ns, "c_1"));
ASSERT_EQUALS(3, IndexBuildsInProgress::get(_ns, "d_1"));
offset = IndexBuildsInProgress::get(_ns, "d_1");
IndexBuildsInProgress::remove(_ns, offset);
- nsdetails(_ns)->indexBuildsInProgress--;
+ delete d;
ASSERT_EQUALS(2, IndexBuildsInProgress::get(_ns, "c_1"));
ASSERT_THROWS(IndexBuildsInProgress::get(_ns, "d_1"), MsgAssertionException);
offset = IndexBuildsInProgress::get(_ns, "a_1");
IndexBuildsInProgress::remove(_ns, offset);
- nsdetails(_ns)->indexBuildsInProgress--;
+ delete a;
ASSERT_EQUALS(1, IndexBuildsInProgress::get(_ns, "c_1"));
+ delete c;
}
private:
- IndexDetails& halfAddIndex(const std::string& key) {
+ NamespaceDetails::IndexBuildBlock* halfAddIndex(const std::string& key) {
+ string name = key + "_1";
BSONObj indexInfo = BSON( "v" << 1 <<
"key" << BSON( key << 1 ) <<
"ns" << _ns <<
- "name" << (key+"_1"));
+ "name" << name );
int32_t lenWHdr = indexInfo.objsize() + Record::HeaderSize;
const char* systemIndexes = "unittests.system.indexes";
DiskLoc infoLoc = allocateSpaceForANewRecord( systemIndexes,
@@ -572,9 +574,7 @@ namespace IndexUpdateTests {
addRecordToRecListInExtent( infoRecord, infoLoc );
IndexDetails& id = nsdetails( _ns )->getNextIndexDetails( _ns );
id.info = infoLoc;
- nsdetails(_ns)->indexBuildsInProgress++;
-
- return id;
+ return new NamespaceDetails::IndexBuildBlock( _ns, name );
}
};
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 3c944ad03f6..6dc2bb0a609 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -1568,7 +1568,7 @@ namespace NamespaceTests {
}
int nRecords() const {
int count = 0;
- for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext ) {
+ for ( DiskLoc i = nsd()->firstExtent(); !i.isNull(); i = i.ext()->xnext ) {
int fileNo = i.ext()->firstRecord.a();
if ( fileNo == -1 )
continue;
@@ -1577,12 +1577,12 @@ namespace NamespaceTests {
++count;
}
}
- ASSERT_EQUALS( count, nsd()->stats.nrecords );
+ ASSERT_EQUALS( count, nsd()->numRecords() );
return count;
}
int nExtents() const {
int count = 0;
- for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext )
+ for ( DiskLoc i = nsd()->firstExtent(); !i.isNull(); i = i.ext()->xnext )
++count;
return count;
}
@@ -1600,8 +1600,8 @@ namespace NamespaceTests {
}
static BSONObj bigObj(bool bGenID=false) {
BSONObjBuilder b;
- if (bGenID)
- b.appendOID("_id", 0, true);
+ if (bGenID)
+ b.appendOID("_id", 0, true);
string as( 187, 'a' );
b.append( "a", as );
return b.obj();
@@ -1610,8 +1610,8 @@ namespace NamespaceTests {
/** Return the smallest DeletedRecord in deletedList, or DiskLoc() if none. */
DiskLoc smallestDeletedRecord() {
for( int i = 0; i < Buckets; ++i ) {
- if ( !nsd()->deletedList[ i ].isNull() ) {
- return nsd()->deletedList[ i ];
+ if ( !nsd()->deletedListEntry( i ).isNull() ) {
+ return nsd()->deletedListEntry( i );
}
}
return DiskLoc();
@@ -1626,9 +1626,9 @@ namespace NamespaceTests {
// Extract the first DeletedRecord from the deletedList.
DiskLoc deleted;
for( int i = 0; i < Buckets; ++i ) {
- if ( !nsd()->deletedList[ i ].isNull() ) {
- deleted = nsd()->deletedList[ i ];
- nsd()->deletedList[ i ].writing().Null();
+ if ( !nsd()->deletedListEntry( i ).isNull() ) {
+ deleted = nsd()->deletedListEntry( i );
+ nsd()->deletedListEntry( i ).writing().Null();
break;
}
}
@@ -1641,7 +1641,7 @@ namespace NamespaceTests {
// Re-insert the DeletedRecord into the deletedList bucket appropriate for its
// new size.
- nsd()->deletedList[ NamespaceDetails::bucket( newDeletedRecordSize ) ].writing() =
+ nsd()->deletedListEntry( NamespaceDetails::bucket( newDeletedRecordSize ) ).writing() =
deleted;
}
};
@@ -1652,10 +1652,10 @@ namespace NamespaceTests {
create();
ASSERT( nsd() );
ASSERT_EQUALS( 0, nRecords() );
- ASSERT( nsd()->firstExtent == nsd()->capExtent );
+ ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
DiskLoc initial = DiskLoc();
initial.setInvalid();
- ASSERT( initial == nsd()->capFirstNewRecord );
+ ASSERT( initial == nsd()->capFirstNewRecord() );
}
};
@@ -2144,7 +2144,7 @@ namespace NamespaceTests {
}
nsd->cappedTruncateAfter(ns(), truncAt, false);
- ASSERT_EQUALS( nsd->stats.nrecords , 28 );
+ ASSERT_EQUALS( nsd->numRecords() , 28 );
{
scoped_ptr<ForwardCappedCursor> c( ForwardCappedCursor::make( nsd ) );
@@ -2175,18 +2175,18 @@ namespace NamespaceTests {
public:
void run() {
create();
- nsd()->deletedList[ 2 ] = nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted();
+ nsd()->deletedListEntry( 2 ) = nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted();
nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted().writing() = DiskLoc();
nsd()->cappedLastDelRecLastExtent().Null();
NamespaceDetails *d = nsd();
- zero( &d->capExtent );
- zero( &d->capFirstNewRecord );
+ zero( &d->capExtent() );
+ zero( &d->capFirstNewRecord() );
nsd();
- ASSERT( nsd()->firstExtent == nsd()->capExtent );
- ASSERT( nsd()->capExtent.getOfs() != 0 );
- ASSERT( !nsd()->capFirstNewRecord.isValid() );
+ ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
+ ASSERT( nsd()->capExtent().getOfs() != 0 );
+ ASSERT( !nsd()->capFirstNewRecord().isValid() );
int nDeleted = 0;
for ( DiskLoc i = nsd()->cappedListOfAllDeletedRecords(); !i.isNull(); i = i.drec()->nextDeleted(), ++nDeleted );
ASSERT_EQUALS( 10, nDeleted );
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 23ea78624f7..a6b3d307a43 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -65,7 +65,7 @@ namespace PdfileTests {
return 0;
}
// bypass standard alloc/insert routines to use the extent we want.
- static DiskLoc insert( DiskLoc ext, int i ) {
+ static DiskLoc insert( const DiskLoc& ext, int i ) {
BSONObjBuilder b;
b.append( "a", i );
BSONObj o = b.done();
@@ -112,7 +112,7 @@ namespace PdfileTests {
class EmptyLooped : public Base {
virtual void prepare() {
- nsd()->writingWithExtra()->capFirstNewRecord = DiskLoc();
+ nsd()->writingWithExtra()->capFirstNewRecord() = DiskLoc();
}
virtual int count() const {
return 0;
@@ -121,7 +121,7 @@ namespace PdfileTests {
class EmptyMultiExtentLooped : public Base {
virtual void prepare() {
- nsd()->writingWithExtra()->capFirstNewRecord = DiskLoc();
+ nsd()->writingWithExtra()->capFirstNewRecord() = DiskLoc();
}
virtual int count() const {
return 0;
@@ -133,7 +133,7 @@ namespace PdfileTests {
class Single : public Base {
virtual void prepare() {
- nsd()->writingWithExtra()->capFirstNewRecord = insert( nsd()->capExtent, 0 );
+ nsd()->writingWithExtra()->capFirstNewRecord() = insert( nsd()->capExtent(), 0 );
}
virtual int count() const {
return 1;
@@ -142,9 +142,9 @@ namespace PdfileTests {
class NewCapFirst : public Base {
virtual void prepare() {
- DiskLoc x = insert( nsd()->capExtent, 0 );
- nsd()->writingWithExtra()->capFirstNewRecord = x;
- insert( nsd()->capExtent, 1 );
+ DiskLoc x = insert( nsd()->capExtent(), 0 );
+ nsd()->writingWithExtra()->capFirstNewRecord() = x;
+ insert( nsd()->capExtent(), 1 );
}
virtual int count() const {
return 2;
@@ -153,8 +153,8 @@ namespace PdfileTests {
class NewCapLast : public Base {
virtual void prepare() {
- insert( nsd()->capExtent, 0 );
- nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 1 );
+ insert( nsd()->capExtent(), 0 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 1 );
}
virtual int count() const {
return 2;
@@ -163,9 +163,9 @@ namespace PdfileTests {
class NewCapMiddle : public Base {
virtual void prepare() {
- insert( nsd()->capExtent, 0 );
- nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 1 );
- insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent(), 0 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 1 );
+ insert( nsd()->capExtent(), 2 );
}
virtual int count() const {
return 3;
@@ -174,10 +174,10 @@ namespace PdfileTests {
class FirstExtent : public Base {
virtual void prepare() {
- insert( nsd()->capExtent, 0 );
- insert( nsd()->lastExtent, 1 );
- nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
- insert( nsd()->capExtent, 3 );
+ insert( nsd()->capExtent(), 0 );
+ insert( nsd()->lastExtent(), 1 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 2 );
+ insert( nsd()->capExtent(), 3 );
}
virtual int count() const {
return 4;
@@ -189,11 +189,11 @@ namespace PdfileTests {
class LastExtent : public Base {
virtual void prepare() {
- nsd()->capExtent.writing() = nsd()->lastExtent;
- insert( nsd()->capExtent, 0 );
- insert( nsd()->firstExtent, 1 );
- nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
- insert( nsd()->capExtent, 3 );
+ nsd()->capExtent().writing() = nsd()->lastExtent();
+ insert( nsd()->capExtent(), 0 );
+ insert( nsd()->firstExtent(), 1 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 2 );
+ insert( nsd()->capExtent(), 3 );
}
virtual int count() const {
return 4;
@@ -205,12 +205,12 @@ namespace PdfileTests {
class MidExtent : public Base {
virtual void prepare() {
- nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
- insert( nsd()->capExtent, 0 );
- insert( nsd()->lastExtent, 1 );
- insert( nsd()->firstExtent, 2 );
- nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 3 );
- insert( nsd()->capExtent, 4 );
+ nsd()->capExtent().writing() = nsd()->firstExtent().ext()->xnext;
+ insert( nsd()->capExtent(), 0 );
+ insert( nsd()->lastExtent(), 1 );
+ insert( nsd()->firstExtent(), 2 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 3 );
+ insert( nsd()->capExtent(), 4 );
}
virtual int count() const {
return 5;
@@ -222,10 +222,10 @@ namespace PdfileTests {
class AloneInExtent : public Base {
virtual void prepare() {
- nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
- insert( nsd()->lastExtent, 0 );
- insert( nsd()->firstExtent, 1 );
- nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
+ nsd()->capExtent().writing() = nsd()->firstExtent().ext()->xnext;
+ insert( nsd()->lastExtent(), 0 );
+ insert( nsd()->firstExtent(), 1 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 2 );
}
virtual int count() const {
return 3;
@@ -237,11 +237,11 @@ namespace PdfileTests {
class FirstInExtent : public Base {
virtual void prepare() {
- nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
- insert( nsd()->lastExtent, 0 );
- insert( nsd()->firstExtent, 1 );
- nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
- insert( nsd()->capExtent, 3 );
+ nsd()->capExtent().writing() = nsd()->firstExtent().ext()->xnext;
+ insert( nsd()->lastExtent(), 0 );
+ insert( nsd()->firstExtent(), 1 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 2 );
+ insert( nsd()->capExtent(), 3 );
}
virtual int count() const {
return 4;
@@ -253,11 +253,11 @@ namespace PdfileTests {
class LastInExtent : public Base {
virtual void prepare() {
- nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
- insert( nsd()->capExtent, 0 );
- insert( nsd()->lastExtent, 1 );
- insert( nsd()->firstExtent, 2 );
- nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 3 );
+ nsd()->capExtent().writing() = nsd()->firstExtent().ext()->xnext;
+ insert( nsd()->capExtent(), 0 );
+ insert( nsd()->lastExtent(), 1 );
+ insert( nsd()->firstExtent(), 2 );
+ nsd()->capFirstNewRecord().writing() = insert( nsd()->capExtent(), 3 );
}
virtual int count() const {
return 4;
diff --git a/src/mongo/dbtests/queryoptimizertests.cpp b/src/mongo/dbtests/queryoptimizertests.cpp
index d4edbb16fc6..31d713a9be7 100644
--- a/src/mongo/dbtests/queryoptimizertests.cpp
+++ b/src/mongo/dbtests/queryoptimizertests.cpp
@@ -93,7 +93,7 @@ namespace {
}
int existingIndexNo( const BSONObj &key ) const {
NamespaceDetails *d = nsd();
- for( int i = 0; i < d->nIndexes; ++i ) {
+ for( int i = 0; i < d->getCompletedIndexCount(); ++i ) {
if ( ( d->idx( i ).keyPattern() == key ) ||
( d->idx( i ).isIdIndex() && IndexDetails::isIdIndexPattern( key ) ) ) {
return i;
diff --git a/src/mongo/dbtests/queryutiltests.cpp b/src/mongo/dbtests/queryutiltests.cpp
index 9074afa204a..4cdf961dbfd 100644
--- a/src/mongo/dbtests/queryutiltests.cpp
+++ b/src/mongo/dbtests/queryutiltests.cpp
@@ -1648,7 +1648,7 @@ namespace QueryUtilTests {
client_.resetIndexCache();
client_.ensureIndex( ns(), key, false, name.c_str() );
NamespaceDetails *d = nsd();
- for( int i = 0; i < d->nIndexes; ++i ) {
+ for( int i = 0; i < d->getCompletedIndexCount(); ++i ) {
if ( d->idx(i).keyPattern() == key /*indexName() == name*/ || ( d->idx(i).isIdIndex() && IndexDetails::isIdIndexPattern( key ) ) )
return &d->idx(i);
}
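Both loops above replace the raw nIndexes counter with getCompletedIndexCount(), which by its name counts only fully built indexes and leaves out any build still in progress. A sketch of the scan idiom the two tests now share, written against the repo's own headers; the helper name is invented for illustration:

    #include "mongo/db/namespace_details.h"  // NamespaceDetails, IndexDetails
    #include "mongo/db/jsobj.h"              // BSONObj

    // Return the number of the first completed index matching 'key',
    // or -1 if none does; mirrors existingIndexNo() above.
    static int findCompletedIndex( NamespaceDetails* d, const BSONObj& key ) {
        for ( int i = 0; i < d->getCompletedIndexCount(); ++i ) {
            if ( d->idx( i ).keyPattern() == key )
                return i;
        }
        return -1;
    }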
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 4fcccc59eeb..47f02bde586 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -401,9 +401,9 @@ namespace mongo {
// there's a fair amount of slack before we determine a chunk is too large because object sizes will vary
unsigned long long maxRecsWhenFull;
long long avgRecSize;
- const long long totalRecs = d->stats.nrecords;
+ const long long totalRecs = d->numRecords();
if ( totalRecs > 0 ) {
- avgRecSize = d->stats.datasize / totalRecs;
+ avgRecSize = d->dataSize() / totalRecs;
maxRecsWhenFull = maxChunkSize / avgRecSize;
maxRecsWhenFull = std::min( (unsigned long long)(Chunk::MaxObjectPerChunk + 1) , 130 * maxRecsWhenFull / 100 /* slack */ );
}
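The rename aside, this hunk is the chunk-fullness estimate: average record size from the collection totals, then the record count at which a chunk of maxChunkSize would be full, padded by 30% because object sizes vary. A worked example with illustrative numbers — the 64 MB figures are assumptions, not values from the code:

    long long totalRecs    = 100000;                // d->numRecords()
    long long dataSize     = 64LL * 1024 * 1024;    // d->dataSize(), 64 MB
    long long maxChunkSize = 64LL * 1024 * 1024;    // assumed chunk limit

    long long avgRecSize = dataSize / totalRecs;    // 671 bytes per record
    unsigned long long maxRecsWhenFull = maxChunkSize / avgRecSize;  // 100013
    maxRecsWhenFull = 130 * maxRecsWhenFull / 100;  // 130016 after 30% slack
    // the real code then clamps the result at Chunk::MaxObjectPerChunk + 1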
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index ba302515fbf..8a7aecc3a57 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -277,10 +277,10 @@ namespace mongo {
// otherwise make it (max,MinKey,MinKey...) so that bound is non-inclusive
max = Helpers::toKeyFormat( kp.extendRangeBound( max, false ) );
}
-
- const long long recCount = d->stats.nrecords;
- const long long dataSize = d->stats.datasize;
-
+
+ const long long recCount = d->numRecords();
+ const long long dataSize = d->dataSize();
+
//
// 1.b Now that we have the size estimate, go over the remaining parameters and apply any maximum size
// restrictions specified there.
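recCount and dataSize feed splitVector's size estimate before it walks the index for split keys. The follow-on formula is outside this hunk; purely as an illustration of how such an estimate could set a split target, assuming a maxChunkSize parameter is in scope:

    // Illustration only; the actual computation lives later in splitVector.
    if ( recCount > 0 ) {
        long long avgRecSize = dataSize / recCount;
        // target chunks around half the maximum so a freshly split chunk
        // has headroom before it must split again
        long long keysPerChunk = maxChunkSize / ( 2 * avgRecSize );
        (void) keysPerChunk;
    }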
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index d94789fb3c1..a720cce3aa5 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -329,38 +329,38 @@ public:
}
void _repair( Database* db , string ns , boost::filesystem::path outfile ){
- NamespaceDetails * nsd = nsdetails( ns );
- log() << "nrecords: " << nsd->stats.nrecords
- << " datasize: " << nsd->stats.datasize
- << " firstExtent: " << nsd->firstExtent
+ const NamespaceDetails * nsd = nsdetails( ns );
+ log() << "nrecords: " << nsd->numRecords()
+ << " datasize: " << nsd->dataSize()
+ << " firstExtent: " << nsd->firstExtent()
<< endl;
-
- if ( nsd->firstExtent.isNull() ){
+
+ if ( nsd->firstExtent().isNull() ){
log() << " ERROR fisrtExtent is null" << endl;
return;
}
-
- if ( ! nsd->firstExtent.isValid() ){
+
+ if ( ! nsd->firstExtent().isValid() ){
log() << " ERROR fisrtExtent is not valid" << endl;
return;
}
outfile /= ( ns.substr( ns.find( "." ) + 1 ) + ".bson" );
log() << "writing to: " << outfile.string() << endl;
-
+
FilePtr f (fopen(outfile.string().c_str(), "wb"));
// init with double the docs count because we make two passes
- ProgressMeter m( nsd->stats.nrecords * 2 );
+ ProgressMeter m( nsd->numRecords() * 2 );
m.setName("Repair Progress");
m.setUnits("objects");
-
+
Writer w( f , &m );
try {
log() << "forward extent pass" << endl;
LogIndentLevel lil;
- DiskLoc eLoc = nsd->firstExtent;
+ DiskLoc eLoc = nsd->firstExtent();
while ( ! eLoc.isNull() ){
log() << "extent loc: " << eLoc << endl;
eLoc = _repairExtent( db , ns , true , eLoc , w );
@@ -373,7 +373,7 @@ public:
try {
log() << "backwards extent pass" << endl;
LogIndentLevel lil;
- DiskLoc eLoc = nsd->lastExtent;
+ DiskLoc eLoc = nsd->lastExtent();
while ( ! eLoc.isNull() ){
log() << "extent loc: " << eLoc << endl;
eLoc = _repairExtent( db , ns , false , eLoc , w );
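The repair walks the extent chain twice, forward from firstExtent() and then backward from lastExtent(), which matches the ProgressMeter above being sized at numRecords() * 2; records past a broken link mid-chain are presumably still reachable from the other end. Schematically, using the calls shown in the hunks (_repairExtent returns the next extent to visit):

    // Forward pass follows xnext, backward pass follows xprev.
    for ( DiskLoc e = nsd->firstExtent(); !e.isNull(); )
        e = _repairExtent( db, ns, /*forward=*/true, e, w );
    for ( DiskLoc e = nsd->lastExtent(); !e.isNull(); )
        e = _repairExtent( db, ns, /*forward=*/false, e, w );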
diff --git a/src/mongo/util/touch_pages.cpp b/src/mongo/util/touch_pages.cpp
index 845a1c806fc..0a9183e5bfd 100644
--- a/src/mongo/util/touch_pages.cpp
+++ b/src/mongo/util/touch_pages.cpp
@@ -45,8 +45,8 @@ namespace mongo {
Client::ReadContext ctx(ns);
NamespaceDetails *nsd = nsdetails(ns);
uassert( 16154, "namespace does not exist", nsd );
-
- for( DiskLoc L = nsd->firstExtent; !L.isNull(); L = L.ext()->xnext ) {
+
+ for( DiskLoc L = nsd->firstExtent(); !L.isNull(); L = L.ext()->xnext ) {
MongoDataFile* mdf = cc().database()->getFile( L.a() );
massert( 16238, "can't fetch extent file structure", mdf );
touch_location tl;
@@ -54,8 +54,8 @@ namespace mongo {
tl.offset = L.getOfs();
tl.ext = L.ext();
tl.length = tl.ext->length;
-
- ranges.push_back(tl);
+
+ ranges.push_back(tl);
}
mongoFilesLock.reset(new LockMongoFilesShared());
}
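This loop only records each extent's file, offset, and length while holding the read lock; the actual page touching presumably happens afterwards over the collected ranges. As a stand-alone illustration of that second half — page size and access pattern are assumptions, not code from this file:

    #include <cstddef>

    // Fault a mapped byte range into memory by reading one byte per page.
    static void touchRange( const char* base, size_t length,
                            size_t pageSize = 4096 ) {
        volatile char sink = 0;
        for ( size_t off = 0; off < length; off += pageSize )
            sink += base[ off ];  // each read faults its page in
        (void) sink;
    }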