author     Eliot Horowitz <eliot@10gen.com>    2014-04-01 15:03:35 -0400
committer  Eliot Horowitz <eliot@10gen.com>    2014-04-03 13:31:49 -0400
commit     825c3fb55d1dd5be42f64268968dcf2366467631 (patch)
tree       f57248daa6b2da74f214c1eaed125d35c6f352df /src
parent     6a330b4d9300e2e0592e6a67687f227c8503b8b5 (diff)
download   mongo-825c3fb55d1dd5be42f64268968dcf2366467631.tar.gz
SERVER-13084: const work for NamespaceDetails to make it clear where more work remains to be done
Diffstat (limited to 'src')
-rw-r--r--   src/mongo/db/catalog/collection.h              2
-rw-r--r--   src/mongo/db/catalog/database.cpp              2
-rw-r--r--   src/mongo/db/catalog/index_catalog.cpp         16
-rw-r--r--   src/mongo/db/catalog/index_catalog_entry.cpp   8
-rw-r--r--   src/mongo/db/commands/test_commands.cpp        2
-rw-r--r--   src/mongo/db/commands/validate.cpp             2
-rw-r--r--   src/mongo/db/dbcommands.cpp                    4
-rw-r--r--   src/mongo/db/exec/oplogstart.cpp               2
-rw-r--r--   src/mongo/db/exec/oplogstart.h                 4
-rw-r--r--   src/mongo/db/index_legacy.cpp                  12
-rw-r--r--   src/mongo/db/index_legacy.h                    7
-rw-r--r--   src/mongo/db/ops/update.cpp                    2
-rw-r--r--   src/mongo/db/repl/rs_rollback.cpp              4
-rw-r--r--   src/mongo/db/structure/collection_compact.cpp  15
-rw-r--r--   src/mongo/db/ttl.cpp                           2
-rw-r--r--   src/mongo/dbtests/namespacetests.cpp           6
-rw-r--r--   src/mongo/dbtests/query_stage_collscan.cpp     2
-rw-r--r--   src/mongo/dbtests/queryutiltests.cpp           2
18 files changed, 39 insertions, 55 deletions
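The core of the change is an accessor split on Collection: the old non-const details() is renamed detailsWritable(), and a const overload of details() is added, so every remaining call site that mutates NamespaceDetails has to say so explicitly. Below is a minimal standalone sketch of that pattern; the Details and SimpleCollection names are illustrative stand-ins, not types from this tree.

    // Sketch of the const/non-const accessor split used in this commit.
    // Details and SimpleCollection are hypothetical stand-ins for
    // NamespaceDetails and Collection.
    struct Details {
        int userFlags = 0;

        bool setUserFlag(int flag) {             // mutation: needs the writable path
            if (userFlags & flag)
                return false;
            userFlags |= flag;
            return true;
        }

        int flags() const { return userFlags; }  // read-only query
    };

    class SimpleCollection {
    public:
        // Read-only access; callers that only inspect catalog state use this.
        const Details* details() const { return &_details; }

        // Explicitly named writable access; every mutation site is easy to grep for.
        Details* detailsWritable() { return &_details; }

    private:
        Details _details;
    };

    int main() {
        SimpleCollection coll;

        // Writers must name the writable accessor, which is what exposes the
        // remaining "more work to be done" sites called out in the commit message.
        coll.detailsWritable()->setUserFlag(0x1);

        // Readers can work against a const reference and still call details().
        const SimpleCollection& readOnly = coll;
        return readOnly.details()->flags() == 0x1 ? 0 : 1;
    }

In the hunks that follow, read paths such as validate.cpp switch to the const accessor, while ttl.cpp, update.cpp, and rs_rollback.cpp keep a writable pointer via detailsWritable().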
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 615ea282290..d7cea2c833d 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -115,7 +115,7 @@ namespace mongo {
bool ok() const { return _magic == 1357924; }
- NamespaceDetails* details() { return _details; } // TODO: remove
+ NamespaceDetails* detailsWritable() { return _details; } // TODO: remove
const NamespaceDetails* details() const { return _details; }
CollectionInfoCache* infoCache() { return &_infoCache; }
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index d03dfbaf529..1e7b929cfe5 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -691,7 +691,7 @@ namespace mongo {
Collection* collection = getCollection( ns );
massert( 17400, "_namespaceIndex.add_ns failed?", collection );
- NamespaceDetails* nsd = collection->details();
+ NamespaceDetails* nsd = collection->detailsWritable();
// allocation strategy set explicitly in flags or by server-wide default
if ( !options.capped ) {
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 8cbfdc43b60..1ce951a7e14 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -342,6 +342,13 @@ namespace mongo {
int idxNo = _details->_catalogFindIndexByName( idxName, true );
invariant( idxNo < numIndexesReady() );
+ // some special cases stuff
+ if ( pluginName == IndexNames::TEXT ) {
+ if ( _details->setUserFlag(NamespaceDetails::Flag_UsePowerOf2Sizes) ) {
+ _details->syncUserFlags( _collection->ns().ns() );
+ }
+ }
+
return Status::OK();
}
catch ( const AssertionException& exc ) {
@@ -413,7 +420,7 @@ namespace mongo {
return systemIndexesEntry.getStatus();
// 2) collection's NamespaceDetails
- IndexDetails& indexDetails = _collection->details()->getNextIndexDetails( _collection );
+ IndexDetails& indexDetails = _collection->detailsWritable()->getNextIndexDetails( _collection );
try {
getDur().writingDiskLoc( indexDetails.info ) = systemIndexesEntry.getValue();
@@ -427,7 +434,7 @@ namespace mongo {
int before = _collection->details()->_indexBuildsInProgress;
try {
- getDur().writingInt( _collection->details()->_indexBuildsInProgress ) += 1;
+ getDur().writingInt( _collection->detailsWritable()->_indexBuildsInProgress ) += 1;
}
catch ( DBException& e ) {
log() << "got exception trying to incrementStats _indexBuildsInProgress: " << e;
@@ -513,7 +520,7 @@ namespace mongo {
fassert( 17207, _catalog->_collection->ok() );
- NamespaceDetails* nsd = _collection->details();
+ NamespaceDetails* nsd = _collection->detailsWritable();
int idxNo = nsd->_catalogFindIndexByName( _indexName, true );
fassert( 17202, idxNo >= 0 );
@@ -542,12 +549,9 @@ namespace mongo {
entry->setIsReady( true );
- IndexLegacy::postBuildHook( _catalog->_collection,
- _catalog->findIndexByName( _indexName )->keyPattern() );
}
-
Status IndexCatalog::_isSpecOk( const BSONObj& spec ) const {
const NamespaceString& nss = _collection->ns();
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index 836761ab16e..81988ae6945 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -92,7 +92,7 @@ namespace mongo {
}
void IndexCatalogEntry::setHead( DiskLoc newHead ) {
- NamespaceDetails* nsd = _collection->details();
+ NamespaceDetails* nsd = _collection->detailsWritable();
int idxNo = _indexNo();
IndexDetails& id = nsd->idx( idxNo );
id.head.writing() = newHead;
@@ -102,7 +102,7 @@ namespace mongo {
void IndexCatalogEntry::setMultikey() {
if ( isMultikey() )
return;
- NamespaceDetails* nsd = _collection->details();
+ NamespaceDetails* nsd = _collection->detailsWritable();
int idxNo = _indexNo();
if ( nsd->setIndexIsMultikey( idxNo, true ) ) {
LOG(1) << _collection->ns().ns() << ": clearing plan cache - index "
@@ -119,13 +119,13 @@ namespace mongo {
}
DiskLoc IndexCatalogEntry::_catalogHead() const {
- NamespaceDetails* nsd = _collection->details();
+ const NamespaceDetails* nsd = _collection->details();
int idxNo = _indexNo();
return nsd->idx( idxNo ).head;
}
bool IndexCatalogEntry::_catalogIsMultikey() const {
- NamespaceDetails* nsd = _collection->details();
+ const NamespaceDetails* nsd = _collection->details();
int idxNo = _indexNo();
return nsd->isMultikey( idxNo );
}
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index ef4fc7c02c2..f061ff7a176 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -153,7 +153,7 @@ namespace mongo {
Runner::RunnerState state = runner->getNext(NULL, &end);
massert( 13418, "captrunc invalid n", Runner::RUNNER_ADVANCED == state);
}
- collection->details()->cappedTruncateAfter( nss.ns().c_str(), end, inc );
+ collection->detailsWritable()->cappedTruncateAfter( nss.ns().c_str(), end, inc );
return true;
}
};
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 848f13d5ab4..d80e2eca635 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -99,7 +99,7 @@ namespace mongo {
const bool full = cmdObj["full"].trueValue();
const bool scanData = full || cmdObj["scandata"].trueValue();
- NamespaceDetails* nsd = collection->details();
+ const NamespaceDetails* nsd = collection->details();
bool valid = true;
BSONArrayBuilder errors; // explanation(s) for why valid = false
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index ac723016c30..b6712098b5f 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -1072,7 +1072,7 @@ namespace mongo {
result.append( "numExtents" , numExtents );
result.append( "nindexes" , collection->getIndexCatalog()->numIndexesReady() );
- NamespaceDetails* nsd = collection->details();
+ const NamespaceDetails* nsd = collection->details();
result.append( "lastExtentSize" , nsd->lastExtentSize() / scale );
result.append( "paddingFactor" , nsd->paddingFactor() );
result.append( "systemFlags" , nsd->systemFlags() );
@@ -1123,7 +1123,7 @@ namespace mongo {
return false;
}
- NamespaceDetails* nsd = coll->details();
+ NamespaceDetails* nsd = coll->detailsWritable();
bool ok = true;
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index 8003b541a12..2b2a5d1a0da 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -186,7 +186,7 @@ namespace mongo {
}
// static
- DiskLoc OplogStart::prevExtentFirstLoc(NamespaceDetails* nsd, const DiskLoc& rec ) {
+ DiskLoc OplogStart::prevExtentFirstLoc(const NamespaceDetails* nsd, const DiskLoc& rec ) {
Extent *e = rec.rec()->myExtent( rec );
if (nsd->capLooped() ) {
while( true ) {
diff --git a/src/mongo/db/exec/oplogstart.h b/src/mongo/db/exec/oplogstart.h
index 159e8c82a2a..1fce80218d4 100644
--- a/src/mongo/db/exec/oplogstart.h
+++ b/src/mongo/db/exec/oplogstart.h
@@ -80,7 +80,7 @@ namespace mongo {
bool isBackwardsScanning() { return _backwardsScanning; }
private:
// Copied verbatim.
- static DiskLoc prevExtentFirstLoc(NamespaceDetails* nsd, const DiskLoc& rec);
+ static DiskLoc prevExtentFirstLoc(const NamespaceDetails* nsd, const DiskLoc& rec);
StageState workBackwardsScan(WorkingSetID* out);
@@ -107,7 +107,7 @@ namespace mongo {
// Our final state: done.
bool _done;
- NamespaceDetails* _nsd;
+ const NamespaceDetails* _nsd;
// We only go backwards via a collscan for a few seconds.
Timer _timer;
diff --git a/src/mongo/db/index_legacy.cpp b/src/mongo/db/index_legacy.cpp
index 8d63ce1cbe3..0973132f61d 100644
--- a/src/mongo/db/index_legacy.cpp
+++ b/src/mongo/db/index_legacy.cpp
@@ -81,16 +81,4 @@ namespace mongo {
}
}
- // static
- void IndexLegacy::postBuildHook(Collection* collection, const BSONObj& keyPattern) {
- // If it's an FTS index, we want to set the power of 2 flag.
- string pluginName = collection->getIndexCatalog()->getAccessMethodName(keyPattern);
- if (IndexNames::TEXT == pluginName) {
- NamespaceDetails* nsd = collection->details();
- if (nsd->setUserFlag(NamespaceDetails::Flag_UsePowerOf2Sizes)) {
- nsd->syncUserFlags(collection->ns().ns());
- }
- }
- }
-
} // namespace mongo
diff --git a/src/mongo/db/index_legacy.h b/src/mongo/db/index_legacy.h
index 942c53f6487..b0ae9f87033 100644
--- a/src/mongo/db/index_legacy.h
+++ b/src/mongo/db/index_legacy.h
@@ -68,13 +68,6 @@ namespace mongo {
*/
static BSONObj getMissingField(Collection* collection, const BSONObj& infoObj);
- /**
- * Perform any post-build steps for this index.
- *
- * This is a no-op unless the index is a FTS index. In that case, we set the flag for using
- * power of 2 sizes for space allocation.
- */
- static void postBuildHook(Collection* collection, const BSONObj& keyPattern );
};
} // namespace mongo
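With postBuildHook removed from IndexLegacy (both the definition and this declaration), the FTS special case now runs directly in index_catalog.cpp, as the added block in the first hunk of that file above shows: when the index plugin is text, the collection is switched to power-of-2 record allocation and the flag change is persisted. A self-contained sketch of that flow, using stand-in types rather than the real mongo classes:

    // Standalone sketch of the relocated post-build step for text indexes.
    // FakeNamespaceDetails and onIndexBuilt are illustrative stand-ins only.
    #include <iostream>
    #include <string>

    struct FakeNamespaceDetails {
        enum Flags { Flag_UsePowerOf2Sizes = 1 };
        int userFlags = 0;

        bool setUserFlag(int flag) {          // true only when the flag actually changes
            if (userFlags & flag)
                return false;
            userFlags |= flag;
            return true;
        }

        void syncUserFlags(const std::string& ns) {
            std::cout << "persisting user flags for " << ns << "\n";
        }
    };

    void onIndexBuilt(const std::string& pluginName,
                      FakeNamespaceDetails* details,
                      const std::string& ns) {
        if (pluginName == "text") {           // the FTS special case
            if (details->setUserFlag(FakeNamespaceDetails::Flag_UsePowerOf2Sizes)) {
                details->syncUserFlags(ns);   // persist only on a real change
            }
        }
    }

    int main() {
        FakeNamespaceDetails details;
        onIndexBuilt("text", &details, "test.articles");  // sets and persists the flag
        onIndexBuilt("text", &details, "test.articles");  // no-op: flag already set
        return 0;
    }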
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 8e53953e70e..8bdb532e860 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -725,7 +725,7 @@ namespace mongo {
// Broadcast the mutation so that query results stay correct.
collection->cursorCache()->invalidateDocument(loc, INVALIDATION_MUTATION);
- collection->details()->paddingFits();
+ collection->detailsWritable()->paddingFits();
// All updates were in place. Apply them via durability and writing pointer.
mutablebson::DamageVector::const_iterator where = damages.begin();
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index d92e37fa213..8a935938918 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -520,7 +520,7 @@ namespace mongo {
DiskLoc loc = Helpers::findOne(d.ns, pattern, false);
if( Listener::getElapsedTimeMillis() - start > 200 )
log() << "replSet warning roll back slow no _id index for " << d.ns << " perhaps?" << rsLog;
- NamespaceDetails* nsd = collection->details();
+ NamespaceDetails* nsd = collection->detailsWritable();
//would be faster but requires index: DiskLoc loc = Helpers::findById(nsd, pattern);
if( !loc.isNull() ) {
try {
@@ -606,7 +606,7 @@ namespace mongo {
// clean up oplog
LOG(2) << "replSet rollback truncate oplog after " << h.commonPoint.toStringPretty() << rsLog;
// todo: fatal error if this throws?
- oplogCollection->details()->cappedTruncateAfter(rsoplog, h.commonPointOurDiskloc, false);
+ oplogCollection->detailsWritable()->cappedTruncateAfter(rsoplog, h.commonPointOurDiskloc, false);
Status status = getGlobalAuthorizationManager()->initialize();
if (!status.isOK()) {
diff --git a/src/mongo/db/structure/collection_compact.cpp b/src/mongo/db/structure/collection_compact.cpp
index 7e0df7dc1a1..b5f120d9082 100644
--- a/src/mongo/db/structure/collection_compact.cpp
+++ b/src/mongo/db/structure/collection_compact.cpp
@@ -230,16 +230,15 @@ namespace mongo {
return StatusWith<CompactStats>( ErrorCodes::BadValue,
"cannot compact when indexes in progress" );
- NamespaceDetails* d = details();
-
// this is a big job, so might as well make things tidy before we start just to be nice.
getDur().commitIfNeeded();
list<DiskLoc> extents;
- for( DiskLoc extLocation = d->firstExtent();
+ for( DiskLoc extLocation = _details->firstExtent();
!extLocation.isNull();
- extLocation = getExtentManager()->getExtent( extLocation )->xnext )
+ extLocation = getExtentManager()->getExtent( extLocation )->xnext ) {
extents.push_back( extLocation );
+ }
log() << "compact " << extents.size() << " extents";
// same data, but might perform a little different after compact?
@@ -266,10 +265,10 @@ namespace mongo {
}
log() << "compact orphan deleted lists" << endl;
- d->orphanDeletedList();
+ _details->orphanDeletedList();
// Start over from scratch with our extent sizing and growth
- d->setLastExtentSize( 0 );
+ _details->setLastExtentSize( 0 );
// before dropping indexes, at least make sure we can allocate one extent!
// this will allocate an extent and add to free list
@@ -296,7 +295,7 @@ namespace mongo {
// reset data size and record counts to 0 for this namespace
// as we're about to tally them up again for each new extent
- d->setStats( 0, 0 );
+ _details->setStats( 0, 0 );
ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
"Extent Compacting Progress",
@@ -308,7 +307,7 @@ namespace mongo {
pm.hit();
}
- invariant( getExtentManager()->getExtent( d->firstExtent() )->xprev.isNull() );
+ invariant( getExtentManager()->getExtent( _details->firstExtent() )->xprev.isNull() );
// indexes will do their own progress meter
pm.finished();
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index cc97732b32b..c5459bf3410 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -119,7 +119,7 @@ namespace mongo {
continue;
}
- NamespaceDetails* nsd = collection->details();
+ NamespaceDetails* nsd = collection->detailsWritable();
if ( nsd->setUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes ) ) {
// TODO: wish there was a cleaner way to do this
nsd->syncUserFlags( ns );
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index fa3af55a558..188cc751ca7 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -1080,7 +1080,7 @@ namespace NamespaceTests {
Collection* c = collection();
if ( !c )
return NULL;
- return c->details()->writingWithExtra();
+ return c->detailsWritable()->writingWithExtra();
}
Database* db() const {
return _context.db();
@@ -1649,7 +1649,7 @@ namespace NamespaceTests {
}
ASSERT( nRecords() < N );
- NamespaceDetails*nsd = collection()->details();
+ NamespaceDetails* nsd = collection()->detailsWritable();
DiskLoc last, first;
{
@@ -1763,7 +1763,7 @@ namespace NamespaceTests {
public:
void run() {
create();
- NamespaceDetails *nsd = collection()->details();
+ NamespaceDetails *nsd = collection()->detailsWritable();
// Set 2 & 54 as multikey
nsd->setIndexIsMultikey(2, true);
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 3b78b8a1e21..74a5b722bda 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -153,7 +153,7 @@ namespace QueryStageCollectionScan {
Database* db() { return _context.db(); }
Collection* collection() { return db()->getCollection( ns() ); }
- NamespaceDetails *nsd() { return collection()->details(); }
+ NamespaceDetails *nsd() { return collection()->detailsWritable(); }
private:
Lock::GlobalWrite lk_;
diff --git a/src/mongo/dbtests/queryutiltests.cpp b/src/mongo/dbtests/queryutiltests.cpp
index e5bad6998f1..ae0ebdd48dc 100644
--- a/src/mongo/dbtests/queryutiltests.cpp
+++ b/src/mongo/dbtests/queryutiltests.cpp
@@ -1599,7 +1599,7 @@ namespace QueryUtilTests {
Client::Context* ctx() { return &_ctx; }
Database* db() { return _ctx.db(); }
Collection* collection() { return db()->getCollection( ns() ); }
- NamespaceDetails *nsd() { return collection()->details(); }
+ NamespaceDetails *nsd() { return collection()->detailsWritable(); }
int indexno( const BSONObj &key ) {
stringstream ss;