author     Eliot Horowitz <eliot@10gen.com>    2013-10-30 16:36:01 -0400
committer  Eliot Horowitz <eliot@10gen.com>    2013-11-02 13:19:54 -0400
commit     f4752b153a3294bd6dfbfb3b626526c7baf7f90e (patch)
tree       298f68154117a8c6b32b6af2f306c15a93bd1800 /src/mongo
parent     c83e2593e8ad68bd2ad09eaaa4e28df34d0da068 (diff)
download   mongo-f4752b153a3294bd6dfbfb3b626526c7baf7f90e.tar.gz
SERVER-11178: IndexScan now uses IndexCatalog classes
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/catalog/index_catalog.cpp  | 28
-rw-r--r--  src/mongo/db/catalog/index_catalog.h    | 10
-rw-r--r--  src/mongo/db/commands/dbhash.cpp        | 28
-rw-r--r--  src/mongo/db/dbcommands.cpp             | 17
-rw-r--r--  src/mongo/db/dbhelpers.cpp              | 23
-rw-r--r--  src/mongo/db/query/internal_plans.h     |  9
-rw-r--r--  src/mongo/s/d_migrate.cpp               | 16
-rw-r--r--  src/mongo/s/d_split.cpp                 | 42
8 files changed, 103 insertions, 70 deletions
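
The commit's theme is visible in every hunk below: call sites stop juggling NamespaceDetails pointers and raw index ordinals and instead resolve an IndexDescriptor through the Collection's IndexCatalog, which InternalPlanner::indexScan() now accepts directly. A minimal before/after sketch of that calling pattern, using only calls that appear in this diff (variable names are illustrative, and the usual locking/Client::Context setup is assumed):

    // Before: resolve the index ordinal by hand and pass ns + details + idxNo.
    NamespaceDetails* d = nsdetails( ns );
    int idxNo = d->idxNo( *idx );
    auto_ptr<Runner> runner( InternalPlanner::indexScan( ns, d, idxNo, min, max, false ) );

    // After: ask the catalog for a descriptor and hand it straight to the planner.
    // (Callers below check the descriptor for NULL before using it.)
    Collection* collection = ctx.db()->getCollection( ns );
    IndexDescriptor* desc =
        collection->getIndexCatalog()->findIndexByPrefix( keyPattern, true /* requireSingleKey */ );
    auto_ptr<Runner> runner2( InternalPlanner::indexScan( desc, min, max, false ) );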
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 3699bf60582..1652d1601b1 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -701,6 +701,15 @@ namespace mongo {
return _details->getCompletedIndexCount();
}
+ IndexDescriptor* IndexCatalog::findIdIndex() {
+ for ( int i = 0; i < numIndexesReady(); i++ ) {
+ IndexDescriptor* desc = getDescriptor( i );
+ if ( desc->isIdIndex() )
+ return desc;
+ }
+ return NULL;
+ }
+
IndexDescriptor* IndexCatalog::findIndexByName( const StringData& name ) {
int idxNo = _details->findIndexByName( name );
if ( idxNo < 0 )
@@ -715,6 +724,25 @@ namespace mongo {
return getDescriptor( idxNo );
}
+ IndexDescriptor* IndexCatalog::findIndexByPrefix( const BSONObj &keyPattern,
+ bool requireSingleKey ) {
+ IndexDescriptor* best = NULL;
+
+ for ( int i = 0; i < numIndexesReady(); i++ ) {
+ IndexDescriptor* desc = getDescriptor( i );
+
+ if ( !keyPattern.isPrefixOf( desc->keyPattern() ) )
+ continue;
+
+ if( !_details->isMultikey( i ) )
+ return desc;
+
+ if ( !requireSingleKey )
+ best = desc;
+ }
+
+ return best;
+ }
IndexDescriptor* IndexCatalog::getDescriptor( int idxNo ) {
_checkMagic();
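
The two helpers added above, findIdIndex() and findIndexByPrefix(), are simple linear scans over the ready indexes. A hedged usage sketch, assuming a valid Collection* obtained under the appropriate lock, as in the callers later in this diff:

    IndexCatalog* catalog = collection->getIndexCatalog();

    // The _id index, or NULL if the collection has none (e.g. some capped
    // collections, which is why dbhash.cpp below keeps a collection-scan fallback).
    IndexDescriptor* idDesc = catalog->findIdIndex();

    // First index whose key pattern starts with { a: 1 }. With 'true', an index
    // that may contain array values (multikey) is never returned; with 'false',
    // a multikey match is kept only as a last resort.
    IndexDescriptor* prefixDesc =
        catalog->findIndexByPrefix( BSON( "a" << 1 ), true /* requireSingleKey */ );

    if ( prefixDesc == NULL ) {
        // Every caller in this diff treats NULL as "no suitable index" and
        // either falls back or reports an error.
    }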
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 7a0f7637882..d500a0747c6 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -66,6 +66,8 @@ namespace mongo {
* in which case everything from this tree has to go away
*/
+ IndexDescriptor* findIdIndex();
+
/**
* @return null if cannot find
*/
@@ -76,6 +78,14 @@ namespace mongo {
*/
IndexDescriptor* findIndexByKeyPattern( const BSONObj& key );
+ /* Returns the first index whose key pattern has 'keyPattern' as a prefix,
+ * preferring an index that is not multikey. If 'requireSingleKey' is true,
+ * multikey indices (those that may contain array values) are skipped
+ * entirely. Returns NULL if no suitable index exists.
+ */
+ IndexDescriptor* findIndexByPrefix( const BSONObj &keyPattern,
+ bool requireSingleKey );
+
+
// throws
// never returns NULL
IndexDescriptor* getDescriptor( int idxNo );
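
To make the 'prefix' contract in the comment concrete, here is an illustrative truth table (not code from this commit) for a collection holding a single-key index { a: 1, b: 1 } and a multikey index { b: 1 }:

    //   findIndexByPrefix( BSON( "a" << 1 ),              true  )  ->  { a: 1, b: 1 }
    //   findIndexByPrefix( BSON( "a" << 1 << "b" << 1 ),  true  )  ->  { a: 1, b: 1 }
    //   findIndexByPrefix( BSON( "b" << 1 ),              true  )  ->  NULL      (only a multikey index matches)
    //   findIndexByPrefix( BSON( "b" << 1 ),              false )  ->  { b: 1 }  (multikey accepted as fallback)
    //   findIndexByPrefix( BSON( "c" << 1 ),              true  )  ->  NULL      (no index has this prefix)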
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 7bb796e8af9..ab986fc2780 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -80,34 +80,22 @@ namespace mongo {
}
*fromCache = false;
- NamespaceDetails * nsd = nsdetails( fullCollectionName );
- verify( nsd );
-
- // debug SERVER-761
- NamespaceDetails::IndexIterator ii = nsd->ii();
- while( ii.more() ) {
- const IndexDetails &idx = ii.next();
- if ( !idx.head.isValid() || !idx.info.isValid() ) {
- log() << "invalid index for ns: " << fullCollectionName << " " << idx.head << " " << idx.info;
- if ( idx.info.isValid() )
- log() << " " << idx.info.obj();
- log() << endl;
- }
- }
+ Collection* collection = cc().database()->getCollection( fullCollectionName );
+ if ( !collection )
+ return "";
+
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex();
auto_ptr<Runner> runner;
- int idNum = nsd->findIdIndex();
- if ( idNum >= 0 ) {
- runner.reset(InternalPlanner::indexScan(fullCollectionName,
- nsd,
- idNum,
+ if ( desc ) {
+ runner.reset(InternalPlanner::indexScan(desc,
BSONObj(),
BSONObj(),
false,
InternalPlanner::FORWARD,
InternalPlanner::IXSCAN_FETCH));
}
- else if ( nsd->isCapped() ) {
+ else if ( collection->details()->isCapped() ) {
runner.reset(InternalPlanner::collectionScan(fullCollectionName));
}
else {
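
Condensing the dbhash hunk: the SERVER-761 debug logging over NamespaceDetails::IndexIterator is gone, a missing collection now short-circuits to an empty hash, and the runner is chosen in the same order as before. A sketch of the resulting selection logic (illustrative; surrounding context elided):

    Collection* collection = cc().database()->getCollection( fullCollectionName );
    if ( !collection )
        return "";                     // nothing to hash

    IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex();
    auto_ptr<Runner> runner;
    if ( desc ) {
        // Preferred: walk the _id index and fetch each document.
        runner.reset( InternalPlanner::indexScan( desc, BSONObj(), BSONObj(), false,
                                                  InternalPlanner::FORWARD,
                                                  InternalPlanner::IXSCAN_FETCH ) );
    }
    else if ( collection->details()->isCapped() ) {
        // Capped collections may lack an _id index; hash in natural order instead.
        runner.reset( InternalPlanner::collectionScan( fullCollectionName ) );
    }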
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index d992fe01c3d..85a794ae100 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -936,9 +936,9 @@ namespace mongo {
bool estimate = jsobj["estimate"].trueValue();
Client::Context ctx( ns );
- NamespaceDetails *d = nsdetails(ns);
+ Collection* collection = ctx.db()->getCollection( ns );
- if ( ! d || d->numRecords() == 0 ) {
+ if ( !collection || collection->numRecords() == 0 ) {
result.appendNumber( "size" , 0 );
result.appendNumber( "numObjects" , 0 );
result.append( "millis" , timer.millis() );
@@ -950,8 +950,8 @@ namespace mongo {
auto_ptr<Runner> runner;
if ( min.isEmpty() && max.isEmpty() ) {
if ( estimate ) {
- result.appendNumber( "size" , d->dataSize() );
- result.appendNumber( "numObjects" , d->numRecords() );
+ result.appendNumber( "size" , collection->details()->dataSize() );
+ result.appendNumber( "numObjects" , collection->numRecords() );
result.append( "millis" , timer.millis() );
return 1;
}
@@ -968,8 +968,9 @@ namespace mongo {
keyPattern = Helpers::inferKeyPattern( min );
}
- const IndexDetails *idx = d->findIndexByPrefix( keyPattern ,
- true ); /* require single key */
+ IndexDescriptor *idx =
+ collection->getIndexCatalog()->findIndexByPrefix( keyPattern, true ); /* require single key */
+
if ( idx == NULL ) {
errmsg = "couldn't find valid index containing key pattern";
return false;
@@ -979,10 +980,10 @@ namespace mongo {
min = Helpers::toKeyFormat( kp.extendRangeBound( min, false ) );
max = Helpers::toKeyFormat( kp.extendRangeBound( max, false ) );
- runner.reset(InternalPlanner::indexScan(ns, d, d->idxNo(*idx), min, max, false));
+ runner.reset(InternalPlanner::indexScan(idx, min, max, false));
}
- long long avgObjSize = d->dataSize() / d->numRecords();
+ long long avgObjSize = collection->details()->dataSize() / collection->numRecords();
long long maxSize = jsobj["maxSize"].numberLong();
long long maxObjects = jsobj["maxObjects"].numberLong();
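
For the bounded (min/max) path of the dataSize command, the flow around the hunks above is: infer a key pattern if none was given, resolve a single-key index whose key pattern starts with it, widen the bounds to full index keys, and scan. A condensed sketch using only calls visible in this diff (the KeyPattern 'kp' is constructed outside the shown context):

    if ( keyPattern.isEmpty() ) {
        // Derive a key pattern from the shape of 'min'.
        keyPattern = Helpers::inferKeyPattern( min );
    }

    IndexDescriptor* idx =
        collection->getIndexCatalog()->findIndexByPrefix( keyPattern, true /* require single key */ );
    if ( idx == NULL ) {
        errmsg = "couldn't find valid index containing key pattern";
        return false;
    }

    // 'kp' wraps the chosen index's key pattern (construction not shown in the hunk).
    min = Helpers::toKeyFormat( kp.extendRangeBound( min, false ) );
    max = Helpers::toKeyFormat( kp.extendRangeBound( max, false ) );
    runner.reset( InternalPlanner::indexScan( idx, min, max, false ) );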
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 1369f0ccc29..0173d35baba 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -331,12 +331,13 @@ namespace mongo {
// Scoping for write lock.
{
Client::WriteContext ctx(ns);
+ Collection* collection = ctx.ctx().db()->getCollection( ns );
+ if ( !collection ) break;
- NamespaceDetails* nsd = nsdetails( ns );
- if (NULL == nsd) { break; }
- int ii = nsd->findIndexByKeyPattern( indexKeyPattern.toBSON() );
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByKeyPattern( indexKeyPattern.toBSON() );
- auto_ptr<Runner> runner(InternalPlanner::indexScan(ns, nsd, ii, min, max,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(desc, min, max,
maxInclusive,
InternalPlanner::FORWARD,
InternalPlanner::IXSCAN_FETCH));
@@ -433,12 +434,12 @@ namespace mongo {
*numDocs = 0;
Client::ReadContext ctx( ns );
-
- NamespaceDetails* details = nsdetails( ns );
- if ( !details ) return Status( ErrorCodes::NamespaceNotFound, ns );
+ Collection* collection = ctx.ctx().db()->getCollection( ns );
+ if ( !collection ) return Status( ErrorCodes::NamespaceNotFound, ns );
// Require single key
- const IndexDetails *idx = details->findIndexByPrefix( range.keyPattern, true );
+ IndexDescriptor *idx =
+ collection->getIndexCatalog()->findIndexByPrefix( range.keyPattern, true );
if ( idx == NULL ) {
return Status( ErrorCodes::IndexNotFound, range.keyPattern.toString() );
@@ -450,10 +451,10 @@ namespace mongo {
// sizes will vary
long long avgDocsWhenFull;
long long avgDocSizeBytes;
- const long long totalDocsInNS = details->numRecords();
+ const long long totalDocsInNS = collection->numRecords();
if ( totalDocsInNS > 0 ) {
// TODO: Figure out what's up here
- avgDocSizeBytes = details->dataSize() / totalDocsInNS;
+ avgDocSizeBytes = collection->details()->dataSize() / totalDocsInNS;
avgDocsWhenFull = maxChunkSizeBytes / avgDocSizeBytes;
avgDocsWhenFull = std::min( kMaxDocsPerChunk + 1,
130 * avgDocsWhenFull / 100 /* slack */);
@@ -474,7 +475,7 @@ namespace mongo {
bool isLargeChunk = false;
long long docCount = 0;
- auto_ptr<Runner> runner(InternalPlanner::indexScan(ns, details, details->idxNo(*idx), min, max, false));
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, min, max, false));
// we can afford to yield here because any change to the base data that we might miss is
// already being queued and will be migrated in the 'transferMods' stage
runner->setYieldPolicy(Runner::YIELD_AUTO);
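
The chunk-size heuristic above is easier to follow with numbers plugged in. A worked example (the sizes are illustrative, not taken from the commit):

    // dataSize() = 512,000,000 bytes over numRecords() = 1,000,000 documents:
    //   avgDocSizeBytes = 512,000,000 / 1,000,000   = 512
    // With maxChunkSizeBytes = 64 MB = 67,108,864:
    //   avgDocsWhenFull = 67,108,864 / 512          = 131,072
    //   plus 30% slack  : 130 * 131,072 / 100       = 170,393
    //   final value     : min( kMaxDocsPerChunk + 1, 170,393 )
    // The index scan that follows counts documents in the range and uses this
    // threshold to decide whether the chunk is too large to move (isLargeChunk).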
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index 71e7b1a9fbf..30c9825805c 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -86,15 +86,16 @@ namespace mongo {
/**
* Return an index scan. Caller owns returned pointer.
*/
- static Runner* indexScan(const StringData& ns, NamespaceDetails* nsd, int idxNo,
+ static Runner* indexScan(IndexDescriptor* descriptor,
const BSONObj& startKey, const BSONObj& endKey,
bool endKeyInclusive, Direction direction = FORWARD,
int options = 0) {
- verify(NULL != nsd);
+ verify(descriptor);
+
+ const NamespaceString& ns = descriptor->getIndexedCollection()->ns();
IndexScanParams params;
- params.descriptor = CatalogHack::getDescriptor(nsd, idxNo);
- verify(NULL != params.descriptor);
+ params.descriptor = descriptor;
params.direction = direction;
params.bounds.isSimpleRange = true;
params.bounds.startKey = startKey;
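
The signature change in this header is the pivot of the commit: indexScan() no longer needs a namespace, NamespaceDetails, or index ordinal, because the IndexDescriptor already knows its collection (see getIndexedCollection()->ns() above) and the scan params take the descriptor directly. A minimal call against the new signature, with the defaulted arguments spelled out (descriptor obtained as in the other hunks):

    auto_ptr<Runner> runner( InternalPlanner::indexScan( descriptor,
                                                         startKey,
                                                         endKey,
                                                         false,                          // endKeyInclusive
                                                         InternalPlanner::FORWARD,       // direction (default)
                                                         InternalPlanner::IXSCAN_FETCH ) );  // also fetch documents
    runner->setYieldPolicy( Runner::YIELD_AUTO );   // as the sharding callers below do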
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 7798491335d..778349300a3 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -389,14 +389,15 @@ namespace mongo {
*/
bool storeCurrentLocs( long long maxChunkSize , string& errmsg , BSONObjBuilder& result ) {
Client::ReadContext ctx( _ns );
- NamespaceDetails *d = nsdetails( _ns );
- if ( ! d ) {
+ Collection* collection = ctx.ctx().db()->getCollection( _ns );
+ if ( !collection ) {
errmsg = "ns not found, should be impossible";
return false;
}
- const IndexDetails *idx = d->findIndexByPrefix( _shardKeyPattern ,
- true ); /* require single key */
+ IndexDescriptor *idx =
+ collection->getIndexCatalog()->findIndexByPrefix( _shardKeyPattern ,
+ true ); /* require single key */
if ( idx == NULL ) {
errmsg = (string)"can't find index in storeCurrentLocs" + causedBy( errmsg );
@@ -407,8 +408,7 @@ namespace mongo {
BSONObj min = Helpers::toKeyFormat( kp.extendRangeBound( _min, false ) );
BSONObj max = Helpers::toKeyFormat( kp.extendRangeBound( _max, false ) );
- auto_ptr<Runner> runner(InternalPlanner::indexScan(_ns, d, d->idxNo(*idx),
- min, max, false));
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, min, max, false));
// we can afford to yield here because any change to the base data that we might miss is
// already being queued and will be migrated in the 'transferMods' stage
runner->setYieldPolicy(Runner::YIELD_AUTO);
@@ -418,9 +418,9 @@ namespace mongo {
// there's a fair amount of slack before we determine a chunk is too large because object sizes will vary
unsigned long long maxRecsWhenFull;
long long avgRecSize;
- const long long totalRecs = d->numRecords();
+ const long long totalRecs = collection->numRecords();
if ( totalRecs > 0 ) {
- avgRecSize = d->dataSize() / totalRecs;
+ avgRecSize = collection->details()->dataSize() / totalRecs;
maxRecsWhenFull = maxChunkSize / avgRecSize;
maxRecsWhenFull = std::min( (unsigned long long)(Chunk::MaxObjectPerChunk + 1) , 130 * maxRecsWhenFull / 100 /* slack */ );
}
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 0e323d7c6d2..2b47ebe8cbb 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -123,10 +123,10 @@ namespace mongo {
errmsg = "ns not found";
return false;
}
- NamespaceDetails *d = collection->details();
- const IndexDetails *idx = d->findIndexByPrefix( keyPattern ,
- true ); /* require single key */
+ IndexDescriptor *idx =
+ collection->getIndexCatalog()->findIndexByPrefix( keyPattern,
+ true ); /* require single key */
if ( idx == NULL ) {
errmsg = "couldn't find valid index for shard key";
return false;
@@ -142,7 +142,7 @@ namespace mongo {
max = Helpers::toKeyFormat( kp.extendRangeBound( max, false ) );
}
- auto_ptr<Runner> runner(InternalPlanner::indexScan(ns, d, d->idxNo(*idx), min, max,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, min, max,
false, InternalPlanner::FORWARD));
runner->setYieldPolicy(Runner::YIELD_AUTO);
@@ -151,7 +151,7 @@ namespace mongo {
// this index.
// NOTE A local copy of 'missingField' is made because indices may be
// invalidated during a db lock yield.
- BSONObj missingFieldObj = IndexLegacy::getMissingField(collection,idx->info.obj());
+ BSONObj missingFieldObj = IndexLegacy::getMissingField(collection,idx->infoObj());
BSONElement missingField = missingFieldObj.firstElement();
// for now, the only check is that all shard keys are filled
@@ -271,14 +271,17 @@ namespace mongo {
{
// Get the size estimate for this namespace
Client::ReadContext ctx( ns );
- NamespaceDetails *d = nsdetails( ns );
- if ( ! d ) {
+ Collection* collection = ctx.ctx().db()->getCollection( ns );
+ if ( !collection ) {
errmsg = "ns not found";
return false;
}
-
- const IndexDetails *idx = d->findIndexByPrefix( keyPattern ,
- true ); /* require single key */
+
+ NamespaceDetails* d = collection->details();
+
+ IndexDescriptor *idx =
+ collection->getIndexCatalog()->findIndexByPrefix( keyPattern,
+ true ); /* require single key */
if ( idx == NULL ) {
errmsg = (string)"couldn't find index over splitting key " +
keyPattern.clientReadable().toString();
@@ -366,7 +369,7 @@ namespace mongo {
long long currCount = 0;
long long numChunks = 0;
- auto_ptr<Runner> runner(InternalPlanner::indexScan(ns, d, d->idxNo(*idx), min, max,
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, min, max,
false, InternalPlanner::FORWARD));
BSONObj currKey;
@@ -424,9 +427,9 @@ namespace mongo {
keyCount = currCount / 2;
currCount = 0;
log() << "splitVector doing another cycle because of force, keyCount now: " << keyCount << endl;
-
- runner.reset(InternalPlanner::indexScan(ns, d, d->idxNo(*idx), min, max,
- false, InternalPlanner::FORWARD));
+
+ runner.reset(InternalPlanner::indexScan(idx, min, max,
+ false, InternalPlanner::FORWARD));
runner->setYieldPolicy(Runner::YIELD_AUTO);
state = runner->getNext(&currKey, NULL);
@@ -830,10 +833,12 @@ namespace mongo {
for (int i=1; i >= 0 ; i--){ // high chunk more likely to have only one obj
Client::ReadContext ctx( ns );
- NamespaceDetails *d = nsdetails( ns );
+ Collection* collection = ctx.ctx().db()->getCollection( ns );
+ verify( collection );
- const IndexDetails *idx = d->findIndexByPrefix( keyPattern ,
- true ); /* exclude multikeys */
+ IndexDescriptor *idx =
+ collection->getIndexCatalog()->findIndexByPrefix( keyPattern ,
+ true ); /* exclude multikeys */
if ( idx == NULL ) {
break;
}
@@ -843,8 +848,7 @@ namespace mongo {
BSONObj newmin = Helpers::toKeyFormat( kp.extendRangeBound( chunk.min, false) );
BSONObj newmax = Helpers::toKeyFormat( kp.extendRangeBound( chunk.max, false) );
- auto_ptr<Runner> runner(InternalPlanner::indexScan(ns, d, d->idxNo(*idx),
- newmin, newmax, false));
+ auto_ptr<Runner> runner(InternalPlanner::indexScan(idx, newmin, newmax, false));
// check if exactly one document found
if (Runner::RUNNER_ADVANCED == runner->getNext(NULL, NULL)) {
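
The last hunk ends just as the split code checks whether the chunk holds exactly one document. The continuation sits outside this diff's context, but the idea can be sketched with the same Runner API (illustrative only, using just the RUNNER_ADVANCED state that appears above):

    // Exactly one document iff the scan yields a first result but not a second.
    bool exactlyOne = false;
    if ( Runner::RUNNER_ADVANCED == runner->getNext( NULL, NULL ) ) {
        exactlyOne = ( Runner::RUNNER_ADVANCED != runner->getNext( NULL, NULL ) );
    }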