author      Dwight <dmerriman@gmail.com>    2009-08-13 12:28:48 -0400
committer   Dwight <dmerriman@gmail.com>    2009-08-13 12:28:48 -0400
commit      e6258aaa474984c6b0aeb08728fa972e3858cd31 (patch)
tree        cecb8513177ca68fbecf17e21aa4f569e2d9ec91
parent      02959549f205696c7390083fd135c9176fc77e31 (diff)
parent      1fa6ea3b5371c903c21a12df593284a906173f93 (diff)
download    mongo-e6258aaa474984c6b0aeb08728fa972e3858cd31.tar.gz

Merge branch 'master' of git@github.com:mongodb/mongo
-rw-r--r--  SConstruct                           3
-rw-r--r--  client/dbclient.cpp                  5
-rw-r--r--  client/dbclient.h                    4
-rw-r--r--  db/btree.h                          35
-rw-r--r--  db/btreecursor.cpp                  37
-rw-r--r--  db/db.cpp                            4
-rw-r--r--  db/dbcommands.cpp                   31
-rw-r--r--  db/jsobj.h                           1
-rw-r--r--  db/matcher.cpp                      66
-rw-r--r--  db/namespace.cpp                    14
-rw-r--r--  db/namespace.h                       3
-rw-r--r--  db/query.cpp                         3
-rw-r--r--  db/queryoptimizer.cpp               20
-rw-r--r--  db/queryoptimizer.h                 11
-rw-r--r--  db/queryutil.cpp                   116
-rw-r--r--  db/queryutil.h                      77
-rw-r--r--  dbtests/cursortests.cpp             95
-rw-r--r--  dbtests/queryoptimizertests.cpp    112
-rw-r--r--  jstests/exists.js                   28
-rw-r--r--  jstests/stats.js                     9
-rw-r--r--  s/cursors.cpp                        2
-rw-r--r--  scripting/engine.cpp                39
-rw-r--r--  scripting/engine.h                   1
-rw-r--r--  scripting/engine_spidermonkey.cpp   21
-rw-r--r--  shell/collection.js                 23
-rw-r--r--  shell/dbshell.cpp                    1
-rw-r--r--  tools/Tool.cpp                      36
-rw-r--r--  tools/Tool.h                        15
-rw-r--r--  tools/bridge.cpp                     7
-rw-r--r--  tools/dump.cpp                      12
-rw-r--r--  tools/export.cpp                     2
-rw-r--r--  tools/files.cpp                      2
-rw-r--r--  tools/importJSON.cpp                 2
-rw-r--r--  tools/restore.cpp                    1
-rw-r--r--  tools/sniffer.cpp                    5
-rw-r--r--  util/goodies.h                       4
36 files changed, 601 insertions, 246 deletions
diff --git a/SConstruct b/SConstruct
index 8e53fa91bab..5866d859a80 100644
--- a/SConstruct
+++ b/SConstruct
@@ -639,6 +639,9 @@ def doConfigure( myenv , needJava=True , needPcre=True , shell=False ):
# this will add it iff it exists and works
myCheckLib( "boost_system" + boostCompiler + "-mt" )
+ if not conf.CheckCXXHeader( "execinfo.h" ):
+ myenv.Append( CPPDEFINES=[ "NOEXECINFO" ] )
+
if needJava:
for j in javaLibs:
myCheckLib( j , True , True )
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index 6f10b8338c9..2ecfcc750f8 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -817,6 +817,11 @@ namespace mongo {
return master == Left ? left : right;
}
+ DBClientConnection& DBClientPaired::slaveConn(){
+ DBClientConnection& m = checkMaster();
+ return master == Left ? right : left;
+ }
+
bool DBClientPaired::connect(const string &serverHostname1, const string &serverHostname2) {
string errmsg;
bool l = left.connect(serverHostname1, errmsg);
diff --git a/client/dbclient.h b/client/dbclient.h
index 8f520ee8f2d..9a65f643e5e 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -789,11 +789,13 @@ namespace mongo {
void isntMaster() {
master = ( ( master == Left ) ? NotSetR : NotSetL );
}
-
+
string getServerAddress() const {
return left.getServerAddress() + "," + right.getServerAddress();
}
+ DBClientConnection& slaveConn();
+
/* TODO - not yet implemented. mongos may need these. */
virtual bool call( Message &toSend, Message &response, bool assertOk=true ) { assert(false); return false; }
virtual void say( Message &toSend ) { assert(false); }
diff --git a/db/btree.h b/db/btree.h
index e3587f97543..1d58617c7be 100644
--- a/db/btree.h
+++ b/db/btree.h
@@ -215,15 +215,17 @@ namespace mongo {
};
class BtreeCursor : public Cursor {
- friend class BtreeBucket;
- NamespaceDetails *d;
- int idxNo;
- BSONObj startKey;
- BSONObj endKey;
- bool endKeyInclusive_;
- bool multikey; // note this must be updated every getmore batch in case someone added a multikey...
public:
BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
+
+ // a BoundList contains intervals specified by inclusive start
+ // and end bounds. The intervals should be nonoverlapping and occur in
+ // the specified direction of traversal. For example, given a simple index {i:1}
+ // and direction +1, one valid BoundList is: (1, 2); (4, 6). The same BoundList
+ // would be valid for index {i:-1} with direction -1.
+ typedef vector< pair< BSONObj, BSONObj > > BoundList;
+ BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const vector< pair< BSONObj, BSONObj > > &_bounds, int _direction );
+
virtual bool ok() {
return !bucket.isNull();
}
@@ -290,13 +292,14 @@ namespace mongo {
virtual string toString() {
string s = string("BtreeCursor ") + indexDetails.indexName();
if ( direction < 0 ) s += " reverse";
+ if ( bounds_.size() > 1 ) s += " multi";
return s;
}
BSONObj prettyKey( const BSONObj &key ) const {
return key.replaceFieldNames( indexDetails.keyPattern() ).clientReadable();
}
-
+
virtual BSONObj prettyStartKey() const {
return prettyKey( startKey );
}
@@ -315,6 +318,20 @@ namespace mongo {
/* Check if the current key is beyond endKey. */
void checkEnd();
+ // selective audits on construction
+ void audit();
+
+ // init start / end keys with a new range
+ void init();
+
+ friend class BtreeBucket;
+ NamespaceDetails *d;
+ int idxNo;
+ BSONObj startKey;
+ BSONObj endKey;
+ bool endKeyInclusive_;
+ bool multikey; // note this must be updated every getmore batch in case someone added a multikey...
+
const IndexDetails& indexDetails;
BSONObj order;
DiskLoc bucket;
@@ -322,6 +339,8 @@ namespace mongo {
int direction; // 1=fwd,-1=reverse
BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call
DiskLoc locAtKeyOfs;
+ BoundList bounds_;
+ unsigned boundIndex_;
};
#pragma pack()
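
Note: the new BoundList constructor can be exercised roughly like this (a minimal sketch mirroring the dbtests added further down in this commit; the namespace name and index slot are assumptions):

    // assumes an index {a:1} in slot 1 of an existing collection, as in dbtests/cursortests.cpp;
    // the dbtests take a dblock and call setClient( ns ) before doing this
    BtreeCursor::BoundList bounds;
    bounds.push_back( pair< BSONObj, BSONObj >( BSON( "" << 1 ), BSON( "" << 2 ) ) );  // [1,2]
    bounds.push_back( pair< BSONObj, BSONObj >( BSON( "" << 4 ), BSON( "" << 6 ) ) );  // [4,6]
    NamespaceDetails *d = nsdetails( "test.foo" );      // hypothetical namespace
    BtreeCursor c( d, 1, d->indexes[ 1 ], bounds, 1 );
    while ( c.ok() ) {
        cout << c.currKey().firstElement().number() << endl;  // 1, 2, 4, 5, 6 when those keys exist
        c.advance();
    }

As the comment above states, the intervals must be non-overlapping and ordered in the direction of traversal.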
diff --git a/db/btreecursor.cpp b/db/btreecursor.cpp
index 55028036036..e3e942dfe60 100644
--- a/db/btreecursor.cpp
+++ b/db/btreecursor.cpp
@@ -34,14 +34,37 @@ namespace mongo {
startKey( _startKey ),
endKey( _endKey ),
endKeyInclusive_( endKeyInclusive ),
+ multikey( d->isMultikey( idxNo ) ),
indexDetails( _id ),
order( _id.keyPattern() ),
- direction( _direction )
+ direction( _direction ),
+ boundIndex_()
{
+ audit();
+ init();
+ }
+
+ BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const vector< pair< BSONObj, BSONObj > > &_bounds, int _direction )
+ :
+ d(_d), idxNo(_idxNo),
+ endKeyInclusive_( true ),
+ multikey( d->isMultikey( idxNo ) ),
+ indexDetails( _id ),
+ order( _id.keyPattern() ),
+ direction( _direction ),
+ bounds_( _bounds ),
+ boundIndex_()
+ {
+ assert( !bounds_.empty() );
+ startKey = bounds_[ 0 ].first;
+ endKey = bounds_[ 0 ].second;
+ audit();
+ init();
+ }
+
+ void BtreeCursor::audit() {
dassert( d->idxNo((IndexDetails&) indexDetails) == idxNo );
- multikey = d->isMultikey(idxNo);
- bool found;
if ( otherTraceLevel >= 12 ) {
if ( otherTraceLevel >= 200 ) {
out() << "::BtreeCursor() qtl>200. validating entire index." << endl;
@@ -52,7 +75,10 @@ namespace mongo {
indexDetails.head.btree()->dump();
}
}
+ }
+ void BtreeCursor::init() {
+ bool found;
bucket = indexDetails.head.btree()->
locate(indexDetails, indexDetails.head, startKey, order, keyOfs, found, direction > 0 ? minDiskLoc : maxDiskLoc, direction);
@@ -104,6 +130,11 @@ namespace mongo {
bucket = bucket.btree()->advance(bucket, keyOfs, direction, "BtreeCursor::advance");
skipUnusedKeys();
checkEnd();
+ while( !ok() && ++boundIndex_ < bounds_.size() ) {
+ startKey = bounds_[ boundIndex_ ].first;
+ endKey = bounds_[ boundIndex_ ].second;
+ init();
+ }
return !bucket.isNull();
}
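
Note: this loop is what makes the cursor hop over empty intervals. In the MultiRangeGap dbtest below (documents with a = 0..9 and a = 100..109, bounds (-50,2), (40,60), (109,200)), advance() produces 0, 1, 2 and then, since the (40,60) interval yields nothing, re-initializes on (109,200) and returns 109 next.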
diff --git a/db/db.cpp b/db/db.cpp
index 074cb1b3d7a..e06461ccd75 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -243,6 +243,10 @@ namespace mongo {
problem() << "Uncaught exception, terminating" << endl;
dbexit( EXIT_UNCAUGHT );
}
+
+ // any thread cleanup can happen here
+
+ globalScriptEngine->threadDone();
}
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 9f6d2633c03..98e3e3a10d9 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -944,6 +944,37 @@ namespace mongo {
}
} cmdDatasize;
+ class CollectionStats : public Command {
+ public:
+ CollectionStats() : Command( "collstats" ) {}
+ virtual bool slaveOk() { return true; }
+ virtual void help( stringstream &help ) const {
+ help << " example: { collstats:\"blog.posts\" } ";
+ }
+ bool run(const char *dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ string ns = dbname;
+ if ( ns.find( "." ) != string::npos )
+ ns = ns.substr( 0 , ns.find( "." ) );
+ ns += ".";
+ ns += jsobj.firstElement().valuestr();
+
+ NamespaceDetails * nsd = nsdetails( ns.c_str() );
+ if ( ! nsd ){
+ errmsg = "ns not found";
+ return false;
+ }
+
+ result.append( "ns" , ns.c_str() );
+
+ result.append( "count" , nsd->nrecords );
+ result.append( "size" , nsd->datasize );
+ result.append( "storageSize" , nsd->storageSize() );
+ result.append( "nindexes" , nsd->nIndexes );
+
+ return true;
+ }
+ } cmdCollectionStatis;
+
class CmdBuildInfo : public Command {
public:
CmdBuildInfo() : Command( "buildinfo" ) {}
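
Note: a rough sketch of invoking the new collstats command from the C++ client (the host and names are assumptions; runCommand is the generic command helper, and the shell-side wrapper added in this commit is db.collection.stats() in shell/collection.js):

    // minimal sketch, assuming a mongod on localhost with a blog.posts collection
    DBClientConnection conn;
    string errmsg;
    if ( conn.connect( "localhost", errmsg ) ) {
        BSONObj info;
        conn.runCommand( "blog", BSON( "collstats" << "posts" ), info );
        cout << info << endl;  // { ns: "blog.posts", count: ..., size: ..., storageSize: ..., nindexes: ... }
    }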
diff --git a/db/jsobj.h b/db/jsobj.h
index 8d80c80144a..c3d87c83ff3 100644
--- a/db/jsobj.h
+++ b/db/jsobj.h
@@ -876,6 +876,7 @@ namespace mongo {
opSIZE = 0x0A,
opALL = 0x0B,
NIN = 0x0C,
+ opEXISTS = 0x0D,
};
};
ostream& operator<<( ostream &s, const BSONObj &o );
diff --git a/db/matcher.cpp b/db/matcher.cpp
index 68e69a81654..bc6792c7e67 100644
--- a/db/matcher.cpp
+++ b/db/matcher.cpp
@@ -257,7 +257,7 @@ namespace mongo {
ok = true;
all = true;
}
- else if ( fn[1] == 's' && fn[2] == 'i' && fn[3] == 'z' && fn[4] == 'e' && fe.isNumber() ) {
+ else if ( fn[1] == 's' && fn[2] == 'i' && fn[3] == 'z' && fn[4] == 'e' && fn[5] == 0 && fe.isNumber() ) {
shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
builders_.push_back( b );
b->appendAs(fe, e.fieldName());
@@ -265,6 +265,13 @@ namespace mongo {
haveSize = true;
ok = true;
}
+ else if ( fn[1] == 'e' && fn[2] == 'x' && fn[3] == 'i' && fn[4] == 's' && fn[5] == 't' && fn[6] == 's' && fn[7] == 0 && fe.isBoolean() ) {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ builders_.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::opEXISTS);
+ ok = true;
+ }
else
uassert( (string)"invalid $operator: " + fn , false);
}
@@ -326,6 +333,12 @@ namespace mongo {
else
return -ret;
}
+
+ int retMissing( const BasicMatcher &bm ) {
+ if ( bm.compareOp != BSONObj::opEXISTS )
+ return 0;
+ return bm.toMatch.boolean() ? -1 : 1;
+ }
/* Check if a particular field matches.
@@ -349,25 +362,25 @@ namespace mongo {
*/
int JSMatcher::matchesDotted(const char *fieldName, const BSONElement& toMatch, const BSONObj& obj, int compareOp, const BasicMatcher& bm , bool *deep, bool isArr) {
- if ( compareOp == BSONObj::opALL ) {
- if ( bm.myset->size() == 0 )
- return -1; // is this desired?
- BSONObjSetDefaultOrder actualKeys;
- getKeysFromObject( BSON( fieldName << 1 ), obj, actualKeys );
- if ( actualKeys.size() == 0 )
- return 0;
- for( set< BSONElement, element_lt >::const_iterator i = bm.myset->begin(); i != bm.myset->end(); ++i ) {
- // ignore nulls
- if ( i->type() == jstNULL )
- continue;
- // parallel traversal would be faster worst case I guess
- BSONObjBuilder b;
- b.appendAs( *i, "" );
- if ( !actualKeys.count( b.done() ) )
- return -1;
- }
- return 1;
- }
+ if ( compareOp == BSONObj::opALL ) {
+ if ( bm.myset->size() == 0 )
+ return -1; // is this desired?
+ BSONObjSetDefaultOrder actualKeys;
+ getKeysFromObject( BSON( fieldName << 1 ), obj, actualKeys );
+ if ( actualKeys.size() == 0 )
+ return 0;
+ for( set< BSONElement, element_lt >::const_iterator i = bm.myset->begin(); i != bm.myset->end(); ++i ) {
+ // ignore nulls
+ if ( i->type() == jstNULL )
+ continue;
+ // parallel traversal would be faster worst case I guess
+ BSONObjBuilder b;
+ b.appendAs( *i, "" );
+ if ( !actualKeys.count( b.done() ) )
+ return -1;
+ }
+ return 1;
+ }
if ( compareOp == BSONObj::NE )
return matchesNe( fieldName, toMatch, obj, bm, deep );
@@ -376,7 +389,6 @@ namespace mongo {
int ret = matchesNe( fieldName, *i, obj, bm, deep );
if ( ret != 1 )
return ret;
- // code to handle 0 (missing) return value doesn't deal with nin yet
}
return 1;
}
@@ -403,7 +415,7 @@ namespace mongo {
}
}
}
- return found ? -1 : 0;
+ return found ? -1 : retMissing( bm );
}
const char *p = strchr(fieldName, '.');
if ( p ) {
@@ -411,9 +423,9 @@ namespace mongo {
BSONElement se = obj.getField(left.c_str());
if ( se.eoo() )
- return 0;
+ return retMissing( bm );
if ( se.type() != Object && se.type() != Array )
- return 0;
+ return retMissing( bm );
BSONObj eo = se.embeddedObject();
return matchesDotted(p+1, toMatch, eo, compareOp, bm, deep, se.type() == Array);
@@ -421,8 +433,10 @@ namespace mongo {
e = obj.getField(fieldName);
}
}
-
- if ( ( e.type() != Array || indexed || compareOp == BSONObj::opSIZE ) &&
+
+ if ( compareOp == BSONObj::opEXISTS ) {
+ return ( e.eoo() ^ toMatch.boolean() ) ? 1 : -1;
+ } else if ( ( e.type() != Array || indexed || compareOp == BSONObj::opSIZE ) &&
valuesMatch(e, toMatch, compareOp, bm, deep) ) {
return 1;
} else if ( e.type() == Array && compareOp != BSONObj::opSIZE ) {
diff --git a/db/namespace.cpp b/db/namespace.cpp
index 14eef59e34a..a5c21439876 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -504,7 +504,19 @@ namespace mongo {
}
return -1;
}
-
+
+ long long NamespaceDetails::storageSize(){
+ Extent * e = firstExtent.ext();
+ assert( e );
+
+ long long total = 0;
+ while ( e ){
+ total += e->length;
+ e = e->getNextExtent();
+ }
+ return total;
+ }
+
/* ------------------------------------------------------------------------- */
map< string, shared_ptr< NamespaceDetailsTransient > > NamespaceDetailsTransient::map_;
diff --git a/db/namespace.h b/db/namespace.h
index bcc56cb7ef2..4a207f4a557 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -379,6 +379,9 @@ namespace mongo {
void checkMigrate();
+ long long storageSize();
+
+
private:
bool cappedMayDelete() const {
return !( flags & Flag_CappedDisallowDelete );
diff --git a/db/query.cpp b/db/query.cpp
index 83e9fa7fa5f..204a1562963 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -1401,7 +1401,8 @@ namespace mongo {
if ( d ){
int i = d->findIdIndex();
if( i < 0 ) {
- log() << "warning: no _id index on $snapshot query, ns:" << ns << endl;
+ if ( strstr( ns , ".system." ) == 0 )
+ log() << "warning: no _id index on $snapshot query, ns:" << ns << endl;
}
else {
/* [dm] the name of an _id index tends to vary, so we build the hint the hard way here.
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index 2d6d7033506..f2b03e9cd33 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -33,7 +33,7 @@ namespace mongo {
QueryPlan::QueryPlan(
NamespaceDetails *_d, int _idxNo,
- const FieldBoundSet &fbs, const BSONObj &order, const BSONObj &startKey, const BSONObj &endKey ) :
+ const FieldRangeSet &fbs, const BSONObj &order, const BSONObj &startKey, const BSONObj &endKey ) :
d(_d), idxNo(_idxNo),
fbs_( fbs ),
order_( order ),
@@ -76,7 +76,7 @@ namespace mongo {
goto doneCheckOrder;
if ( strcmp( oe.fieldName(), ke.fieldName() ) == 0 )
break;
- if ( !fbs.bound( ke.fieldName() ).equality() )
+ if ( !fbs.range( ke.fieldName() ).equality() )
goto doneCheckOrder;
}
int d = elementDirection( oe ) == elementDirection( ke ) ? 1 : -1;
@@ -100,11 +100,11 @@ namespace mongo {
BSONElement e = i.next();
if ( e.eoo() )
break;
- const FieldBound &fb = fbs.bound( e.fieldName() );
+ const FieldRange &fb = fbs.range( e.fieldName() );
int number = (int) e.number(); // returns 0.0 if not numeric
bool forward = ( ( number >= 0 ? 1 : -1 ) * ( direction_ >= 0 ? 1 : -1 ) > 0 );
- startKeyBuilder.appendAs( forward ? fb.lower() : fb.upper(), "" );
- endKeyBuilder.appendAs( forward ? fb.upper() : fb.lower(), "" );
+ startKeyBuilder.appendAs( forward ? fb.min() : fb.max(), "" );
+ endKeyBuilder.appendAs( forward ? fb.max() : fb.min(), "" );
if ( stillOptimalIndexedQueryCount ) {
if ( fb.nontrivial() )
++optimalIndexedQueryCount;
@@ -115,16 +115,16 @@ namespace mongo {
optimalIndexedQueryCount = -1;
}
if ( fb.equality() ) {
- BSONElement e = fb.upper();
+ BSONElement e = fb.max();
if ( !e.isNumber() && !e.mayEncapsulate() && e.type() != RegEx )
++exactIndexedQueryCount;
}
orderFieldsUnindexed.erase( e.fieldName() );
}
if ( !scanAndOrderRequired_ &&
- ( optimalIndexedQueryCount == fbs.nNontrivialBounds() ) )
+ ( optimalIndexedQueryCount == fbs.nNontrivialRanges() ) )
optimal_ = true;
- if ( exactIndexedQueryCount == fbs.nNontrivialBounds() &&
+ if ( exactIndexedQueryCount == fbs.nNontrivialRanges() &&
orderFieldsUnindexed.size() == 0 &&
exactIndexedQueryCount == index_->keyPattern().nFields() &&
exactIndexedQueryCount == fbs.query().nFields() ) {
@@ -135,7 +135,7 @@ namespace mongo {
if ( endKey_.isEmpty() )
endKey_ = endKeyBuilder.obj();
if ( ( scanAndOrderRequired_ || order_.isEmpty() ) &&
- !fbs.bound( idxKey.firstElement().fieldName() ).nontrivial() )
+ !fbs.range( idxKey.firstElement().fieldName() ).nontrivial() )
unhelpful_ = true;
}
@@ -289,7 +289,7 @@ namespace mongo {
return;
// If table scan is optimal or natural order requested
- if ( ( fbs_.nNontrivialBounds() == 0 && order_.isEmpty() ) ||
+ if ( ( fbs_.nNontrivialRanges() == 0 && order_.isEmpty() ) ||
( !order_.isEmpty() && !strcmp( order_.firstElement().fieldName(), "$natural" ) ) ) {
// Table scan plan
addPlan( PlanPtr( new QueryPlan( d, -1, fbs_, order_ ) ), checkFirst );
diff --git a/db/queryoptimizer.h b/db/queryoptimizer.h
index ceb50ff2404..651cd0e0b7a 100644
--- a/db/queryoptimizer.h
+++ b/db/queryoptimizer.h
@@ -29,12 +29,11 @@ namespace mongo {
public:
QueryPlan(NamespaceDetails *_d,
int _idxNo, // -1 = no index
- const FieldBoundSet &fbs,
+ const FieldRangeSet &fbs,
const BSONObj &order,
const BSONObj &startKey = BSONObj(),
const BSONObj &endKey = BSONObj() );
-// QueryPlan( const QueryPlan &other );
/* If true, no other index can do better. */
bool optimal() const { return optimal_; }
/* ScanAndOrder processing will be required if true */
@@ -55,12 +54,12 @@ namespace mongo {
const char *ns() const { return fbs_.ns(); }
BSONObj query() const { return fbs_.query(); }
BSONObj simplifiedQuery( const BSONObj& fields = BSONObj() ) const { return fbs_.simplifiedQuery( fields ); }
- const FieldBound &bound( const char *fieldName ) const { return fbs_.bound( fieldName ); }
+ const FieldRange &range( const char *fieldName ) const { return fbs_.range( fieldName ); }
void registerSelf( long long nScanned ) const;
private:
NamespaceDetails *d;
int idxNo;
- const FieldBoundSet &fbs_;
+ const FieldRangeSet &fbs_;
const BSONObj &order_;
const IndexDetails *index_;
bool optimal_;
@@ -120,7 +119,7 @@ namespace mongo {
shared_ptr< T > runOp( T &op ) {
return dynamic_pointer_cast< T >( runOp( static_cast< QueryOp& >( op ) ) );
}
- const FieldBoundSet &fbs() const { return fbs_; }
+ const FieldRangeSet &fbs() const { return fbs_; }
BSONObj explain() const;
bool usingPrerecordedPlan() const { return usingPrerecordedPlan_; }
private:
@@ -143,7 +142,7 @@ namespace mongo {
static void nextOp( QueryOp &op );
};
const char *ns;
- FieldBoundSet fbs_;
+ FieldRangeSet fbs_;
PlanSet plans_;
bool mayRecordPlan_;
bool usingPrerecordedPlan_;
diff --git a/db/queryutil.cpp b/db/queryutil.cpp
index bb9b544b110..ed05a083e0d 100644
--- a/db/queryutil.cpp
+++ b/db/queryutil.cpp
@@ -24,36 +24,36 @@
namespace mongo {
- FieldBound::FieldBound( const BSONElement &e , bool optimize ) :
- lower_( minKey.firstElement() ),
- lowerInclusive_( true ),
- upper_( maxKey.firstElement() ),
- upperInclusive_( true ) {
+ FieldRange::FieldRange( const BSONElement &e, bool optimize ) {
+ lower() = minKey.firstElement();
+ lowerInclusive() = true;
+ upper() = maxKey.firstElement();
+ upperInclusive() = true;
if ( e.eoo() )
return;
if ( e.type() == RegEx ) {
const char *r = e.simpleRegex();
if ( r ) {
- lower_ = addObj( BSON( "" << r ) ).firstElement();
- upper_ = addObj( BSON( "" << simpleRegexEnd( r ) ) ).firstElement();
- upperInclusive_ = false;
+ lower() = addObj( BSON( "" << r ) ).firstElement();
+ upper() = addObj( BSON( "" << simpleRegexEnd( r ) ) ).firstElement();
+ upperInclusive() = false;
}
return;
}
switch( e.getGtLtOp() ) {
case BSONObj::Equality:
- lower_ = e;
- upper_ = e;
+ lower() = e;
+ upper() = e;
break;
case BSONObj::LT:
- upperInclusive_ = false;
+ upperInclusive() = false;
case BSONObj::LTE:
- upper_ = e;
+ upper() = e;
break;
case BSONObj::GT:
- lowerInclusive_ = false;
+ lowerInclusive() = false;
case BSONObj::GTE:
- lower_ = e;
+ lower() = e;
break;
case BSONObj::opALL: {
massert( "$all requires array", e.type() == Array );
@@ -61,7 +61,7 @@ namespace mongo {
if ( i.moreWithEOO() ) {
BSONElement f = i.next();
if ( !f.eoo() )
- lower_ = upper_ = f;
+ lower() = upper() = f;
}
break;
}
@@ -79,62 +79,62 @@ namespace mongo {
if ( min.woCompare( f, false ) > 0 )
min = f;
}
- lower_ = min;
- upper_ = max;
+ lower() = min;
+ upper() = max;
}
default:
break;
}
if ( optimize ){
- if ( lower_.type() != MinKey && upper_.type() == MaxKey && lower_.isSimpleType() ){ // TODO: get rid of isSimpleType
+ if ( lower().type() != MinKey && upper().type() == MaxKey && lower().isSimpleType() ){ // TODO: get rid of isSimpleType
BSONObjBuilder b;
- b.appendMaxForType( lower_.fieldName() , lower_.type() );
- upper_ = addObj( b.obj() ).firstElement();
+ b.appendMaxForType( lower().fieldName() , lower().type() );
+ upper() = addObj( b.obj() ).firstElement();
}
- else if ( lower_.type() == MinKey && upper_.type() != MaxKey && upper_.isSimpleType() ){ // TODO: get rid of isSimpleType
+ else if ( lower().type() == MinKey && upper().type() != MaxKey && upper().isSimpleType() ){ // TODO: get rid of isSimpleType
BSONObjBuilder b;
- b.appendMinForType( upper_.fieldName() , upper_.type() );
- lower_ = addObj( b.obj() ).firstElement();
+ b.appendMinForType( upper().fieldName() , upper().type() );
+ lower() = addObj( b.obj() ).firstElement();
}
}
}
- const FieldBound &FieldBound::operator&=( const FieldBound &other ) {
+ const FieldRange &FieldRange::operator&=( const FieldRange &other ) {
int cmp;
- cmp = other.upper_.woCompare( upper_, false );
+ cmp = other.max().woCompare( upper(), false );
if ( cmp == 0 )
- if ( !other.upperInclusive_ )
- upperInclusive_ = false;
+ if ( !other.maxInclusive() )
+ upperInclusive() = false;
if ( cmp < 0 ) {
- upper_ = other.upper_;
- upperInclusive_ = other.upperInclusive_;
+ upper() = other.max();
+ upperInclusive() = other.maxInclusive();
}
- cmp = other.lower_.woCompare( lower_, false );
+ cmp = other.min().woCompare( lower(), false );
if ( cmp == 0 )
- if ( !other.lowerInclusive_ )
- lowerInclusive_ = false;
+ if ( !other.minInclusive() )
+ lowerInclusive() = false;
if ( cmp > 0 ) {
- lower_ = other.lower_;
- lowerInclusive_ = other.lowerInclusive_;
+ lower() = other.min();
+ lowerInclusive() = other.minInclusive();
}
for( vector< BSONObj >::const_iterator i = other.objData_.begin(); i != other.objData_.end(); ++i )
objData_.push_back( *i );
return *this;
}
- string FieldBound::simpleRegexEnd( string regex ) {
+ string FieldRange::simpleRegexEnd( string regex ) {
++regex[ regex.length() - 1 ];
return regex;
}
- BSONObj FieldBound::addObj( const BSONObj &o ) {
+ BSONObj FieldRange::addObj( const BSONObj &o ) {
objData_.push_back( o );
return o;
}
- FieldBoundSet::FieldBoundSet( const char *ns, const BSONObj &query , bool optimize ) :
+ FieldRangeSet::FieldRangeSet( const char *ns, const BSONObj &query , bool optimize ) :
ns_( ns ),
query_( query.getOwned() ) {
BSONObjIterator i( query_ );
@@ -145,7 +145,7 @@ namespace mongo {
if ( strcmp( e.fieldName(), "$where" ) == 0 )
continue;
if ( getGtLtOp( e ) == BSONObj::Equality ) {
- bounds_[ e.fieldName() ] &= FieldBound( e , optimize );
+ ranges_[ e.fieldName() ] &= FieldRange( e , optimize );
}
else {
BSONObjIterator i( e.embeddedObject() );
@@ -153,24 +153,24 @@ namespace mongo {
BSONElement f = i.next();
if ( f.eoo() )
break;
- bounds_[ e.fieldName() ] &= FieldBound( f , optimize );
+ ranges_[ e.fieldName() ] &= FieldRange( f , optimize );
}
}
}
}
- FieldBound *FieldBoundSet::trivialBound_ = 0;
- FieldBound &FieldBoundSet::trivialBound() {
- if ( trivialBound_ == 0 )
- trivialBound_ = new FieldBound();
- return *trivialBound_;
+ FieldRange *FieldRangeSet::trivialRange_ = 0;
+ FieldRange &FieldRangeSet::trivialRange() {
+ if ( trivialRange_ == 0 )
+ trivialRange_ = new FieldRange();
+ return *trivialRange_;
}
- BSONObj FieldBoundSet::simplifiedQuery( const BSONObj &_fields ) const {
+ BSONObj FieldRangeSet::simplifiedQuery( const BSONObj &_fields ) const {
BSONObj fields = _fields;
if ( fields.isEmpty() ) {
BSONObjBuilder b;
- for( map< string, FieldBound >::const_iterator i = bounds_.begin(); i != bounds_.end(); ++i ) {
+ for( map< string, FieldRange >::const_iterator i = ranges_.begin(); i != ranges_.end(); ++i ) {
b.append( i->first.c_str(), 1 );
}
fields = b.obj();
@@ -182,29 +182,29 @@ namespace mongo {
if ( e.eoo() )
break;
const char *name = e.fieldName();
- const FieldBound &bound = bounds_[ name ];
- if ( bound.equality() )
- b.appendAs( bound.lower(), name );
- else if ( bound.nontrivial() ) {
+ const FieldRange &range = ranges_[ name ];
+ if ( range.equality() )
+ b.appendAs( range.min(), name );
+ else if ( range.nontrivial() ) {
BSONObjBuilder c;
- if ( bound.lower().type() != MinKey )
- c.appendAs( bound.lower(), bound.lowerInclusive() ? "$gte" : "$gt" );
- if ( bound.upper().type() != MaxKey )
- c.appendAs( bound.upper(), bound.upperInclusive() ? "$lte" : "$lt" );
+ if ( range.min().type() != MinKey )
+ c.appendAs( range.min(), range.minInclusive() ? "$gte" : "$gt" );
+ if ( range.max().type() != MaxKey )
+ c.appendAs( range.max(), range.maxInclusive() ? "$lte" : "$lt" );
b.append( name, c.done() );
}
}
return b.obj();
}
- QueryPattern FieldBoundSet::pattern( const BSONObj &sort ) const {
+ QueryPattern FieldRangeSet::pattern( const BSONObj &sort ) const {
QueryPattern qp;
- for( map< string, FieldBound >::const_iterator i = bounds_.begin(); i != bounds_.end(); ++i ) {
+ for( map< string, FieldRange >::const_iterator i = ranges_.begin(); i != ranges_.end(); ++i ) {
if ( i->second.equality() ) {
qp.fieldTypes_[ i->first ] = QueryPattern::Equality;
} else if ( i->second.nontrivial() ) {
- bool upper = i->second.upper().type() != MaxKey;
- bool lower = i->second.lower().type() != MinKey;
+ bool upper = i->second.max().type() != MaxKey;
+ bool lower = i->second.min().type() != MinKey;
if ( upper && lower )
qp.fieldTypes_[ i->first ] = QueryPattern::UpperAndLowerBound;
else if ( upper )
diff --git a/db/queryutil.h b/db/queryutil.h
index 5b925622081..35a2267a2bc 100644
--- a/db/queryutil.h
+++ b/db/queryutil.h
@@ -22,34 +22,47 @@
namespace mongo {
- // bounds on a field's value that may be determined from query -- used to
+ struct FieldBound {
+ BSONElement bound_;
+ bool inclusive_;
+ };
+
+ struct FieldInterval {
+ FieldBound lower_;
+ FieldBound upper_;
+ };
+
+ // range of a field's value that may be determined from query -- used to
// determine index limits
- class FieldBound {
+ class FieldRange {
public:
- FieldBound( const BSONElement &e = BSONObj().firstElement() , bool optimize=true );
- const FieldBound &operator&=( const FieldBound &other );
- BSONElement lower() const { return lower_; }
- BSONElement upper() const { return upper_; }
- bool lowerInclusive() const { return lowerInclusive_; }
- bool upperInclusive() const { return upperInclusive_; }
+ FieldRange( const BSONElement &e = BSONObj().firstElement() , bool optimize=true );
+ const FieldRange &operator&=( const FieldRange &other );
+ BSONElement min() const { return interval_.lower_.bound_; }
+ BSONElement max() const { return interval_.upper_.bound_; }
+ bool minInclusive() const { return interval_.lower_.inclusive_; }
+ bool maxInclusive() const { return interval_.upper_.inclusive_; }
bool equality() const {
return
- lower_.woCompare( upper_, false ) == 0 &&
- upperInclusive_ &&
- lowerInclusive_;
+ min().woCompare( max(), false ) == 0 &&
+ maxInclusive() &&
+ minInclusive();
}
bool nontrivial() const {
return
- minKey.firstElement().woCompare( lower_, false ) != 0 ||
- maxKey.firstElement().woCompare( upper_, false ) != 0;
+ minKey.firstElement().woCompare( min(), false ) != 0 ||
+ maxKey.firstElement().woCompare( max(), false ) != 0;
}
private:
+ // towards replacing interval_ with a set of intervals
+ BSONElement &lower() { return interval_.lower_.bound_; }
+ BSONElement &upper() { return interval_.upper_.bound_; }
+ bool &lowerInclusive() { return interval_.lower_.inclusive_; }
+ bool &upperInclusive() { return interval_.upper_.inclusive_; }
+
BSONObj addObj( const BSONObj &o );
string simpleRegexEnd( string regex );
- BSONElement lower_;
- bool lowerInclusive_;
- BSONElement upper_;
- bool upperInclusive_;
+ FieldInterval interval_;
vector< BSONObj > objData_;
};
@@ -57,7 +70,7 @@ namespace mongo {
// similar to an earlier query and should use the same plan
class QueryPattern {
public:
- friend class FieldBoundSet;
+ friend class FieldRangeSet;
enum Type {
Equality,
LowerBound,
@@ -118,20 +131,20 @@ namespace mongo {
BSONObj sort_;
};
- // bounds on fields' value that may be determined from query -- used to
+ // ranges of fields' value that may be determined from query -- used to
// determine index limits
- class FieldBoundSet {
+ class FieldRangeSet {
public:
- FieldBoundSet( const char *ns, const BSONObj &query , bool optimize=true );
- const FieldBound &bound( const char *fieldName ) const {
- map< string, FieldBound >::const_iterator f = bounds_.find( fieldName );
- if ( f == bounds_.end() )
- return trivialBound();
+ FieldRangeSet( const char *ns, const BSONObj &query , bool optimize=true );
+ const FieldRange &range( const char *fieldName ) const {
+ map< string, FieldRange >::const_iterator f = ranges_.find( fieldName );
+ if ( f == ranges_.end() )
+ return trivialRange();
return f->second;
}
- int nNontrivialBounds() const {
+ int nNontrivialRanges() const {
int count = 0;
- for( map< string, FieldBound >::const_iterator i = bounds_.begin(); i != bounds_.end(); ++i )
+ for( map< string, FieldRange >::const_iterator i = ranges_.begin(); i != ranges_.end(); ++i )
if ( i->second.nontrivial() )
++count;
return count;
@@ -141,16 +154,16 @@ namespace mongo {
// if fields is specified, order fields of returned object to match those of 'fields'
BSONObj simplifiedQuery( const BSONObj &fields = BSONObj() ) const;
bool matchPossible() const {
- for( map< string, FieldBound >::const_iterator i = bounds_.begin(); i != bounds_.end(); ++i )
- if ( i->second.lower().woCompare( i->second.upper(), false ) > 0 )
+ for( map< string, FieldRange >::const_iterator i = ranges_.begin(); i != ranges_.end(); ++i )
+ if ( i->second.min().woCompare( i->second.max(), false ) > 0 )
return false;
return true;
}
QueryPattern pattern( const BSONObj &sort = BSONObj() ) const;
private:
- static FieldBound *trivialBound_;
- static FieldBound &trivialBound();
- mutable map< string, FieldBound > bounds_;
+ static FieldRange *trivialRange_;
+ static FieldRange &trivialRange();
+ mutable map< string, FieldRange > ranges_;
const char *ns_;
BSONObj query_;
};
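
Note: to make the renamed interface concrete, a minimal sketch of resolving query ranges through FieldRangeSet (it mirrors the FieldRangeTests below; the namespace string is arbitrary):

    FieldRangeSet s( "ns", BSON( "a" << GT << 1 << LT << 10 << "b" << 5 ) );
    const FieldRange &a = s.range( "a" );
    // a.min() == 1, a.minInclusive() == false, a.max() == 10, a.maxInclusive() == false
    // a.nontrivial() == true, a.equality() == false
    const FieldRange &b = s.range( "b" );
    // b.equality() == true -- both bounds are 5 and both inclusive
    const FieldRange &c = s.range( "c" );
    // c is the trivial range [MinKey, MaxKey]; c.nontrivial() == false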
diff --git a/dbtests/cursortests.cpp b/dbtests/cursortests.cpp
index fafc48c26a2..18c4a828855 100644
--- a/dbtests/cursortests.cpp
+++ b/dbtests/cursortests.cpp
@@ -18,6 +18,8 @@
*/
#include "../db/clientcursor.h"
+#include "../db/instance.h"
+#include "../db/btree.h"
#include "dbtests.h"
@@ -103,12 +105,103 @@ namespace CursorTests {
};
} // namespace IdSetTests
+
+ namespace BtreeCursorTests {
+
+ class MultiRange {
+ public:
+ void run() {
+ dblock lk;
+ const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRange";
+ {
+ DBDirectClient c;
+ for( int i = 0; i < 10; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
+ }
+ BtreeCursor::BoundList b;
+ b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 1 ), BSON( "" << 2 ) ) );
+ b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 4 ), BSON( "" << 6 ) ) );
+ setClient( ns );
+ BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->indexes[ 1 ], b, 1 );
+ ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
+ double expected[] = { 1, 2, 4, 5, 6 };
+ for( int i = 0; i < 5; ++i ) {
+ ASSERT( c.ok() );
+ ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
+ c.advance();
+ }
+ ASSERT( !c.ok() );
+ }
+ };
+
+ class MultiRangeGap {
+ public:
+ void run() {
+ dblock lk;
+ const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeGap";
+ {
+ DBDirectClient c;
+ for( int i = 0; i < 10; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ for( int i = 100; i < 110; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
+ }
+ BtreeCursor::BoundList b;
+ b.push_back( pair< BSONObj, BSONObj >( BSON( "" << -50 ), BSON( "" << 2 ) ) );
+ b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 40 ), BSON( "" << 60 ) ) );
+ b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 109 ), BSON( "" << 200 ) ) );
+ setClient( ns );
+ BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->indexes[ 1 ], b, 1 );
+ ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
+ double expected[] = { 0, 1, 2, 109 };
+ for( int i = 0; i < 4; ++i ) {
+ ASSERT( c.ok() );
+ ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
+ c.advance();
+ }
+ ASSERT( !c.ok() );
+ }
+ };
+
+ class MultiRangeReverse {
+ public:
+ void run() {
+ dblock lk;
+ const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeReverse";
+ {
+ DBDirectClient c;
+ for( int i = 0; i < 10; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
+ }
+ BtreeCursor::BoundList b;
+ b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 6 ), BSON( "" << 4 ) ) );
+ b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 2 ), BSON( "" << 1 ) ) );
+ setClient( ns );
+ BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->indexes[ 1 ], b, -1 );
+ ASSERT_EQUALS( "BtreeCursor a_1 reverse multi", c.toString() );
+ double expected[] = { 6, 5, 4, 2, 1 };
+ for( int i = 0; i < 5; ++i ) {
+ ASSERT( c.ok() );
+ ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
+ c.advance();
+ }
+ ASSERT( !c.ok() );
+ }
+ };
+
+ } // namespace MultiBtreeCursorTests
- class All : public Suite {
+ class All : public ::Suite {
public:
All() {
add< IdSetTests::BasicSize >();
add< IdSetTests::Upgrade >();
+ add< BtreeCursorTests::MultiRange >();
+ add< BtreeCursorTests::MultiRangeGap >();
+ add< BtreeCursorTests::MultiRangeReverse >();
}
};
} // namespace CursorTests
diff --git a/dbtests/queryoptimizertests.cpp b/dbtests/queryoptimizertests.cpp
index 61ad92a5f6e..85ffade8f5a 100644
--- a/dbtests/queryoptimizertests.cpp
+++ b/dbtests/queryoptimizertests.cpp
@@ -32,16 +32,16 @@ namespace mongo {
namespace QueryOptimizerTests {
- namespace FieldBoundTests {
+ namespace FieldRangeTests {
class Base {
public:
virtual ~Base() {}
void run() {
- FieldBoundSet s( "ns", query() );
- checkElt( lower(), s.bound( "a" ).lower() );
- checkElt( upper(), s.bound( "a" ).upper() );
- ASSERT_EQUALS( lowerInclusive(), s.bound( "a" ).lowerInclusive() );
- ASSERT_EQUALS( upperInclusive(), s.bound( "a" ).upperInclusive() );
+ FieldRangeSet s( "ns", query() );
+ checkElt( lower(), s.range( "a" ).min() );
+ checkElt( upper(), s.range( "a" ).max() );
+ ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).minInclusive() );
+ ASSERT_EQUALS( upperInclusive(), s.range( "a" ).maxInclusive() );
}
protected:
virtual BSONObj query() = 0;
@@ -133,7 +133,7 @@ namespace QueryOptimizerTests {
class EqGteInvalid {
public:
void run() {
- FieldBoundSet fbs( "ns", BSON( "a" << 1 << "a" << GTE << 2 ) );
+ FieldRangeSet fbs( "ns", BSON( "a" << 1 << "a" << GTE << 2 ) );
ASSERT( !fbs.matchPossible() );
}
};
@@ -185,25 +185,25 @@ namespace QueryOptimizerTests {
class Equality {
public:
void run() {
- FieldBoundSet s( "ns", BSON( "a" << 1 ) );
- ASSERT( s.bound( "a" ).equality() );
- FieldBoundSet s2( "ns", BSON( "a" << GTE << 1 << LTE << 1 ) );
- ASSERT( s2.bound( "a" ).equality() );
- FieldBoundSet s3( "ns", BSON( "a" << GT << 1 << LTE << 1 ) );
- ASSERT( !s3.bound( "a" ).equality() );
- FieldBoundSet s4( "ns", BSON( "a" << GTE << 1 << LT << 1 ) );
- ASSERT( !s4.bound( "a" ).equality() );
- FieldBoundSet s5( "ns", BSON( "a" << GTE << 1 << LTE << 1 << GT << 1 ) );
- ASSERT( !s5.bound( "a" ).equality() );
- FieldBoundSet s6( "ns", BSON( "a" << GTE << 1 << LTE << 1 << LT << 1 ) );
- ASSERT( !s6.bound( "a" ).equality() );
+ FieldRangeSet s( "ns", BSON( "a" << 1 ) );
+ ASSERT( s.range( "a" ).equality() );
+ FieldRangeSet s2( "ns", BSON( "a" << GTE << 1 << LTE << 1 ) );
+ ASSERT( s2.range( "a" ).equality() );
+ FieldRangeSet s3( "ns", BSON( "a" << GT << 1 << LTE << 1 ) );
+ ASSERT( !s3.range( "a" ).equality() );
+ FieldRangeSet s4( "ns", BSON( "a" << GTE << 1 << LT << 1 ) );
+ ASSERT( !s4.range( "a" ).equality() );
+ FieldRangeSet s5( "ns", BSON( "a" << GTE << 1 << LTE << 1 << GT << 1 ) );
+ ASSERT( !s5.range( "a" ).equality() );
+ FieldRangeSet s6( "ns", BSON( "a" << GTE << 1 << LTE << 1 << LT << 1 ) );
+ ASSERT( !s6.range( "a" ).equality() );
}
};
class SimplifiedQuery {
public:
void run() {
- FieldBoundSet fbs( "ns", BSON( "a" << GT << 1 << GT << 5 << LT << 10 << "b" << 4 << "c" << LT << 4 << LT << 6 << "d" << GTE << 0 << GT << 0 << "e" << GTE << 0 << LTE << 10 ) );
+ FieldRangeSet fbs( "ns", BSON( "a" << GT << 1 << GT << 5 << LT << 10 << "b" << 4 << "c" << LT << 4 << LT << 6 << "d" << GTE << 0 << GT << 0 << "e" << GTE << 0 << LTE << 10 ) );
BSONObj simple = fbs.simplifiedQuery();
cout << "simple: " << simple << endl;
ASSERT( !simple.getObjectField( "a" ).woCompare( fromjson( "{$gt:5,$lt:10}" ) ) );
@@ -236,27 +236,27 @@ namespace QueryOptimizerTests {
}
private:
static QueryPattern p( const BSONObj &query, const BSONObj &sort = BSONObj() ) {
- return FieldBoundSet( "", query ).pattern( sort );
+ return FieldRangeSet( "", query ).pattern( sort );
}
};
class NoWhere {
public:
void run() {
- ASSERT_EQUALS( 0, FieldBoundSet( "ns", BSON( "$where" << 1 ) ).nNontrivialBounds() );
+ ASSERT_EQUALS( 0, FieldRangeSet( "ns", BSON( "$where" << 1 ) ).nNontrivialRanges() );
}
};
class Numeric {
public:
void run() {
- FieldBoundSet f( "", BSON( "a" << 1 ) );
- ASSERT( f.bound( "a" ).lower().woCompare( BSON( "a" << 2.0 ).firstElement() ) < 0 );
- ASSERT( f.bound( "a" ).lower().woCompare( BSON( "a" << 0.0 ).firstElement() ) > 0 );
+ FieldRangeSet f( "", BSON( "a" << 1 ) );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 2.0 ).firstElement() ) < 0 );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 0.0 ).firstElement() ) > 0 );
}
};
- } // namespace FieldBoundTests
+ } // namespace FieldRangeTests
namespace QueryPlanTests {
class Base {
@@ -302,8 +302,8 @@ namespace QueryOptimizerTests {
// There's a limit of 10 indexes total, make sure not to exceed this in a given test.
#define INDEXNO(x) nsd()->idxNo( *this->index( BSON(x) ) )
#define INDEX(x) this->index( BSON(x) )
- auto_ptr< FieldBoundSet > FieldBoundSet_GLOBAL;
-#define FBS(x) ( FieldBoundSet_GLOBAL.reset( new FieldBoundSet( ns(), x ) ), *FieldBoundSet_GLOBAL )
+ auto_ptr< FieldRangeSet > FieldRangeSet_GLOBAL;
+#define FBS(x) ( FieldRangeSet_GLOBAL.reset( new FieldRangeSet( ns(), x ) ), *FieldRangeSet_GLOBAL )
class NoIndex : public Base {
public:
@@ -512,17 +512,17 @@ namespace QueryOptimizerTests {
public:
void run() {
QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "b" << 1 ) ), BSONObj() );
- ASSERT( !p.bound( "a" ).nontrivial() );
+ ASSERT( !p.range( "a" ).nontrivial() );
ASSERT( p.unhelpful() );
QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "b" << 1 << "c" << 1 ) ), BSON( "a" << 1 ) );
ASSERT( !p2.scanAndOrderRequired() );
- ASSERT( !p2.bound( "a" ).nontrivial() );
+ ASSERT( !p2.range( "a" ).nontrivial() );
ASSERT( !p2.unhelpful() );
QueryPlan p3( nsd(), INDEXNO( "b" << 1 ), FBS( BSON( "b" << 1 << "c" << 1 ) ), BSONObj() );
- ASSERT( p3.bound( "b" ).nontrivial() );
+ ASSERT( p3.range( "b" ).nontrivial() );
ASSERT( !p3.unhelpful() );
QueryPlan p4( nsd(), INDEXNO( "b" << 1 << "c" << 1 ), FBS( BSON( "c" << 1 << "d" << 1 ) ), BSONObj() );
- ASSERT( !p4.bound( "b" ).nontrivial() );
+ ASSERT( !p4.range( "b" ).nontrivial() );
ASSERT( p4.unhelpful() );
}
};
@@ -909,8 +909,8 @@ namespace QueryOptimizerTests {
BSONObj one = BSON( "a" << 1 );
theDataFileMgr.insert( ns(), one );
deleteObjects( ns(), BSON( "a" << 1 ), false );
- ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get( ns() ).indexForPattern( FieldBoundSet( ns(), BSON( "a" << 1 ) ).pattern() ) ) == 0 );
- ASSERT_EQUALS( 2, NamespaceDetailsTransient::get( ns() ).nScannedForPattern( FieldBoundSet( ns(), BSON( "a" << 1 ) ).pattern() ) );
+ ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "a" << 1 ) ).pattern() ) ) == 0 );
+ ASSERT_EQUALS( 2, NamespaceDetailsTransient::get( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "a" << 1 ) ).pattern() ) );
}
};
@@ -961,13 +961,13 @@ namespace QueryOptimizerTests {
assembleRequest( ns(), QUERY( "b" << 0 << "a" << GTE << 0 ).obj, 2, 0, 0, 0, m );
stringstream ss;
runQuery( m, ss );
- ASSERT( BSON( "$natural" << 1 ).woCompare( NamespaceDetailsTransient::get( ns() ).indexForPattern( FieldBoundSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) ) == 0 );
+ ASSERT( BSON( "$natural" << 1 ).woCompare( NamespaceDetailsTransient::get( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) ) == 0 );
Message m2;
assembleRequest( ns(), QUERY( "b" << 99 << "a" << GTE << 0 ).obj, 2, 0, 0, 0, m2 );
runQuery( m2, ss );
- ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get( ns() ).indexForPattern( FieldBoundSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) ) == 0 );
- ASSERT_EQUALS( 2, NamespaceDetailsTransient::get( ns() ).nScannedForPattern( FieldBoundSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) );
+ ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) ) == 0 );
+ ASSERT_EQUALS( 2, NamespaceDetailsTransient::get( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) );
}
};
@@ -976,25 +976,25 @@ namespace QueryOptimizerTests {
class All : public Suite {
public:
All() {
- add< FieldBoundTests::Empty >();
- add< FieldBoundTests::Eq >();
- add< FieldBoundTests::DupEq >();
- add< FieldBoundTests::Lt >();
- add< FieldBoundTests::Lte >();
- add< FieldBoundTests::Gt >();
- add< FieldBoundTests::Gte >();
- add< FieldBoundTests::TwoLt >();
- add< FieldBoundTests::TwoGt >();
- add< FieldBoundTests::EqGte >();
- add< FieldBoundTests::EqGteInvalid >();
- add< FieldBoundTests::Regex >();
- add< FieldBoundTests::UnhelpfulRegex >();
- add< FieldBoundTests::In >();
- add< FieldBoundTests::Equality >();
- add< FieldBoundTests::SimplifiedQuery >();
- add< FieldBoundTests::QueryPatternTest >();
- add< FieldBoundTests::NoWhere >();
- add< FieldBoundTests::Numeric >();
+ add< FieldRangeTests::Empty >();
+ add< FieldRangeTests::Eq >();
+ add< FieldRangeTests::DupEq >();
+ add< FieldRangeTests::Lt >();
+ add< FieldRangeTests::Lte >();
+ add< FieldRangeTests::Gt >();
+ add< FieldRangeTests::Gte >();
+ add< FieldRangeTests::TwoLt >();
+ add< FieldRangeTests::TwoGt >();
+ add< FieldRangeTests::EqGte >();
+ add< FieldRangeTests::EqGteInvalid >();
+ add< FieldRangeTests::Regex >();
+ add< FieldRangeTests::UnhelpfulRegex >();
+ add< FieldRangeTests::In >();
+ add< FieldRangeTests::Equality >();
+ add< FieldRangeTests::SimplifiedQuery >();
+ add< FieldRangeTests::QueryPatternTest >();
+ add< FieldRangeTests::NoWhere >();
+ add< FieldRangeTests::Numeric >();
add< QueryPlanTests::NoIndex >();
add< QueryPlanTests::SimpleOrder >();
add< QueryPlanTests::MoreIndexThanNeeded >();
diff --git a/jstests/exists.js b/jstests/exists.js
index dd5a13d6dd5..20c3fe8dbd2 100644
--- a/jstests/exists.js
+++ b/jstests/exists.js
@@ -5,8 +5,28 @@ t.save( {} );
t.save( {a:1} );
t.save( {a:{b:1}} );
t.save( {a:{b:{c:1}}} );
+t.save( {a:{b:{c:{d:null}}}} );
-assert.eq( 4, t.count() );
-assert.eq( 3, t.count( {a:{$ne:null}} ) );
-assert.eq( 2, t.count( {'a.b':{$ne:null}} ) );
-assert.eq( 1, t.count( {'a.b.c':{$ne:null}} ) );
+assert.eq( 5, t.count() );
+assert.eq( 4, t.count( {a:{$ne:null}} ) );
+assert.eq( 3, t.count( {'a.b':{$ne:null}} ) );
+assert.eq( 2, t.count( {'a.b.c':{$ne:null}} ) );
+assert.eq( 0, t.count( {'a.b.c.d':{$ne:null}} ) );
+
+assert.eq( 4, t.count( {a: {$exists:true}} ) );
+assert.eq( 3, t.count( {'a.b': {$exists:true}} ) );
+assert.eq( 2, t.count( {'a.b.c': {$exists:true}} ) );
+assert.eq( 1, t.count( {'a.b.c.d': {$exists:true}} ) );
+
+assert.eq( 1, t.count( {a: {$exists:false}} ) );
+assert.eq( 2, t.count( {'a.b': {$exists:false}} ) );
+assert.eq( 3, t.count( {'a.b.c': {$exists:false}} ) );
+assert.eq( 4, t.count( {'a.b.c.d': {$exists:false}} ) );
+
+t.drop();
+
+t.save( {r:[{s:1}]} );
+assert( t.findOne( {'r.s':{$exists:true}} ) );
+assert( !t.findOne( {'r.s':{$exists:false}} ) );
+assert( !t.findOne( {'r.t':{$exists:true}} ) );
+assert( t.findOne( {'r.t':{$exists:false}} ) );
diff --git a/jstests/stats.js b/jstests/stats.js
new file mode 100644
index 00000000000..26de644f02c
--- /dev/null
+++ b/jstests/stats.js
@@ -0,0 +1,9 @@
+
+t = db.stats1;
+t.drop();
+
+t.save( { a : 1 } );
+
+assert.lt( 0 , t.dataSize() , "A" );
+assert.lt( t.dataSize() , t.storageSize() , "B" );
+assert.lt( 0 , t.totalIndexSize() , "C" );
diff --git a/s/cursors.cpp b/s/cursors.cpp
index 22400113f38..6da8744431e 100644
--- a/s/cursors.cpp
+++ b/s/cursors.cpp
@@ -80,7 +80,7 @@ namespace mongo {
b.appendElements( filter );
b.appendElements( extra );
- FieldBoundSet s( "wrong" , b.obj() , false );
+ FieldRangeSet s( "wrong" , b.obj() , false );
return s.simplifiedQuery();
}
diff --git a/scripting/engine.cpp b/scripting/engine.cpp
index 5d783160431..d34b90aa578 100644
--- a/scripting/engine.cpp
+++ b/scripting/engine.cpp
@@ -50,15 +50,19 @@ namespace mongo {
class ScopeCache {
public:
+
+ ScopeCache(){
+ _magic = 17;
+ }
~ScopeCache(){
+ assert( _magic == 17 );
+ _magic = 1;
+
if ( inShutdown() )
return;
-
- for ( PoolToScopes::iterator i=_pools.begin() ; i != _pools.end(); i++ ){
- for ( list<Scope*>::iterator j=i->second.begin(); j != i->second.end(); j++ )
- delete *j;
- }
+
+ clear();
}
void done( const string& pool , Scope * s ){
@@ -84,10 +88,26 @@ namespace mongo {
s->reset();
return s;
}
+
+ void clear(){
+ set<Scope*> seen;
+
+ for ( PoolToScopes::iterator i=_pools.begin() ; i != _pools.end(); i++ ){
+ for ( list<Scope*>::iterator j=i->second.begin(); j != i->second.end(); j++ ){
+ Scope * s = *j;
+ assert( ! seen.count( s ) );
+ delete s;
+ seen.insert( s );
+ }
+ }
+
+ _pools.clear();
+ }
private:
PoolToScopes _pools;
mutex _mutex;
+ int _magic;
};
thread_specific_ptr<ScopeCache> scopeCache;
@@ -191,11 +211,18 @@ namespace mongo {
if ( ! s ){
s = createScope();
}
-
+
auto_ptr<Scope> p;
p.reset( new PooledScope( pool , s ) );
return p;
}
+ void ScriptEngine::threadDone(){
+ ScopeCache * sc = scopeCache.get();
+ if ( sc ){
+ sc->clear();
+ }
+ }
+
ScriptEngine * globalScriptEngine;
}
diff --git a/scripting/engine.h b/scripting/engine.h
index 5e0e452c8d8..8475d52c950 100644
--- a/scripting/engine.h
+++ b/scripting/engine.h
@@ -78,6 +78,7 @@ namespace mongo {
static void setup();
auto_ptr<Scope> getPooledScope( const string& pool );
+ void threadDone();
};
extern ScriptEngine * globalScriptEngine;
diff --git a/scripting/engine_spidermonkey.cpp b/scripting/engine_spidermonkey.cpp
index 1379b8f2783..70d0785b78a 100644
--- a/scripting/engine_spidermonkey.cpp
+++ b/scripting/engine_spidermonkey.cpp
@@ -12,8 +12,8 @@
namespace mongo {
boost::thread_specific_ptr<SMScope> currentScope( dontDeleteScope );
- boost::mutex smmutex;
-#define smlock boostlock ___lk( smmutex );
+ boost::recursive_mutex smmutex;
+#define smlock recursive_boostlock ___lk( smmutex );
#define GETHOLDER(x,o) ((BSONHolder*)JS_GetPrivate( x , o ))
@@ -921,6 +921,7 @@ namespace mongo {
}
void init( BSONObj * data ){
+ smlock;
if ( ! data )
return;
@@ -934,6 +935,7 @@ namespace mongo {
}
void externalSetup(){
+ smlock;
uassert( "already local connected" , ! _localConnect );
if ( _externalSetup )
return;
@@ -942,6 +944,7 @@ namespace mongo {
}
void localConnect( const char * dbName ){
+ smlock;
uassert( "already setup for external db" , ! _externalSetup );
if ( _localConnect ){
uassert( "connected to different db" , _dbName == dbName );
@@ -959,12 +962,14 @@ namespace mongo {
// ----- getters ------
double getNumber( const char *field ){
+ smlock;
jsval val;
assert( JS_GetProperty( _context , _global , field , &val ) );
return _convertor->toNumber( val );
}
string getString( const char *field ){
+ smlock;
jsval val;
assert( JS_GetProperty( _context , _global , field , &val ) );
JSString * s = JS_ValueToString( _context , val );
@@ -972,18 +977,22 @@ namespace mongo {
}
bool getBoolean( const char *field ){
+ smlock;
return _convertor->getBoolean( _global , field );
}
BSONObj getObject( const char *field ){
+ smlock;
return _convertor->toObject( _convertor->getProperty( _global , field ) );
}
JSObject * getJSObject( const char * field ){
+ smlock;
return _convertor->getJSObject( _global , field );
}
int type( const char *field ){
+ smlock;
jsval val;
assert( JS_GetProperty( _context , _global , field , &val ) );
@@ -1013,26 +1022,31 @@ namespace mongo {
// ----- setters ------
void setNumber( const char *field , double val ){
+ smlock;
jsval v = _convertor->toval( val );
assert( JS_SetProperty( _context , _global , field , &v ) );
}
void setString( const char *field , const char * val ){
+ smlock;
jsval v = _convertor->toval( val );
assert( JS_SetProperty( _context , _global , field , &v ) );
}
void setObject( const char *field , const BSONObj& obj , bool readOnly ){
+ smlock;
jsval v = _convertor->toval( &obj , readOnly );
JS_SetProperty( _context , _global , field , &v );
}
void setBoolean( const char *field , bool val ){
+ smlock;
jsval v = BOOLEAN_TO_JSVAL( val );
assert( JS_SetProperty( _context , _global , field , &v ) );
}
void setThis( const BSONObj * obj ){
+ smlock;
if ( _this )
JS_RemoveRoot( _context , &_this );
@@ -1044,6 +1058,7 @@ namespace mongo {
// ---- functions -----
ScriptingFunction createFunction( const char * code ){
+ smlock;
precall();
return (ScriptingFunction)_convertor->compileFunction( code );
}
@@ -1091,6 +1106,7 @@ namespace mongo {
}
bool exec( const string& code , const string& name = "(anon)" , bool printResult = false , bool reportError = true , bool assertOnError = true, int timeoutMs = 0 ){
+ smlock;
precall();
jsval ret = JSVAL_VOID;
@@ -1156,6 +1172,7 @@ namespace mongo {
}
void injectNative( const char *field, NativeFunction func ){
+ smlock;
string name = field;
_convertor->setProperty( _global , (name + "_").c_str() , PRIVATE_TO_JSVAL( func ) );
diff --git a/shell/collection.js b/shell/collection.js
index 932f391c017..d3aff10e53d 100644
--- a/shell/collection.js
+++ b/shell/collection.js
@@ -44,9 +44,10 @@ DBCollection.prototype.help = function(){
print("\tdb.foo.getIndexes()");
print("\tdb.foo.drop() drop the collection");
print("\tdb.foo.validate() - SLOW");
- print("\tdb.foo.stats() - stats about the collection - SLOW");
- print("\tdb.foo.dataSize() - size in bytes of all the data - SLOW");
- print("\tdb.foo.totalIndexSize() - size in bytes of all the indexes - SLOW");
+ print("\tdb.foo.stats()");
+ print("\tdb.foo.dataSize()");
+ print("\tdb.foo.storageSize() - includes free space allocated to this collection");
+ print("\tdb.foo.totalIndexSize() - size in bytes of all the indexes");
}
DBCollection.prototype.getFullName = function(){
@@ -347,19 +348,15 @@ DBCollection.prototype.getCollection = function( subName ){
}
DBCollection.prototype.stats = function(){
- var res = this.validate().result;
- var p = /\b(\w+)\??: *(\d+)\b/g;
- var m;
-
- var o = {};
- while ( m = p.exec( res ) ){
- o[ m[1] ] = m[2];
- }
- return o;
+ return this._db.runCommand( { collstats : this._shortName } );
}
DBCollection.prototype.dataSize = function(){
- return parseInt( this.stats().datasize );
+ return this.stats().size;
+}
+
+DBCollection.prototype.storageSize = function(){
+ return this.stats().storageSize;
}
DBCollection.prototype.totalIndexSize = function(){
diff --git a/shell/dbshell.cpp b/shell/dbshell.cpp
index 32ae7623e8b..98e1e9b4b7a 100644
--- a/shell/dbshell.cpp
+++ b/shell/dbshell.cpp
@@ -57,7 +57,6 @@ char * shellReadline( const char * prompt ){
}
#if !defined(_WIN32)
-#include <execinfo.h>
#include <string.h>
void quitNicely( int sig ){
diff --git a/tools/Tool.cpp b/tools/Tool.cpp
index 33fd444c35d..6526ebaadbf 100644
--- a/tools/Tool.cpp
+++ b/tools/Tool.cpp
@@ -14,7 +14,7 @@ using namespace mongo;
namespace po = boost::program_options;
mongo::Tool::Tool( string name , string defaultDB , string defaultCollection ) :
- _name( name ) , _db( defaultDB ) , _coll( defaultCollection ) , _conn(0) {
+ _name( name ) , _db( defaultDB ) , _coll( defaultCollection ) , _conn(0), _paired(false) {
_options = new po::options_description( name + " options" );
_options->add_options()
@@ -22,6 +22,8 @@ mongo::Tool::Tool( string name , string defaultDB , string defaultCollection ) :
("host,h",po::value<string>(), "mongo host to connect to" )
("db,d",po::value<string>(), "database to use" )
("collection,c",po::value<string>(), "collection to use (some commands)" )
+ ("username,u",po::value<string>(), "username" )
+ ("password,p",po::value<string>(), "password" )
("dbpath",po::value<string>(), "directly access mongod data files in this path, instead of connecting to a mongod instance" )
("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
;
@@ -76,6 +78,7 @@ int mongo::Tool::main( int argc , char ** argv ){
}
else {
DBClientPaired * c = new DBClientPaired();
+ _paired = true;
_conn = c;
if ( ! c->connect( _host ) ){
@@ -101,6 +104,12 @@ int mongo::Tool::main( int argc , char ** argv ){
if ( _params.count( "collection" ) )
_coll = _params["collection"].as<string>();
+ if ( _params.count( "username" ) )
+ _username = _params["username"].as<string>();
+
+ if ( _params.count( "password" ) )
+ _password = _params["password"].as<string>();
+
try {
return run();
}
@@ -109,3 +118,28 @@ int mongo::Tool::main( int argc , char ** argv ){
return -1;
}
}
+
+mongo::DBClientBase& mongo::Tool::conn( bool slaveIfPaired ){
+ if ( _paired && slaveIfPaired )
+ return ((DBClientPaired*)_conn)->slaveConn();
+ return *_conn;
+}
+
+void mongo::Tool::auth( string dbname ){
+ if ( ! dbname.size() )
+ dbname = _db;
+
+ if ( ! ( _username.size() || _password.size() ) )
+ return;
+
+ string errmsg;
+ if ( _conn->auth( dbname , _username , _password , errmsg ) )
+ return;
+
+ // try against the admin db
+ string err2;
+ if ( _conn->auth( "admin" , _username , _password , errmsg ) )
+ return;
+
+ throw mongo::UserException( (string)"auth failed: " + errmsg );
+}
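
Note: a hypothetical tool showing how the new helpers are meant to be used (the class and collection names are made up; tools/dump.cpp further down follows the same pattern):

    // assumes the usual tool includes (Tool.h, client/dbclient.h)
    class MyTool : public Tool {
    public:
        MyTool() : Tool( "mytool" , "test" , "things" ) {}
        int run(){
            auth();                            // authenticates with --username/--password when given
            DBClientBase &c = conn( true );    // returns the slave connection when paired
            auto_ptr<DBClientCursor> cursor = c.query( ( _db + "." + _coll ).c_str() , Query() );
            while ( cursor->more() )
                cout << cursor->next() << endl;
            return 0;
        }
    };

    int main( int argc , char ** argv ){
        MyTool t;
        return t.main( argc , argv );
    }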
diff --git a/tools/Tool.h b/tools/Tool.h
index 576dbe0ffa5..4082a593f1e 100644
--- a/tools/Tool.h
+++ b/tools/Tool.h
@@ -53,19 +53,26 @@ namespace mongo {
virtual void printHelp(ostream &out);
virtual void printExtraHelp( ostream & out );
-
+
protected:
+
+ mongo::DBClientBase &conn( bool slaveIfPaired = false );
+ void auth( string db = "" );
+
string _name;
string _db;
string _coll;
+
+ string _username;
+ string _password;
- mongo::DBClientBase &conn() { return *_conn; }
-
+
private:
string _host;
mongo::DBClientBase * _conn;
-
+ bool _paired;
+
boost::program_options::options_description * _options;
boost::program_options::positional_options_description _positonalOptions;
diff --git a/tools/bridge.cpp b/tools/bridge.cpp
index f997ffd6c80..42c32874303 100644
--- a/tools/bridge.cpp
+++ b/tools/bridge.cpp
@@ -72,13 +72,12 @@ public:
auto_ptr< MyListener > listener;
-#if !defined(_WIN32)
-#include <execinfo.h>
+#if !defined(_WIN32)
void cleanup( int sig ) {
close( listener->socket() );
for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ )
(*i)->shutdown();
- ::exit( 0 );
+ ::exit( 0 );
}
void setupSignals() {
@@ -108,7 +107,7 @@ void check( bool b ) {
int main( int argc, char **argv ) {
setupSignals();
-
+
check( argc == 5 );
for( int i = 1; i < 5; ++i ) {
diff --git a/tools/dump.cpp b/tools/dump.cpp
index 0729efa2af6..27e19938173 100644
--- a/tools/dump.cpp
+++ b/tools/dump.cpp
@@ -40,7 +40,7 @@ public:
int out = open( outputFile.string().c_str() , O_WRONLY | O_CREAT | O_TRUNC , 0666 );
assert( out );
- auto_ptr<DBClientCursor> cursor = conn().query( coll.c_str() , Query().snapshot() , 0 , 0 , 0 , Option_SlaveOk | Option_NoCursorTimeout );
+ auto_ptr<DBClientCursor> cursor = conn( true ).query( coll.c_str() , Query().snapshot() , 0 , 0 , 0 , Option_SlaveOk | Option_NoCursorTimeout );
int num = 0;
while ( cursor->more() ) {
@@ -56,12 +56,12 @@ public:
void go( const string db , const path outdir ) {
cout << "DATABASE: " << db << "\t to \t" << outdir.string() << endl;
-
+
create_directories( outdir );
string sns = db + ".system.namespaces";
- auto_ptr<DBClientCursor> cursor = conn().query( sns.c_str() , Query() , 0 , 0 , 0 , Option_SlaveOk | Option_NoCursorTimeout );
+ auto_ptr<DBClientCursor> cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , Option_SlaveOk | Option_NoCursorTimeout );
while ( cursor->more() ) {
BSONObj obj = cursor->next();
if ( obj.toString().find( ".$" ) != string::npos )
@@ -86,8 +86,9 @@ public:
if ( db == "*" ){
cout << "all dbs" << endl;
-
- BSONObj res = conn().findOne( "admin.$cmd" , BSON( "listDatabases" << 1 ) );
+ auth( "admin" );
+
+ BSONObj res = conn( true ).findOne( "admin.$cmd" , BSON( "listDatabases" << 1 ) );
BSONObj dbs = res.getField( "databases" ).embeddedObjectUserCheck();
set<string> keys;
dbs.getFieldNames( keys );
@@ -104,6 +105,7 @@ public:
}
}
else {
+ auth( db );
go( db , root / db );
}
return 0;
diff --git a/tools/export.cpp b/tools/export.cpp
index 2a55d61bf85..a8dec9a1848 100644
--- a/tools/export.cpp
+++ b/tools/export.cpp
@@ -63,6 +63,8 @@ public:
printHelp(cerr);
return 1;
}
+
+ auth();
if ( hasParam( "fields" ) ){
diff --git a/tools/files.cpp b/tools/files.cpp
index 957c546105d..2a53b02d107 100644
--- a/tools/files.cpp
+++ b/tools/files.cpp
@@ -66,6 +66,8 @@ public:
}
GridFS g( conn() , _db );
+ auth();
+
string filename = getParam( "file" );
if ( cmd == "list" ){
diff --git a/tools/importJSON.cpp b/tools/importJSON.cpp
index 69b20c209df..c3f60c0e20b 100644
--- a/tools/importJSON.cpp
+++ b/tools/importJSON.cpp
@@ -62,6 +62,8 @@ public:
printHelp(cerr);
return -1;
}
+
+ auth();
if ( hasParam( "drop" ) ){
cout << "dropping: " << ns << endl;
diff --git a/tools/restore.cpp b/tools/restore.cpp
index a201d4d2c38..615de0bd185 100644
--- a/tools/restore.cpp
+++ b/tools/restore.cpp
@@ -39,6 +39,7 @@ public:
}
int run(){
+ auth();
drillDown( getParam( "dir" ) );
return 0;
}
diff --git a/tools/sniffer.cpp b/tools/sniffer.cpp
index 8bb3cb47734..b6968f257a9 100644
--- a/tools/sniffer.cpp
+++ b/tools/sniffer.cpp
@@ -13,6 +13,11 @@
#include <pcap.h>
+#ifdef _WIN32
+#undef min
+#undef max
+#endif
+
#include "../util/builder.h"
#include "../util/message.h"
#include "../db/dbmessage.h"
diff --git a/util/goodies.h b/util/goodies.h
index f29f9fe213a..34cb289b460 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -23,7 +23,7 @@
namespace mongo {
-#if !defined(_WIN32)
+#if !defined(_WIN32) && !defined(NOEXECINFO)
} // namespace mongo
@@ -60,7 +60,7 @@ namespace mongo {
free (strings);
}
#else
- inline void printStackTrace() { }
+ inline void printStackTrace( ostream &o = cout ) { }
#endif
/* set to TRUE if we are exiting */