summaryrefslogtreecommitdiff
path: root/db
diff options
context:
space:
mode:
authorAaron <aaron@10gen.com>2010-10-19 10:42:29 -0700
committerAaron <aaron@10gen.com>2010-10-19 10:42:29 -0700
commit0f38c2376d4015584dd3bd6ed90242a7f11cd440 (patch)
treebc5a2ee15356368fef5415f16d71d19bbf427881 /db
parent015faa0b605a2de0c05460aa8822ea11b10d99a8 (diff)
parent552e94d71d2db6ccad2b123e551db805f08970a4 (diff)
downloadmongo-0f38c2376d4015584dd3bd6ed90242a7f11cd440.tar.gz
merge
Diffstat (limited to 'db')
-rw-r--r--db/btree.cpp4
-rw-r--r--db/btree.h58
-rw-r--r--db/btreecursor.cpp28
-rw-r--r--db/client.cpp10
-rw-r--r--db/clientcursor.cpp17
-rw-r--r--db/common.cpp11
-rw-r--r--db/concurrency.h5
-rw-r--r--db/db.cpp5
-rw-r--r--db/geo/2d.cpp70
-rw-r--r--db/mongomutex.h74
-rw-r--r--db/namespace.h13
-rw-r--r--db/queryoptimizer.cpp6
-rw-r--r--db/queryoptimizer.h2
-rw-r--r--db/repl/health.cpp10
14 files changed, 170 insertions, 143 deletions
diff --git a/db/btree.cpp b/db/btree.cpp
index a3eb5ae1f0d..de8f6694306 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -750,7 +750,9 @@ found:
assert( n > 2 );
int split = 0;
int rightSize = 0;
- int rightSizeLimit = topSize * ( keypos == n ? 0.1 : 0.5 ); // see SERVER-983
+ // when splitting a btree node, if the new key is greater than all the other keys, we should not do an even split, but a 90/10 split.
+ // see SERVER-983
+ int rightSizeLimit = topSize * ( keypos == n ? 0.1 : 0.5 );
for( int i = n - 1; i > -1; --i ) {
rightSize += keyNode( i ).key.objsize();
if ( rightSize > rightSizeLimit ) {
diff --git a/db/btree.h b/db/btree.h
index 4fcd04a4555..ba28d642890 100644
--- a/db/btree.h
+++ b/db/btree.h
@@ -253,18 +253,9 @@ namespace mongo {
class BtreeCursor : public Cursor {
public:
BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
-
BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction );
- ~BtreeCursor(){
- }
- virtual bool ok() {
- return !bucket.isNull();
- }
- bool eof() {
- return !ok();
- }
+ virtual bool ok() { return !bucket.isNull(); }
virtual bool advance();
-
virtual void noteLocation(); // updates keyAtKeyOfs...
virtual void checkLocation();
virtual bool supportGetMore() { return true; }
@@ -278,7 +269,7 @@ namespace mongo {
*/
virtual bool getsetdup(DiskLoc loc) {
if( multikey ) {
- pair<set<DiskLoc>::iterator, bool> p = dups.insert(loc);
+ pair<set<DiskLoc>::iterator, bool> p = _dups.insert(loc);
return !p.second;
}
return false;
@@ -296,7 +287,6 @@ namespace mongo {
}
virtual BSONObj currKey() const { return currKeyNode().key; }
-
virtual BSONObj indexKeyPattern() { return indexDetails.keyPattern(); }
virtual void aboutToDeleteBucket(const DiskLoc& b) {
@@ -304,22 +294,14 @@ namespace mongo {
keyOfs = -1;
}
- virtual DiskLoc currLoc() {
- return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc();
- }
- virtual DiskLoc refLoc() {
- return currLoc();
- }
- virtual Record* _current() {
- return currLoc().rec();
- }
- virtual BSONObj current() {
- return BSONObj(_current());
- }
+ virtual DiskLoc currLoc() { return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc(); }
+ virtual DiskLoc refLoc() { return currLoc(); }
+ virtual Record* _current() { return currLoc().rec(); }
+ virtual BSONObj current() { return BSONObj(_current()); }
virtual string toString() {
string s = string("BtreeCursor ") + indexDetails.indexName();
if ( direction < 0 ) s += " reverse";
- if ( bounds_.get() && bounds_->size() > 1 ) s += " multi";
+ if ( _bounds.get() && _bounds->size() > 1 ) s += " multi";
return s;
}
@@ -331,7 +313,7 @@ namespace mongo {
if ( !_independentFieldRanges ) {
return BSON( "start" << prettyKey( startKey ) << "end" << prettyKey( endKey ) );
} else {
- return bounds_->obj();
+ return _bounds->obj();
}
}
@@ -339,9 +321,7 @@ namespace mongo {
virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
- virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) {
- _matcher = matcher;
- }
+ virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) { _matcher = matcher; }
virtual long long nscanned() { return _nscanned; }
@@ -367,25 +347,23 @@ namespace mongo {
void advanceTo( const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive );
friend class BtreeBucket;
- set<DiskLoc> dups;
- NamespaceDetails *d;
- int idxNo;
-
+
+ set<DiskLoc> _dups;
+ NamespaceDetails * const d;
+ const int idxNo;
BSONObj startKey;
BSONObj endKey;
- bool endKeyInclusive_;
-
+ bool _endKeyInclusive;
bool multikey; // note this must be updated every getmore batch in case someone added a multikey...
-
const IndexDetails& indexDetails;
- BSONObj order;
- Ordering _ordering;
+ const BSONObj _order;
+ const Ordering _ordering;
DiskLoc bucket;
int keyOfs;
- int direction; // 1=fwd,-1=reverse
+ const int direction; // 1=fwd,-1=reverse
BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call
DiskLoc locAtKeyOfs;
- shared_ptr< FieldRangeVector > bounds_;
+ const shared_ptr< FieldRangeVector > _bounds;
auto_ptr< FieldRangeVector::Iterator > _boundsIterator;
const IndexSpec& _spec;
shared_ptr< CoveredIndexMatcher > _matcher;
diff --git a/db/btreecursor.cpp b/db/btreecursor.cpp
index a69bd27a785..9f00e591136 100644
--- a/db/btreecursor.cpp
+++ b/db/btreecursor.cpp
@@ -31,11 +31,11 @@ namespace mongo {
d(_d), idxNo(_idxNo),
startKey( _startKey ),
endKey( _endKey ),
- endKeyInclusive_( endKeyInclusive ),
+ _endKeyInclusive( endKeyInclusive ),
multikey( d->isMultikey( idxNo ) ),
indexDetails( _id ),
- order( _id.keyPattern() ),
- _ordering( Ordering::make( order ) ),
+ _order( _id.keyPattern() ),
+ _ordering( Ordering::make( _order ) ),
direction( _direction ),
_spec( _id.getSpec() ),
_independentFieldRanges( false ),
@@ -43,27 +43,27 @@ namespace mongo {
{
audit();
init();
- DEV assert( dups.size() == 0 );
+ dassert( _dups.size() == 0 );
}
BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction )
:
d(_d), idxNo(_idxNo),
- endKeyInclusive_( true ),
+ _endKeyInclusive( true ),
multikey( d->isMultikey( idxNo ) ),
indexDetails( _id ),
- order( _id.keyPattern() ),
- _ordering( Ordering::make( order ) ),
+ _order( _id.keyPattern() ),
+ _ordering( Ordering::make( _order ) ),
direction( _direction ),
- bounds_( ( assert( _bounds.get() ), _bounds ) ),
- _boundsIterator( new FieldRangeVector::Iterator( *bounds_ ) ),
+ _bounds( ( assert( _bounds.get() ), _bounds ) ),
+ _boundsIterator( new FieldRangeVector::Iterator( *_bounds ) ),
_spec( _id.getSpec() ),
_independentFieldRanges( true ),
_nscanned( 0 )
{
massert( 13384, "BtreeCursor FieldRangeVector constructor doesn't accept special indexes", !_spec.getType() );
audit();
- startKey = bounds_->startKey();
+ startKey = _bounds->startKey();
_boundsIterator->advance( startKey ); // handles initialization
_boundsIterator->prepDive();
pair< DiskLoc, int > noBestParent;
@@ -71,7 +71,7 @@ namespace mongo {
keyOfs = 0;
indexDetails.head.btree()->customLocate( bucket, keyOfs, startKey, 0, false, _boundsIterator->cmp(), _boundsIterator->inc(), _ordering, direction, noBestParent );
skipAndCheck();
- DEV assert( dups.size() == 0 );
+ dassert( _dups.size() == 0 );
}
void BtreeCursor::audit() {
@@ -80,7 +80,7 @@ namespace mongo {
if ( otherTraceLevel >= 12 ) {
if ( otherTraceLevel >= 200 ) {
out() << "::BtreeCursor() qtl>200. validating entire index." << endl;
- indexDetails.head.btree()->fullValidate(indexDetails.head, order);
+ indexDetails.head.btree()->fullValidate(indexDetails.head, _order);
}
else {
out() << "BTreeCursor(). dumping head bucket" << endl;
@@ -169,9 +169,9 @@ namespace mongo {
if ( bucket.isNull() )
return;
if ( !endKey.isEmpty() ) {
- int cmp = sgn( endKey.woCompare( currKey(), order ) );
+ int cmp = sgn( endKey.woCompare( currKey(), _order ) );
if ( ( cmp != 0 && cmp != direction ) ||
- ( cmp == 0 && !endKeyInclusive_ ) )
+ ( cmp == 0 && !_endKeyInclusive ) )
bucket = DiskLoc();
}
}
diff --git a/db/client.cpp b/db/client.cpp
index d48faa2834b..706865bc713 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -256,21 +256,20 @@ namespace mongo {
return c->toString();
}
- void curopWaitingForLock( int type ){
+ Client* curopWaitingForLock( int type ){
Client * c = currentClient.get();
assert( c );
CurOp * co = c->curop();
if ( co ){
co->waitingForLock( type );
}
+ return c;
}
- void curopGotLock(){
- Client * c = currentClient.get();
+ void curopGotLock(Client *c){
assert(c);
CurOp * co = c->curop();
- if ( co ){
+ if ( co )
co->gotLock();
- }
}
CurOp::~CurOp(){
@@ -278,7 +277,6 @@ namespace mongo {
scoped_lock bl(Client::clientsMutex);
_client->_curOp = _wrapped;
}
-
_client = 0;
}
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index 3534b974cb7..772983fd649 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -189,7 +189,22 @@ namespace mongo {
break;
}
- wassert( toAdvance.size() < 5000 );
+ if( toAdvance.size() >= 3000 ) {
+ log() << "perf warning MPW101: " << toAdvance.size() << " cursors for one diskloc "
+ << dl.toString()
+ << ' ' << toAdvance[1000]->ns
+ << ' ' << toAdvance[2000]->ns
+ << ' ' << toAdvance[1000]->_pinValue
+ << ' ' << toAdvance[2000]->_pinValue
+ << ' ' << toAdvance[1000]->pos
+ << ' ' << toAdvance[2000]->pos
+ << ' ' << toAdvance[1000]->_idleAgeMillis
+ << ' ' << toAdvance[2000]->_idleAgeMillis
+ << ' ' << toAdvance[1000]->_doingDeletes
+ << ' ' << toAdvance[2000]->_doingDeletes
+ << endl;
+ //wassert( toAdvance.size() < 5000 );
+ }
for ( vector<ClientCursor*>::iterator i = toAdvance.begin(); i != toAdvance.end(); ++i ){
ClientCursor* cc = *i;
diff --git a/db/common.cpp b/db/common.cpp
index 717e127cf5d..5ef34455497 100644
--- a/db/common.cpp
+++ b/db/common.cpp
@@ -26,15 +26,6 @@ namespace mongo {
/* we use new here so we don't have to worry about destructor orders at program shutdown */
MongoMutex &dbMutex( *(new MongoMutex("rw:dbMutex")) );
- MongoMutex::MongoMutex(const char *name) : _m(name) {
- //static int n;
- //assert( ++n == 1 ); // we don't expect there to be but a single one of these; warn, maybe, if there are
-
- /*cout << sizeof(*this) << endl;
- cout << sizeof(MutexInfo) << endl;
- cout << sizeof(RWLock) << endl;
- cout << sizeof(ThreadLocalValue<int>) << endl;
- _state.get();*/
- }
+ MongoMutex::MongoMutex(const char *name) : _m(name) { }
}
diff --git a/db/concurrency.h b/db/concurrency.h
index b0f2c508fb3..7b446eb1c8d 100644
--- a/db/concurrency.h
+++ b/db/concurrency.h
@@ -38,8 +38,9 @@ namespace mongo {
string sayClientState();
bool haveClient();
- void curopWaitingForLock( int type );
- void curopGotLock();
+ class Client;
+ Client* curopWaitingForLock( int type );
+ void curopGotLock(Client*);
/* mutex time stats */
class MutexInfo {
diff --git a/db/db.cpp b/db/db.cpp
index a57e3afc178..abbe360ba7a 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -731,8 +731,8 @@ int main(int argc, char* argv[], char *envp[] )
;
sharding_options.add_options()
- ("configsvr", "declare this is a config db of a cluster")
- ("shardsvr", "declare this is a shard db of a cluster")
+ ("configsvr", "declare this is a config db of a cluster; default port 27019; default dir /data/configdb")
+ ("shardsvr", "declare this is a shard db of a cluster; default port 27018")
("noMoveParanoia" , "turn off paranoid saving of data for moveChunk. this is on by default for now, but default will switch" )
;
@@ -969,6 +969,7 @@ int main(int argc, char* argv[], char *envp[] )
}
}
if ( params.count("configsvr" ) ){
+ uassert( 13499, "replication should not be enabled on a config server", !cmdLine.usingReplSets() && !replSettings.master && !replSettings.slave);
if ( params.count( "diaglog" ) == 0 )
_diaglog.level = 1;
if ( params.count( "dbpath" ) == 0 )
diff --git a/db/geo/2d.cpp b/db/geo/2d.cpp
index 50bf1d752d7..05acb8bd4ed 100644
--- a/db/geo/2d.cpp
+++ b/db/geo/2d.cpp
@@ -1419,7 +1419,7 @@ namespace mongo {
_want._min = Point( _g , _bl );
_want._max = Point( _g , _tr );
-
+
uassert( 13064 , "need an area > 0 " , _want.area() > 0 );
_state = START;
@@ -1430,12 +1430,14 @@ namespace mongo {
GEODEBUG( "center : " << center.toString() << "\t" << _prefix );
- {
- GeoHash a(0LL,32);
- GeoHash b(0LL,32);
- b.move(1,1);
- _fudge = _g->distance(a,b);
- }
+ {
+ GeoHash a(0LL,32);
+ GeoHash b(0LL,32);
+ b.move(1,1);
+ _fudge = _g->distance(a,b);
+ }
+
+ _wantLen = _fudge + std::max((_want._max._x - _want._min._x), (_want._max._y - _want._min._y));
ok();
}
@@ -1470,32 +1472,47 @@ namespace mongo {
_state = DONE;
return;
}
-
- Box cur( _g , _prefix );
- if ( cur._min._x + _fudge < _want._min._x &&
- cur._min._y + _fudge < _want._min._y &&
- cur._max._x - _fudge > _want._max._x &&
- cur._max._y - _fudge > _want._max._y ){
-
- _state = DONE;
- GeoHash temp = _prefix.commonPrefix( cur._max.hash( _g ) );
-
- GEODEBUG( "box done : " << cur.toString() << " prefix:" << _prefix << " common:" << temp );
-
- if ( temp == _prefix )
- return;
- _prefix = temp;
- GEODEBUG( "\t one more loop" );
- continue;
- }
- else {
+
+ if (_g->sizeEdge(_prefix) < _wantLen){
_prefix = _prefix.up();
+ } else {
+ for (int i=-1; i<=1; i++){
+ for (int j=-1; j<=1; j++){
+
+ if (i == 0 && j == 0)
+ continue; // main box
+
+ GeoHash newBox = _prefix;
+ newBox.move(i, j);
+
+ PREFIXDEBUG(newBox, _g);
+
+ Box cur( _g , newBox );
+ if (_want.intersects(cur)){
+ // TODO consider splitting into quadrants
+ getPointsForPrefix(newBox);
+ } else {
+ GEODEBUG("skipping box");
+ }
+ }
+ }
+ _state = DONE;
}
+
}
return;
}
}
+
+ void getPointsForPrefix(const GeoHash& prefix){
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , prefix , _found , this ) ){
+ return;
+ }
+
+ while ( _min.hasPrefix( prefix ) && _min.advance( -1 , _found , this ) );
+ while ( _max.hasPrefix( prefix ) && _max.advance( 1 , _found , this ) );
+ }
virtual bool checkDistance( const GeoHash& h , double& d ){
bool res = _want.inside( Point( _g , h ) , _fudge );
@@ -1508,6 +1525,7 @@ namespace mongo {
GeoHash _bl;
GeoHash _tr;
Box _want;
+ double _wantLen;
int _found;
diff --git a/db/mongomutex.h b/db/mongomutex.h
index f2493909ca7..bdd29e15729 100644
--- a/db/mongomutex.h
+++ b/db/mongomutex.h
@@ -20,6 +20,13 @@
namespace mongo {
+ /* the 'big lock' we use for most operations. a read/write lock.
+ there is one of these, dbMutex.
+ generally if you need to declare a mutex use the right primitive class no this.
+
+ use readlock and writelock classes for scoped locks on this rather than direct
+ manipulation.
+ */
class MongoMutex {
public:
MongoMutex(const char * name);
@@ -30,50 +37,39 @@ namespace mongo {
* < 0 read lock
*/
int getState() const { return _state.get(); }
+
+ bool atLeastReadLocked() const { return _state.get() != 0; }
+ void assertAtLeastReadLocked() const { assert(atLeastReadLocked()); }
bool isWriteLocked() const { return getState() > 0; }
void assertWriteLocked() const {
assert( getState() > 0 );
DEV assert( !_releasedEarly.get() );
}
- bool atLeastReadLocked() const { return _state.get() != 0; }
- void assertAtLeastReadLocked() const { assert(atLeastReadLocked()); }
-
- bool _checkWriteLockAlready(){
- DEV assert( haveClient() );
-
- int s = _state.get();
- if( s > 0 ) {
- _state.set(s+1);
- return true;
- }
-
- massert( 10293 , (string)"internal error: locks are not upgradeable: " + sayClientState() , s == 0 );
-
- return false;
- }
+ // write lock
void lock() {
- if ( _checkWriteLockAlready() )
+ if ( _writeLockedAlready() )
return;
_state.set(1);
- curopWaitingForLock( 1 ); // stats
+ Client *c = curopWaitingForLock( 1 ); // stats
_m.lock();
- curopGotLock();
+ curopGotLock(c);
_minfo.entered();
- MongoFile::lockAll();
+ MongoFile::lockAll(); // for _DEBUG validation -- a no op for release build
}
+ // try write lock
bool lock_try( int millis ) {
- if ( _checkWriteLockAlready() )
+ if ( _writeLockedAlready() )
return true;
- curopWaitingForLock( 1 );
+ Client *c = curopWaitingForLock( 1 );
bool got = _m.lock_try( millis );
- curopGotLock();
+ curopGotLock(c);
if ( got ){
_minfo.entered();
@@ -84,9 +80,8 @@ namespace mongo {
return got;
}
-
+ // un write lock
void unlock() {
- //DEV cout << "UNLOCK" << endl;
int s = _state.get();
if( s > 1 ) {
_state.set(s-1);
@@ -117,8 +112,8 @@ namespace mongo {
unlock();
}
+ // read lock
void lock_shared() {
- //DEV cout << " LOCKSHARED" << endl;
int s = _state.get();
if( s ) {
if( s > 0 ) {
@@ -133,11 +128,12 @@ namespace mongo {
}
}
_state.set(-1);
- curopWaitingForLock( -1 );
+ Client *c = curopWaitingForLock( -1 );
_m.lock_shared();
- curopGotLock();
+ curopGotLock(c);
}
+ // try read lock
bool lock_shared_try( int millis ) {
int s = _state.get();
if ( s ){
@@ -146,6 +142,10 @@ namespace mongo {
return true;
}
+ /* [dm] should there be
+ Client *c = curopWaitingForLock( 1 );
+ here? i think so. seems to be missing.
+ */
bool got = _m.lock_shared_try( millis );
if ( got )
_state.set(-1);
@@ -153,7 +153,6 @@ namespace mongo {
}
void unlock_shared() {
- //DEV cout << " UNLOCKSHARED" << endl;
int s = _state.get();
if( s > 0 ) {
assert( s > 1 ); /* we must have done a lock write first to have s > 1 */
@@ -172,6 +171,18 @@ namespace mongo {
MutexInfo& info() { return _minfo; }
private:
+ /* @return true if was already write locked. increments recursive lock count. */
+ bool _writeLockedAlready() {
+ dassert( haveClient() );
+ int s = _state.get();
+ if( s > 0 ) {
+ _state.set(s+1);
+ return true;
+ }
+ massert( 10293 , (string)"internal error: locks are not upgradeable: " + sayClientState() , s == 0 );
+ return false;
+ }
+
RWLock _m;
/* > 0 write lock with recurse count
@@ -185,6 +196,11 @@ namespace mongo {
we use a separate TLS value for releasedEarly - that is ok as
our normal/common code path, we never even touch it */
ThreadLocalValue<bool> _releasedEarly;
+
+ /* this is for fsyncAndLock command. otherwise write lock's greediness will
+ make us block on any write lock attempted after the fsync's lock.
+ */
+ //volatile bool _blockWrites;
};
extern MongoMutex &dbMutex;
diff --git a/db/namespace.h b/db/namespace.h
index fa0a0f85a08..1c9be22e62e 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -240,15 +240,16 @@ namespace mongo {
}
class IndexIterator {
- friend class NamespaceDetails;
- int i, n;
- NamespaceDetails *d;
- IndexIterator(NamespaceDetails *_d);
public:
int pos() { return i; } // note this is the next one to come
bool more() { return i < n; }
IndexDetails& next() { return d->idx(i++); }
- }; // IndexIterator
+ private:
+ friend class NamespaceDetails;
+ int i, n;
+ NamespaceDetails *d;
+ IndexIterator(NamespaceDetails *_d);
+ };
IndexIterator ii() { return IndexIterator(this); }
@@ -259,7 +260,7 @@ namespace mongo {
for a single document. see multikey in wiki.
for these, we have to do some dedup work on queries.
*/
- bool isMultikey(int i) { return (multiKeyIndexBits & (((unsigned long long) 1) << i)) != 0; }
+ bool isMultikey(int i) const { return (multiKeyIndexBits & (((unsigned long long) 1) << i)) != 0; }
void setIndexIsMultikey(int i) {
dassert( i < NIndexesMax );
unsigned long long x = ((unsigned long long) 1) << i;
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index 60f46742c68..89082c64568 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -63,7 +63,7 @@ namespace mongo {
scanAndOrderRequired_( true ),
exactKeyMatch_( false ),
direction_( 0 ),
- endKeyInclusive_( endKey.isEmpty() ),
+ _endKeyInclusive( endKey.isEmpty() ),
unhelpful_( false ),
_special( special ),
_type(0),
@@ -202,8 +202,8 @@ namespace mongo {
massert( 10363 , "newCursor() with start location not implemented for indexed plans", startLoc.isNull() );
if ( _startOrEndSpec ) {
- // we are sure to spec endKeyInclusive_
- return shared_ptr<Cursor>( new BtreeCursor( d, idxNo, *index_, _startKey, _endKey, endKeyInclusive_, direction_ >= 0 ? 1 : -1 ) );
+ // we are sure to spec _endKeyInclusive
+ return shared_ptr<Cursor>( new BtreeCursor( d, idxNo, *index_, _startKey, _endKey, _endKeyInclusive, direction_ >= 0 ? 1 : -1 ) );
} else if ( index_->getSpec().getType() ) {
return shared_ptr<Cursor>( new BtreeCursor( d, idxNo, *index_, _frv->startKey(), _frv->endKey(), true, direction_ >= 0 ? 1 : -1 ) );
} else {
diff --git a/db/queryoptimizer.h b/db/queryoptimizer.h
index f7268d51a97..cbae3d8d73d 100644
--- a/db/queryoptimizer.h
+++ b/db/queryoptimizer.h
@@ -81,7 +81,7 @@ namespace mongo {
shared_ptr< FieldRangeVector > _originalFrv;
BSONObj _startKey;
BSONObj _endKey;
- bool endKeyInclusive_;
+ bool _endKeyInclusive;
bool unhelpful_;
string _special;
IndexType * _type;
diff --git a/db/repl/health.cpp b/db/repl/health.cpp
index 5b93e6430fc..b76b2be2238 100644
--- a/db/repl/health.cpp
+++ b/db/repl/health.cpp
@@ -370,9 +370,15 @@ namespace mongo {
BSONObjBuilder bb;
bb.append("_id", (int) m->id());
bb.append("name", m->fullName());
- bb.append("health", m->hbinfo().health);
+ double h = m->hbinfo().health;
+ bb.append("health", h);
bb.append("state", (int) m->state().s);
- bb.append("stateStr", m->state().toString());
+ if( h == 0 ) {
+ // if we can't connect the state info is from the past and could be confusing to show
+ bb.append("stateStr", "(not reachable/healthy)");
+ } else {
+ bb.append("stateStr", m->state().toString());
+ }
bb.append("uptime", (unsigned) (m->hbinfo().upSince ? (time(0)-m->hbinfo().upSince) : 0));
bb.appendTimestamp("optime", m->hbinfo().opTime.asDate());
bb.appendDate("optimeDate", m->hbinfo().opTime.getSecs() * 1000LL);