Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/btree.cpp | 90
-rw-r--r--  src/mongo/db/btree.h | 16
-rw-r--r--  src/mongo/db/btreebuilder.cpp | 2
-rw-r--r--  src/mongo/db/btreecursor.cpp | 16
-rw-r--r--  src/mongo/db/cap.cpp | 30
-rw-r--r--  src/mongo/db/client.cpp | 18
-rw-r--r--  src/mongo/db/client.h | 4
-rw-r--r--  src/mongo/db/clientcursor.cpp | 26
-rw-r--r--  src/mongo/db/clientcursor.h | 2
-rw-r--r--  src/mongo/db/cloner.cpp | 8
-rw-r--r--  src/mongo/db/cmdline.cpp | 14
-rw-r--r--  src/mongo/db/commands/cloud.cpp | 14
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 4
-rwxr-xr-x  src/mongo/db/commands/document_source_cursor.cpp | 6
-rw-r--r--  src/mongo/db/commands/mr.cpp | 22
-rwxr-xr-x  src/mongo/db/commands/pipeline_command.cpp | 2
-rw-r--r--  src/mongo/db/compact.cpp | 12
-rw-r--r--  src/mongo/db/curop.cpp | 2
-rw-r--r--  src/mongo/db/cursor.cpp | 8
-rw-r--r--  src/mongo/db/cursor.h | 2
-rw-r--r--  src/mongo/db/d_concurrency.cpp | 44
-rw-r--r--  src/mongo/db/database.cpp | 18
-rw-r--r--  src/mongo/db/db.cpp | 42
-rw-r--r--  src/mongo/db/db.h | 10
-rw-r--r--  src/mongo/db/dbcommands.cpp | 22
-rw-r--r--  src/mongo/db/dbcommands_admin.cpp | 4
-rw-r--r--  src/mongo/db/dbcommands_generic.cpp | 10
-rw-r--r--  src/mongo/db/dbeval.cpp | 4
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 12
-rw-r--r--  src/mongo/db/dbmessage.h | 2
-rw-r--r--  src/mongo/db/dbwebserver.cpp | 10
-rw-r--r--  src/mongo/db/diskloc.h | 6
-rw-r--r--  src/mongo/db/dur.cpp | 26
-rw-r--r--  src/mongo/db/dur_commitjob.cpp | 2
-rw-r--r--  src/mongo/db/dur_commitjob.h | 2
-rw-r--r--  src/mongo/db/dur_journal.cpp | 36
-rw-r--r--  src/mongo/db/dur_preplogbuffer.cpp | 6
-rw-r--r--  src/mongo/db/dur_recover.cpp | 36
-rw-r--r--  src/mongo/db/explain.cpp | 6
-rw-r--r--  src/mongo/db/extsort.cpp | 4
-rw-r--r--  src/mongo/db/geo/2d.cpp | 176
-rw-r--r--  src/mongo/db/geo/core.h | 24
-rw-r--r--  src/mongo/db/geo/haystack.cpp | 6
-rw-r--r--  src/mongo/db/index.cpp | 6
-rw-r--r--  src/mongo/db/index.h | 2
-rw-r--r--  src/mongo/db/indexkey.cpp | 4
-rw-r--r--  src/mongo/db/instance.cpp | 42
-rw-r--r--  src/mongo/db/introspect.cpp | 2
-rw-r--r--  src/mongo/db/jsobj.cpp | 82
-rw-r--r--  src/mongo/db/jsobjmanipulator.h | 8
-rw-r--r--  src/mongo/db/json.cpp | 6
-rw-r--r--  src/mongo/db/key.cpp | 14
-rw-r--r--  src/mongo/db/lasterror.cpp | 4
-rw-r--r--  src/mongo/db/lasterror.h | 2
-rwxr-xr-x  src/mongo/db/matcher.cpp | 28
-rw-r--r--  src/mongo/db/matcher.h | 2
-rw-r--r--  src/mongo/db/minilex.h | 6
-rw-r--r--  src/mongo/db/mongommf.cpp | 28
-rw-r--r--  src/mongo/db/mongommf.h | 2
-rw-r--r--  src/mongo/db/mongomutex.h | 4
-rw-r--r--  src/mongo/db/namespace-inl.h | 2
-rw-r--r--  src/mongo/db/namespace_details.cpp | 40
-rw-r--r--  src/mongo/db/namespace_details.h | 8
-rw-r--r--  src/mongo/db/nonce.cpp | 4
-rw-r--r--  src/mongo/db/oplog.cpp | 24
-rw-r--r--  src/mongo/db/oplog.h | 2
-rw-r--r--  src/mongo/db/oplogreader.h | 2
-rw-r--r--  src/mongo/db/ops/delete.cpp | 2
-rw-r--r--  src/mongo/db/ops/query.cpp | 24
-rw-r--r--  src/mongo/db/ops/update.cpp | 24
-rw-r--r--  src/mongo/db/ops/update.h | 8
-rw-r--r--  src/mongo/db/pagefault.cpp | 6
-rw-r--r--  src/mongo/db/pdfile.cpp | 138
-rw-r--r--  src/mongo/db/pdfile.h | 28
-rwxr-xr-x  src/mongo/db/pipeline/accumulator.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_add_to_set.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_avg.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_first.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_last.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_min_max.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_push.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_sum.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/document.cpp | 6
-rwxr-xr-x  src/mongo/db/pipeline/document.h | 2
-rwxr-xr-x  src/mongo/db/pipeline/document_source.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/document_source.h | 4
-rwxr-xr-x  src/mongo/db/pipeline/document_source_bson_array.cpp | 8
-rwxr-xr-x  src/mongo/db/pipeline/document_source_command_futures.cpp | 6
-rwxr-xr-x  src/mongo/db/pipeline/document_source_filter_base.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/document_source_group.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/document_source_match.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/document_source_out.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/document_source_sort.cpp | 6
-rwxr-xr-x  src/mongo/db/pipeline/document_source_unwind.cpp | 6
-rwxr-xr-x  src/mongo/db/pipeline/expression.cpp | 36
-rwxr-xr-x  src/mongo/db/pipeline/value.cpp | 50
-rwxr-xr-x  src/mongo/db/pipeline/value.h | 2
-rw-r--r--  src/mongo/db/projection.cpp | 4
-rw-r--r--  src/mongo/db/queryoptimizer.cpp | 16
-rw-r--r--  src/mongo/db/queryoptimizer.h | 8
-rw-r--r--  src/mongo/db/queryoptimizercursorimpl.cpp | 6
-rw-r--r--  src/mongo/db/querypattern.cpp | 2
-rw-r--r--  src/mongo/db/queryutil.cpp | 48
-rw-r--r--  src/mongo/db/queryutil.h | 8
-rw-r--r--  src/mongo/db/record.cpp | 4
-rw-r--r--  src/mongo/db/repl.cpp | 22
-rw-r--r--  src/mongo/db/repl/consensus.cpp | 18
-rw-r--r--  src/mongo/db/repl/health.cpp | 8
-rw-r--r--  src/mongo/db/repl/manager.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs.cpp | 24
-rw-r--r--  src/mongo/db/repl/rs.h | 14
-rw-r--r--  src/mongo/db/repl/rs_config.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp | 6
-rw-r--r--  src/mongo/db/repl/rs_initiate.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_optime.h | 2
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 14
-rw-r--r--  src/mongo/db/repl/rs_sync.cpp | 12
-rw-r--r--  src/mongo/db/repl_block.cpp | 4
-rw-r--r--  src/mongo/db/replutil.h | 4
-rw-r--r--  src/mongo/db/restapi.cpp | 2
-rw-r--r--  src/mongo/db/scanandorder.cpp | 6
-rw-r--r--  src/mongo/db/scanandorder.h | 2
-rw-r--r--  src/mongo/db/security_common.cpp | 2
-rw-r--r--  src/mongo/db/stats/snapshots.cpp | 4
-rw-r--r--  src/mongo/db/stats/top.h | 2
-rw-r--r--  src/mongo/db/taskqueue.h | 4
126 files changed, 882 insertions, 896 deletions
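
The change below is mechanical: every assert(...) in src/mongo/db becomes verify(...). The rationale (context, not stated in the diff itself) is that the standard assert from <cassert> expands to nothing when NDEBUG is defined, so release builds silently lose the checks, whereas a project-owned verify macro stays active in every build. A minimal sketch of such a macro, with hypothetical names — MongoDB's real definitions live in its assertion utilities and report failures through its own error machinery:

// Minimal always-on assertion macro (sketch; names are illustrative).
// Unlike assert(), this does not disappear when NDEBUG is defined.
#include <cstdio>
#include <cstdlib>

inline void verifyFailed(const char* expr, const char* file, int line) {
    // A real handler would log and throw an exception rather than abort.
    std::fprintf(stderr, "verify( %s ) failed  %s:%d\n", expr, file, line);
    std::abort();
}

#define verify(expression) \
    do { \
        if (!(expression)) \
            verifyFailed(#expression, __FILE__, __LINE__); \
    } while (false)
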
diff --git a/src/mongo/db/btree.cpp b/src/mongo/db/btree.cpp
index d7ff311ffae..f652f5e13a9 100644
--- a/src/mongo/db/btree.cpp
+++ b/src/mongo/db/btree.cpp
@@ -139,8 +139,8 @@ namespace mongo {
long long BtreeBucket<V>::fullValidate(const DiskLoc& thisLoc, const BSONObj &order, long long *unusedCount, bool strict, unsigned depth) const {
{
bool f = false;
- assert( f = true );
- massert( 10281 , "assert is misdefined", f);
+ verify( f = true );
+ massert( 10281 , "verify is misdefined", f);
}
killCurrentOp.checkForInterrupt();
@@ -169,7 +169,7 @@ namespace mongo {
DiskLoc left = kn.prevChildBucket;
const BtreeBucket *b = left.btree<V>();
if ( strict ) {
- assert( b->parent == thisLoc );
+ verify( b->parent == thisLoc );
}
else {
wassert( b->parent == thisLoc );
@@ -181,7 +181,7 @@ namespace mongo {
DiskLoc ll = this->nextChild;
const BtreeBucket *b = ll.btree<V>();
if ( strict ) {
- assert( b->parent == thisLoc );
+ verify( b->parent == thisLoc );
}
else {
wassert( b->parent == thisLoc );
@@ -252,7 +252,7 @@ namespace mongo {
ONCE {
((BtreeBucket<V> *) this)->dump();
}
- assert(false);
+ verify(false);
}
}
}
@@ -260,7 +260,7 @@ namespace mongo {
template< class V >
inline void BucketBasics<V>::markUnused(int keypos) {
- assert( keypos >= 0 && keypos < this->n );
+ verify( keypos >= 0 && keypos < this->n );
k(keypos).setUnused();
}
@@ -293,21 +293,21 @@ namespace mongo {
*/
template< class V >
inline int BucketBasics<V>::_alloc(int bytes) {
- assert( this->emptySize >= bytes );
+ verify( this->emptySize >= bytes );
this->topSize += bytes;
this->emptySize -= bytes;
int ofs = totalDataSize() - this->topSize;
- assert( ofs > 0 );
+ verify( ofs > 0 );
return ofs;
}
template< class V >
void BucketBasics<V>::_delKeyAtPos(int keypos, bool mayEmpty) {
// TODO This should be keypos < n
- assert( keypos >= 0 && keypos <= this->n );
- assert( childForPos(keypos).isNull() );
+ verify( keypos >= 0 && keypos <= this->n );
+ verify( childForPos(keypos).isNull() );
// TODO audit cases where nextChild is null
- assert( ( mayEmpty && this->n > 0 ) || this->n > 1 || this->nextChild.isNull() );
+ verify( ( mayEmpty && this->n > 0 ) || this->n > 1 || this->nextChild.isNull() );
this->emptySize += sizeof(_KeyNode);
this->n--;
for ( int j = keypos; j < this->n; j++ )
@@ -322,7 +322,7 @@ namespace mongo {
template< class V >
void BucketBasics<V>::popBack(DiskLoc& recLoc, Key &key) {
massert( 10282 , "n==0 in btree popBack()", this->n > 0 );
- assert( k(this->n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
+ verify( k(this->n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
KeyNode kn = keyNode(this->n-1);
recLoc = kn.recordLoc;
key.assign(kn.key);
@@ -347,7 +347,7 @@ namespace mongo {
int bytesNeeded = key.dataSize() + sizeof(_KeyNode);
if ( bytesNeeded > this->emptySize )
return false;
- assert( bytesNeeded <= this->emptySize );
+ verify( bytesNeeded <= this->emptySize );
if( this->n ) {
const KeyNode klast = keyNode(this->n-1);
if( klast.key.woCompare(key, order) > 0 ) {
@@ -355,7 +355,7 @@ namespace mongo {
log() << " klast: " << keyNode(this->n-1).key.toString() << endl;
log() << " key: " << key.toString() << endl;
DEV klast.key.woCompare(key, order);
- assert(false);
+ verify(false);
}
}
this->emptySize -= sizeof(_KeyNode);
@@ -508,7 +508,7 @@ namespace mongo {
this->emptySize = tdz - dataUsed - this->n * sizeof(_KeyNode);
{
int foo = this->emptySize;
- assert( foo >= 0 );
+ verify( foo >= 0 );
}
setPacked();
@@ -518,7 +518,7 @@ namespace mongo {
template< class V >
inline void BucketBasics<V>::truncateTo(int N, const Ordering &order, int &refPos) {
- assert( Lock::somethingWriteLocked() );
+ verify( Lock::somethingWriteLocked() );
assertWritable();
this->n = N;
setNotPacked();
@@ -544,7 +544,7 @@ namespace mongo {
*/
template< class V >
int BucketBasics<V>::splitPos( int keypos ) const {
- assert( this->n > 2 );
+ verify( this->n > 2 );
int split = 0;
int rightSize = 0;
// when splitting a btree node, if the new key is greater than all the other keys, we should not do an even split, but a 90/10 split.
@@ -571,7 +571,7 @@ namespace mongo {
template< class V >
void BucketBasics<V>::reserveKeysFront( int nAdd ) {
- assert( this->emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
+ verify( this->emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
this->emptySize -= sizeof( _KeyNode ) * nAdd;
for( int i = this->n - 1; i > -1; --i ) {
k( i + nAdd ) = k( i );
@@ -613,7 +613,7 @@ namespace mongo {
continue;
}
- assert(b->n>0);
+ verify(b->n>0);
largestLoc = loc;
largestKey = b->n-1;
@@ -821,7 +821,7 @@ namespace mongo {
template< class V >
void BtreeBucket<V>::delBucket(const DiskLoc thisLoc, const IndexDetails& id) {
ClientCursor::informAboutToDeleteBucket(thisLoc); // slow...
- assert( !isHead() );
+ verify( !isHead() );
DiskLoc ll = this->parent;
const BtreeBucket *p = ll.btree<V>();
@@ -849,7 +849,7 @@ namespace mongo {
/** note: may delete the entire bucket! this invalid upon return sometimes. */
template< class V >
void BtreeBucket<V>::delKeyAtPos( const DiskLoc thisLoc, IndexDetails& id, int p, const Ordering &order) {
- assert(this->n>0);
+ verify(this->n>0);
DiskLoc left = this->childForPos(p);
if ( this->n == 1 ) {
@@ -907,7 +907,7 @@ namespace mongo {
void BtreeBucket<V>::deleteInternalKey( const DiskLoc thisLoc, int keypos, IndexDetails &id, const Ordering &order ) {
DiskLoc lchild = this->childForPos( keypos );
DiskLoc rchild = this->childForPos( keypos + 1 );
- assert( !lchild.isNull() || !rchild.isNull() );
+ verify( !lchild.isNull() || !rchild.isNull() );
int advanceDirection = lchild.isNull() ? 1 : -1;
int advanceKeyOfs = keypos;
DiskLoc advanceLoc = advance( thisLoc, advanceKeyOfs, advanceDirection, __FUNCTION__ );
@@ -937,9 +937,9 @@ namespace mongo {
template< class V >
void BtreeBucket<V>::replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id ) {
- assert( this->n == 0 && !this->nextChild.isNull() );
+ verify( this->n == 0 && !this->nextChild.isNull() );
if ( this->parent.isNull() ) {
- assert( id.head == thisLoc );
+ verify( id.head == thisLoc );
id.head.writing() = this->nextChild;
}
else {
@@ -953,7 +953,7 @@ namespace mongo {
template< class V >
bool BtreeBucket<V>::canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const {
- assert( leftIndex >= 0 && leftIndex < this->n );
+ verify( leftIndex >= 0 && leftIndex < this->n );
DiskLoc leftNodeLoc = this->childForPos( leftIndex );
DiskLoc rightNodeLoc = this->childForPos( leftIndex + 1 );
if ( leftNodeLoc.isNull() || rightNodeLoc.isNull() ) {
@@ -986,7 +986,7 @@ namespace mongo {
int rightSizeLimit = ( l->topSize + l->n * KNS + keyNode( leftIndex ).key.dataSize() + KNS + r->topSize + r->n * KNS ) / 2;
// This constraint should be ensured by only calling this function
// if we go below the low water mark.
- assert( rightSizeLimit < BtreeBucket<V>::bodySize() );
+ verify( rightSizeLimit < BtreeBucket<V>::bodySize() );
for( int i = r->n - 1; i > -1; --i ) {
rightSize += r->keyNode( i ).key.dataSize() + KNS;
if ( rightSize > rightSizeLimit ) {
@@ -1061,7 +1061,7 @@ namespace mongo {
template< class V >
int BtreeBucket<V>::indexInParent( const DiskLoc &thisLoc ) const {
- assert( !this->parent.isNull() );
+ verify( !this->parent.isNull() );
const BtreeBucket *p = BTREE(this->parent);
if ( p->nextChild == thisLoc ) {
return p->n;
@@ -1078,7 +1078,7 @@ namespace mongo {
dump();
out() << "Parent: " << this->parent << "\n";
p->dump();
- assert(false);
+ verify(false);
return -1; // just to compile
}
@@ -1175,7 +1175,7 @@ namespace mongo {
// By definition, if we are below the low water mark and cannot merge
// then we must actively balance.
- assert( split != l->n );
+ verify( split != l->n );
if ( split < l->n ) {
doBalanceLeftToRight( thisLoc, leftIndex, split, l, lchild, r, rchild, id, order );
}
@@ -1280,7 +1280,7 @@ namespace mongo {
this->_delKeyAtPos( keypos, true );
// Ensure we do not orphan neighbor's old child.
- assert( this->childForPos( keypos ) == rchild );
+ verify( this->childForPos( keypos ) == rchild );
// Just set temporarily - required to pass validation in insertHere()
this->childForPos( keypos ) = lchild;
@@ -1323,10 +1323,10 @@ namespace mongo {
out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
out() << " key: " << key.toString() << endl;
dump();
- assert(false);
+ verify(false);
}
kn->prevChildBucket = this->nextChild;
- assert( kn->prevChildBucket == lchild );
+ verify( kn->prevChildBucket == lchild );
this->nextChild.writing() = rchild;
if ( !rchild.isNull() )
BTREE(rchild)->parent.writing() = thisLoc;
@@ -1341,7 +1341,7 @@ namespace mongo {
out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
out() << " key: " << key.toString() << endl;
dump();
- assert(false);
+ verify(false);
}
const Loc *pc = &k(keypos+1).prevChildBucket;
*getDur().alreadyDeclared( const_cast<Loc*>(pc) ) = rchild; // declared in basicInsert()
@@ -1422,7 +1422,7 @@ namespace mongo {
}
else {
int kp = keypos-split-1;
- assert(kp>=0);
+ verify(kp>=0);
BTREE(rLoc)->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
}
}
@@ -1460,7 +1460,7 @@ namespace mongo {
out() << " thisLoc: " << thisLoc.toString() << endl;
out() << " keyOfs: " << keyOfs << " n:" << this->n << " direction: " << direction << endl;
out() << bucketSummary() << endl;
- assert(false);
+ verify(false);
}
int adj = direction < 0 ? 1 : 0;
int ko = keyOfs + direction;
@@ -1494,7 +1494,7 @@ namespace mongo {
return ancestor;
}
}
- assert( direction<0 || an->nextChild == childLoc );
+ verify( direction<0 || an->nextChild == childLoc );
// parent exhausted also, keep going up
childLoc = ancestor;
ancestor = an->parent;
@@ -1704,7 +1704,7 @@ namespace mongo {
Continuation<V>& c,
bool dupsAllowed) const {
dassert( c.key.dataSize() <= this->KeyMax );
- assert( c.key.dataSize() > 0 );
+ verify( c.key.dataSize() > 0 );
int pos;
bool found = find(c.idx, c.key, c.recordLoc, c.order, pos, !dupsAllowed);
@@ -1753,7 +1753,7 @@ namespace mongo {
problem() << "ERROR: key too large len:" << key.dataSize() << " max:" << this->KeyMax << ' ' << key.dataSize() << ' ' << idx.indexNamespace() << endl;
return 2;
}
- assert( key.dataSize() > 0 );
+ verify( key.dataSize() > 0 );
int pos;
bool found = find(idx, key, recordLoc, order, pos, !dupsAllowed);
@@ -1926,7 +1926,7 @@ namespace mongo {
A.GETOFS() += 2;
b->bt_insert(id.head, A, key, order, true, id);
A.GETOFS() += 2;
- assert( b->k(0).isUsed() );
+ verify( b->k(0).isUsed() );
// b->k(0).setUnused();
b->k(1).setUnused();
b->k(2).setUnused();
@@ -1960,19 +1960,19 @@ namespace mongo {
DiskLoc56Bit bigl;
{
bigl = big;
- assert( big == bigl );
+ verify( big == bigl );
DiskLoc e = bigl;
- assert( big == e );
+ verify( big == e );
}
{
DiskLoc d;
- assert( d.isNull() );
+ verify( d.isNull() );
DiskLoc56Bit l;
l = d;
- assert( l.isNull() );
+ verify( l.isNull() );
d = l;
- assert( d.isNull() );
- assert( l < bigl );
+ verify( d.isNull() );
+ verify( l < bigl );
}
}
} btunittest;
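
The first hunk of the btree.cpp diff above (fullValidate) shows why an always-on macro matters: the guard deliberately hides a side effect inside the assertion, so that massert 10281 fires if the macro has been compiled out. A standalone sketch of the hazard being guarded against, using plain <cassert> for illustration:

#include <cassert>

int main() {
    bool f = false;
    assert( f = true );  // compiled out under NDEBUG: the assignment never runs
    return f ? 0 : 1;    // a release build returns 1, proving assert() is dead
}

With the verify macro sketched earlier, the expression is always evaluated, f is always set, and the misdefinition check passes in every build.
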
diff --git a/src/mongo/db/btree.h b/src/mongo/db/btree.h
index 38f395b2d78..056eb486b85 100644
--- a/src/mongo/db/btree.h
+++ b/src/mongo/db/btree.h
@@ -90,12 +90,12 @@ namespace mongo {
unsigned short _kdo;
void setKeyDataOfs(short s) {
_kdo = s;
- assert(s>=0);
+ verify(s>=0);
}
/** Seems to be redundant. */
void setKeyDataOfsSavingUse(short s) {
_kdo = s;
- assert(s>=0);
+ verify(s>=0);
}
/**
* Unused keys are not returned by read operations. Keys may be marked
@@ -240,9 +240,9 @@ namespace mongo {
void operator=(const DiskLoc& loc) {
ofs = loc.getOfs();
int la = loc.a();
- assert( la <= 0xffffff ); // must fit in 3 bytes
+ verify( la <= 0xffffff ); // must fit in 3 bytes
if( la < 0 ) {
- assert( la == -1 );
+ verify( la == -1 );
la = 0;
ofs = OurNullOfs;
}
@@ -412,7 +412,7 @@ namespace mongo {
bool _pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild);
void pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild) {
bool ok = _pushBack( recordLoc , key , order , prevChild );
- assert(ok);
+ verify(ok);
}
/**
@@ -1045,9 +1045,9 @@ namespace mongo {
virtual bool isMultiKey() const { return _multikey; }
/*const _KeyNode& _currKeyNode() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
const _KeyNode& kn = keyNode(keyOfs);
- assert( kn.isUsed() );
+ verify( kn.isUsed() );
return kn;
}*/
@@ -1176,7 +1176,7 @@ namespace mongo {
*/
template< class V >
BtreeBucket<V> * DiskLoc::btreemod() const {
- assert( _a != -1 );
+ verify( _a != -1 );
BtreeBucket<V> *b = const_cast< BtreeBucket<V> * >( btree<V>() );
return static_cast< BtreeBucket<V>* >( getDur().writingPtr( b, V::BucketSize ) );
}
diff --git a/src/mongo/db/btreebuilder.cpp b/src/mongo/db/btreebuilder.cpp
index 0ec587a1958..5619474ee07 100644
--- a/src/mongo/db/btreebuilder.cpp
+++ b/src/mongo/db/btreebuilder.cpp
@@ -172,7 +172,7 @@ namespace mongo {
x = next;
getDur().commitIfNeeded();
}
- assert( idx.head.isNull() );
+ verify( idx.head.isNull() );
log(2) << "done rollback" << endl;
}
)
diff --git a/src/mongo/db/btreecursor.cpp b/src/mongo/db/btreecursor.cpp
index e2158762bdf..8a19aa3136f 100644
--- a/src/mongo/db/btreecursor.cpp
+++ b/src/mongo/db/btreecursor.cpp
@@ -49,7 +49,7 @@ namespace mongo {
}
virtual BSONObj keyAt(int ofs) const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
const BtreeBucket<V> *b = bucket.btree<V>();
int n = b->getN();
if( n == b->INVALID_N_SENTINEL ) {
@@ -60,7 +60,7 @@ namespace mongo {
}
virtual BSONObj currKey() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
return bucket.btree<V>()->keyNode(keyOfs).key.toBson();
}
@@ -102,7 +102,7 @@ namespace mongo {
_multikey = d->isMultikey(idxNo);
if ( keyOfs >= 0 ) {
- assert( !keyAtKeyOfs.isEmpty() );
+ verify( !keyAtKeyOfs.isEmpty() );
try {
// Note keyAt() returns an empty BSONObj if keyOfs is now out of range,
@@ -173,7 +173,7 @@ namespace mongo {
private:
const KeyNode currKeyNode() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
const BtreeBucket<V> *b = bucket.btree<V>();
return b->keyNode(keyOfs);
}
@@ -205,7 +205,7 @@ namespace mongo {
}
virtual BSONObj currKey() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
return bucket.btree<V1>()->keyNode(keyOfs).key.toBson();
}
@@ -227,7 +227,7 @@ namespace mongo {
private:
const KeyNode currKeyNode() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
const BtreeBucket<V1> *b = bucket.btree<V1>();
return b->keyNode(keyOfs);
}
@@ -310,7 +310,7 @@ namespace mongo {
_order( _id.keyPattern() ),
_ordering( Ordering::make( _order ) ),
_direction( _direction ),
- _bounds( ( assert( _bounds.get() ), _bounds ) ),
+ _bounds( ( verify( _bounds.get() ), _bounds ) ),
_boundsIterator( new FieldRangeVectorIterator( *_bounds ) ),
_independentFieldRanges( true ),
_nscanned( 0 ) {
@@ -450,7 +450,7 @@ namespace mongo {
struct BtreeCursorUnitTest {
BtreeCursorUnitTest() {
- assert( minDiskLoc.compare(maxDiskLoc) < 0 );
+ verify( minDiskLoc.compare(maxDiskLoc) < 0 );
}
} btut;
diff --git a/src/mongo/db/cap.cpp b/src/mongo/db/cap.cpp
index a8be2383115..72da3f9cc2e 100644
--- a/src/mongo/db/cap.cpp
+++ b/src/mongo/db/cap.cpp
@@ -54,7 +54,7 @@ namespace mongo {
(or 3...there will be a little unused sliver at the end of the extent.)
*/
void NamespaceDetails::compact() {
- assert(capped);
+ verify(capped);
list<DiskLoc> drecs;
@@ -69,7 +69,7 @@ namespace mongo {
drecs.sort();
list<DiskLoc>::iterator j = drecs.begin();
- assert( j != drecs.end() );
+ verify( j != drecs.end() );
DiskLoc a = *j;
while ( 1 ) {
j++;
@@ -105,7 +105,7 @@ namespace mongo {
void NamespaceDetails::cappedCheckMigrate() {
// migrate old NamespaceDetails format
- assert( capped );
+ verify( capped );
if ( capExtent.a() == 0 && capExtent.getOfs() == 0 ) {
//capFirstNewRecord = DiskLoc();
capFirstNewRecord.writing().setInvalid();
@@ -128,18 +128,18 @@ namespace mongo {
}
bool NamespaceDetails::inCapExtent( const DiskLoc &dl ) const {
- assert( !dl.isNull() );
+ verify( !dl.isNull() );
// We could have a rec or drec, doesn't matter.
bool res = dl.drec()->myExtentLoc(dl) == capExtent;
DEV {
// old implementation. this check is temp to test works the same. new impl should be a little faster.
- assert( res == (dl.drec()->myExtent( dl ) == capExtent.ext()) );
+ verify( res == (dl.drec()->myExtent( dl ) == capExtent.ext()) );
}
return res;
}
bool NamespaceDetails::nextIsInCapExtent( const DiskLoc &dl ) const {
- assert( !dl.isNull() );
+ verify( !dl.isNull() );
DiskLoc next = dl.drec()->nextDeleted;
if ( next.isNull() )
return false;
@@ -186,7 +186,7 @@ namespace mongo {
else
prev.drec()->nextDeleted.writing() = ret.drec()->nextDeleted;
ret.drec()->nextDeleted.writing().setInvalid(); // defensive.
- assert( ret.drec()->extentOfs < ret.getOfs() );
+ verify( ret.drec()->extentOfs < ret.getOfs() );
}
return ret;
@@ -197,7 +197,7 @@ namespace mongo {
if ( !cappedLastDelRecLastExtent().isValid() )
getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
- assert( len < 400000000 );
+ verify( len < 400000000 );
int passes = 0;
int maxPasses = ( len / 30 ) + 2; // 30 is about the smallest entry that could go in the oplog
if ( maxPasses < 5000 ) {
@@ -209,7 +209,7 @@ namespace mongo {
// delete records until we have room and the max # objects limit achieved.
/* this fails on a rename -- that is ok but must keep commented out */
- //assert( theCapExtent()->ns == ns );
+ //verify( theCapExtent()->ns == ns );
theCapExtent()->assertOk();
DiskLoc firstEmptyExtent;
@@ -306,14 +306,14 @@ namespace mongo {
// deleted record. Here we check that 'i' is not the last deleted
// record. (We expect that there will be deleted records in the new
// capExtent as well.)
- assert( !i.drec()->nextDeleted.isNull() );
+ verify( !i.drec()->nextDeleted.isNull() );
cappedLastDelRecLastExtent().writing() = i;
}
}
void NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusive) {
- DEV assert( this == nsdetails(ns) );
- assert( cappedLastDelRecLastExtent().isValid() );
+ DEV verify( this == nsdetails(ns) );
+ verify( cappedLastDelRecLastExtent().isValid() );
// We iteratively remove the newest document until the newest document
// is 'end', then we remove 'end' if requested.
@@ -326,7 +326,7 @@ namespace mongo {
getDur().commitIfNeeded();
// 'curr' will point to the newest document in the collection.
DiskLoc curr = theCapExtent()->lastRecord;
- assert( !curr.isNull() );
+ verify( !curr.isNull() );
if ( curr == end ) {
if ( inclusive ) {
// 'end' has been found, so break next iteration.
@@ -358,7 +358,7 @@ namespace mongo {
// the 'capExtent' can't be empty, so we set 'capExtent' to
// capExtent's prev extent.
if ( theCapExtent()->lastRecord.isNull() ) {
- assert( !theCapExtent()->xprev.isNull() );
+ verify( !theCapExtent()->xprev.isNull() );
// NOTE Because we didn't delete the last document, and
// capLooped() is false, capExtent is not the first extent
// so xprev will be nonnull.
@@ -407,7 +407,7 @@ namespace mongo {
}
void NamespaceDetails::emptyCappedCollection( const char *ns ) {
- DEV assert( this == nsdetails(ns) );
+ DEV verify( this == nsdetails(ns) );
massert( 13424, "collection must be capped", capped );
massert( 13425, "background index build in progress", !indexBuildInProgress );
massert( 13426, "indexes present", nIndexes == 0 );
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index aabca3c05a2..de6410c2fe6 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -83,7 +83,7 @@ namespace mongo {
#if defined _DEBUG
static unsigned long long nThreads = 0;
void assertStartingUp() {
- assert( nThreads <= 1 );
+ verify( nThreads <= 1 );
}
#else
void assertStartingUp() { }
@@ -99,7 +99,7 @@ namespace mongo {
}
}
#endif
- assert( currentClient.get() == 0 );
+ verify( currentClient.get() == 0 );
Client *c = new Client(desc, mp);
currentClient.reset(c);
mongo::lastError.initThread();
@@ -179,7 +179,7 @@ namespace mongo {
_ns( ns ),
_db(db)
{
- assert( db == 0 || db->isOk() );
+ verify( db == 0 || db->isOk() );
_client->_context = this;
checkNsAccess( doauth );
}
@@ -266,7 +266,7 @@ namespace mongo {
_ns( ns ),
_db(db)
{
- assert(_db);
+ verify(_db);
checkNotStale();
_client->_context = this;
_client->_curOp->enter( this );
@@ -281,7 +281,7 @@ namespace mongo {
}
_db = dbHolderUnchecked().getOrCreate( _ns , _path , _justCreated );
- assert(_db);
+ verify(_db);
if( _doVersion ) checkNotStale();
massert( 16107 , str::stream() << "Don't have a lock on: " << _ns , Lock::atLeastReadLocked( _ns ) );
_client->_context = this;
@@ -305,7 +305,7 @@ namespace mongo {
}
Client::Context::~Context() {
- DEV assert( _client == currentClient.get() );
+ DEV verify( _client == currentClient.get() );
_client->_curOp->leave( this );
_client->_context = _oldContext; // note: _oldContext may be null
}
@@ -365,7 +365,7 @@ namespace mongo {
Client* curopWaitingForLock( char type ) {
Client * c = currentClient.get();
- assert( c );
+ verify( c );
CurOp * co = c->curop();
if ( co ) {
co->waitingForLock( type );
@@ -374,7 +374,7 @@ namespace mongo {
}
void curopGotLock(Client *c) {
- assert(c);
+ verify(c);
CurOp * co = c->curop();
if ( co )
co->gotLock();
@@ -422,7 +422,7 @@ namespace mongo {
{
BSONElement id = i.next();
- assert( id.type() );
+ verify( id.type() );
_remoteId = id.wrap( "_id" );
}
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index a277a97ec89..7dc4376868d 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -110,7 +110,7 @@ namespace mongo {
string toString() const;
void gotHandshake( const BSONObj& o );
bool hasRemote() const { return _mp; }
- HostAndPort getRemote() const { assert( _mp ); return _mp->remote(); }
+ HostAndPort getRemote() const { verify( _mp ); return _mp->remote(); }
BSONObj getRemoteID() const { return _remoteId; }
BSONObj getHandshake() const { return _handshake; }
AbstractMessagingPort * port() const { return _mp; }
@@ -240,7 +240,7 @@ namespace mongo {
/** get the Client object for this thread. */
inline Client& cc() {
Client * c = currentClient.get();
- assert( c );
+ verify( c );
return *c;
}
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 4a80a725280..4e6674b2dd3 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -47,13 +47,13 @@ namespace mongo {
ClientCursor *cc = clientCursorsById.begin()->second;
log() << "first one: " << cc->_cursorid << ' ' << cc->_ns << endl;
clientCursorsById.clear();
- assert(false);
+ verify(false);
}
}
void ClientCursor::setLastLoc_inlock(DiskLoc L) {
- assert( _pos != -2 ); // defensive - see ~ClientCursor
+ verify( _pos != -2 ); // defensive - see ~ClientCursor
if ( L == _lastLoc )
return;
@@ -80,15 +80,15 @@ namespace mongo {
Lock::assertWriteLocked(ns);
int len = strlen(ns);
const char* dot = strchr(ns, '.');
- assert( len > 0 && dot);
+ verify( len > 0 && dot);
bool isDB = (dot == &ns[len-1]); // first (and only) dot is the last char
{
//cout << "\nTEMP invalidate " << ns << endl;
Database *db = cc().database();
- assert(db);
- assert( str::startsWith(ns, db->name) );
+ verify(db);
+ verify( str::startsWith(ns, db->name) );
for( LockedIterator i; i.ok(); ) {
ClientCursor *cc = i.current();
@@ -123,7 +123,7 @@ namespace mongo {
for ( CCByLoc::iterator i = bl.begin(); i != bl.end(); ++i ) {
ClientCursor *cc = i->second;
if ( strncmp(ns, cc->ns.c_str(), len) == 0 ) {
- assert( cc->_db == db );
+ verify( cc->_db == db );
toDelete.push_back(i->second);
}
}*/
@@ -214,7 +214,7 @@ namespace mongo {
recursive_scoped_lock lock(ccmutex);
Database *db = cc().database();
- assert(db);
+ verify(db);
aboutToDeleteForSharding( db , dl );
@@ -228,7 +228,7 @@ namespace mongo {
while ( 1 ) {
toAdvance.push_back(j->second);
- DEV assert( j->first.loc == dl );
+ DEV verify( j->first.loc == dl );
++j;
if ( j == stop )
break;
@@ -306,8 +306,8 @@ namespace mongo {
Lock::assertAtLeastReadLocked(ns);
- assert( _db );
- assert( str::startsWith(_ns, _db->name) );
+ verify( _db );
+ verify( str::startsWith(_ns, _db->name) );
if( queryOptions & QueryOption_NoCursorTimeout )
noTimeout();
recursive_scoped_lock lock(ccmutex);
@@ -367,7 +367,7 @@ namespace mongo {
it.next();
x--;
}
- assert( x == 0 );
+ verify( x == 0 );
ret.insert( it.next() );
return true;
}
@@ -390,7 +390,7 @@ namespace mongo {
it.next();
x--;
}
- assert( x == 0 );
+ verify( x == 0 );
if ( fromKey )
*fromKey = true;
@@ -426,7 +426,7 @@ namespace mongo {
need to call when you are ready to "unlock".
*/
void ClientCursor::updateLocation() {
- assert( _cursorid );
+ verify( _cursorid );
_idleAgeMillis = 0;
_c->prepareToYield();
DiskLoc cl = _c->refLoc();
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index 2a30b419f6a..4b6fe9fad67 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -89,7 +89,7 @@ namespace mongo {
ClientCursor * c() { return _c; }
void release() {
if( _c ) {
- assert( _c->_pinValue >= 100 );
+ verify( _c->_pinValue >= 100 );
_c->_pinValue -= 100;
_c = 0;
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 4f890a6f723..978896b2928 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -155,7 +155,7 @@ namespace mongo {
BSONObj js = tmp;
if ( isindex ) {
- assert( strstr(from_collection, "system.indexes") );
+ verify( strstr(from_collection, "system.indexes") );
js = fixindex(tmp);
storedForLater->push_back( js.getOwned() );
continue;
@@ -362,8 +362,8 @@ namespace mongo {
string s = "bad system.namespaces object " + collection.toString();
massert( 10290 , s.c_str(), false);
}
- assert( !e.eoo() );
- assert( e.type() == String );
+ verify( !e.eoo() );
+ verify( e.type() == String );
const char *from_name = e.valuestr();
if( strstr(from_name, ".system.") ) {
@@ -394,7 +394,7 @@ namespace mongo {
/* change name "<fromdb>.collection" -> <todb>.collection */
const char *p = strchr(from_name, '.');
- assert(p);
+ verify(p);
string to_name = todb + p;
bool wantIdIndex = false;
diff --git a/src/mongo/db/cmdline.cpp b/src/mongo/db/cmdline.cpp
index c08258e9a4d..28b214aa418 100644
--- a/src/mongo/db/cmdline.cpp
+++ b/src/mongo/db/cmdline.cpp
@@ -142,14 +142,14 @@ namespace mongo {
}
void setupLaunchSignals() {
- assert( signal(SIGUSR2 , launchSignal ) != SIG_ERR );
+ verify( signal(SIGUSR2 , launchSignal ) != SIG_ERR );
}
void CmdLine::launchOk() {
if ( cmdLine.doFork ) {
// killing leader will propagate to parent
- assert( kill( cmdLine.leaderProc, SIGUSR2 ) == 0 );
+ verify( kill( cmdLine.leaderProc, SIGUSR2 ) == 0 );
}
}
#endif
@@ -171,9 +171,9 @@ namespace mongo {
// setup cwd
char buffer[1024];
#ifdef _WIN32
- assert( _getcwd( buffer , 1000 ) );
+ verify( _getcwd( buffer , 1000 ) );
#else
- assert( getcwd( buffer , 1000 ) );
+ verify( getcwd( buffer , 1000 ) );
#endif
cmdLine.cwd = buffer;
}
@@ -288,7 +288,7 @@ namespace mongo {
if ( params.count( "logpath" ) ) {
// test logpath
logpath = params["logpath"].as<string>();
- assert( logpath.size() );
+ verify( logpath.size() );
if ( logpath[0] != '/' ) {
logpath = cmdLine.cwd + "/" + logpath;
}
@@ -487,8 +487,8 @@ namespace mongo {
void setupCoreSignals() {
#if !defined(_WIN32)
- assert( signal(SIGUSR1 , rotateLogs ) != SIG_ERR );
- assert( signal(SIGHUP , ignoreSignal ) != SIG_ERR );
+ verify( signal(SIGUSR1 , rotateLogs ) != SIG_ERR );
+ verify( signal(SIGHUP , ignoreSignal ) != SIG_ERR );
#endif
}
diff --git a/src/mongo/db/commands/cloud.cpp b/src/mongo/db/commands/cloud.cpp
index c68b9f7564a..e4b5ce5722c 100644
--- a/src/mongo/db/commands/cloud.cpp
+++ b/src/mongo/db/commands/cloud.cpp
@@ -19,15 +19,15 @@ namespace mongo {
}
void dynHostResolve(string& name, int& port) {
- assert( !name.empty() );
- assert( !str::contains(name, ':') );
- assert( str::startsWith(name, '#') );
+ verify( !name.empty() );
+ verify( !str::contains(name, ':') );
+ verify( str::startsWith(name, '#') );
string s = dynHostNames.get(name);
if( s.empty() ) {
name.clear();
return;
}
- assert( !str::startsWith(s, '#') );
+ verify( !str::startsWith(s, '#') );
HostAndPort hp(s);
if( hp.hasPort() ) {
port = hp.port();
@@ -56,17 +56,17 @@ namespace mongo {
}
CmdCloud() : Command("cloud") {}
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- assert(!fromRepl);
+ verify(!fromRepl);
BSONObj nodes = cmdObj["nodes"].Obj();
map<string,string> ipmap;
for( BSONObj::iterator i(nodes); i.more(); ) {
BSONElement e = i.next();
- assert( *e.fieldName() == '#' );
+ verify( *e.fieldName() == '#' );
ipmap[e.fieldName()] = e.String();
}
string me = cmdObj["me"].String();
- assert( !me.empty() && me[0] == '#' );
+ verify( !me.empty() && me[0] == '#' );
log(/*1*/) << "CmdCloud" << endl;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 2a40506a02b..08c78308b35 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -91,7 +91,7 @@ namespace mongo {
}
- assert( cursor );
+ verify( cursor );
string cursorName = cursor->toString();
auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));
@@ -136,7 +136,7 @@ namespace mongo {
RARELY killCurrentOp.checkForInterrupt();
}
- assert( start == bb.buf() );
+ verify( start == bb.buf() );
result.appendArray( "values" , arr.done() );
diff --git a/src/mongo/db/commands/document_source_cursor.cpp b/src/mongo/db/commands/document_source_cursor.cpp
index 9e71eae77f4..d8bb170e6a4 100755
--- a/src/mongo/db/commands/document_source_cursor.cpp
+++ b/src/mongo/db/commands/document_source_cursor.cpp
@@ -95,12 +95,12 @@ namespace mongo {
void DocumentSourceCursor::setSource(DocumentSource *pSource) {
/* this doesn't take a source */
- assert(false);
+ verify(false);
}
void DocumentSourceCursor::sourceToBson(BSONObjBuilder *pBuilder) const {
/* this has no analog in the BSON world */
- assert(false);
+ verify(false);
}
DocumentSourceCursor::DocumentSourceCursor(
@@ -121,7 +121,7 @@ namespace mongo {
const shared_ptr<Cursor> &pCursor,
const string &ns,
const intrusive_ptr<ExpressionContext> &pExpCtx) {
- assert(pCursor.get());
+ verify(pCursor.get());
intrusive_ptr<DocumentSourceCursor> pSource(
new DocumentSourceCursor(pCursor, ns, pExpCtx));
return pSource;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index f4d43fe2125..420249a007c 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -48,7 +48,7 @@ namespace mongo {
void JSFunction::init( State * state ) {
_scope = state->scope();
- assert( _scope );
+ verify( _scope );
_scope->init( &_wantedScope );
_func = _scope->createFunction( _code.c_str() );
@@ -68,7 +68,7 @@ namespace mongo {
*/
void JSMapper::map( const BSONObj& o ) {
Scope * s = _func.scope();
- assert( s );
+ verify( s );
if ( s->invoke( _func.func() , &_params, &o , 0 , true, false, true ) )
throw UserException( 9014, str::stream() << "map invoke failed: " + s->getError() );
}
@@ -176,14 +176,14 @@ namespace mongo {
uassert( 13070 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );
if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ) {
- assert( n > 1 ); // if not, inf. loop
+ verify( n > 1 ); // if not, inf. loop
break;
}
valueBuilder->append( ee );
sizeSoFar += ee.size();
}
- assert(valueBuilder);
+ verify(valueBuilder);
valueBuilder->done();
BSONObj args = reduceArgs.obj();
@@ -438,7 +438,7 @@ namespace mongo {
BSONObj key = i->first;
BSONList& all = i->second;
- assert( all.size() == 1 );
+ verify( all.size() == 1 );
BSONObjIterator vi( all[0] );
vi.next();
@@ -543,7 +543,7 @@ namespace mongo {
* Insert doc in collection
*/
void State::insert( const string& ns , const BSONObj& o ) {
- assert( _onDisk );
+ verify( _onDisk );
writelock l( ns );
Client::Context ctx( ns );
@@ -564,7 +564,7 @@ namespace mongo {
* Insert doc into the inc collection
*/
void State::_insertToInc( BSONObj& o ) {
- assert( _onDisk );
+ verify( _onDisk );
theDataFileMgr.insertWithObjMod( _config.incLong.c_str() , o , true );
getDur().commitIfNeeded();
}
@@ -717,7 +717,7 @@ namespace mongo {
BSONObj key = i->first;
BSONList& all = i->second;
- assert( all.size() == 1 );
+ verify( all.size() == 1 );
BSONObj res = _config.finalizer->finalize( all[0] );
@@ -731,7 +731,7 @@ namespace mongo {
}
// use index on "0" to pull sorted data
- assert( _temp->size() == 0 );
+ verify( _temp->size() == 0 );
BSONObj sortKey = BSON( "0" << 1 );
{
bool foundIndex = false;
@@ -745,7 +745,7 @@ namespace mongo {
}
}
- assert( foundIndex );
+ verify( foundIndex );
}
Client::ReadContext ctx( _config.incLong );
@@ -753,7 +753,7 @@ namespace mongo {
BSONObj prev;
BSONList all;
- assert( pm == op->setMessage( "m/r: (3/3) final reduce to collection" , _db.count( _config.incLong, BSONObj(), QueryOption_SlaveOk ) ) );
+ verify( pm == op->setMessage( "m/r: (3/3) final reduce to collection" , _db.count( _config.incLong, BSONObj(), QueryOption_SlaveOk ) ) );
shared_ptr<Cursor> temp =
NamespaceDetailsTransient::bestGuessCursor( _config.incLong.c_str() , BSONObj() ,
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 295b3b2b770..386ce3d2d16 100755
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -159,7 +159,7 @@ namespace mongo {
}
/* NOTREACHED */
- assert(false);
+ verify(false);
return false;
}
diff --git a/src/mongo/db/compact.cpp b/src/mongo/db/compact.cpp
index 24716cc45fc..533e1845446 100644
--- a/src/mongo/db/compact.cpp
+++ b/src/mongo/db/compact.cpp
@@ -60,7 +60,7 @@ namespace mongo {
Extent *e = ext.ext();
e->assertOk();
- assert( e->validates() );
+ verify( e->validates() );
unsigned skipped = 0;
{
@@ -145,8 +145,8 @@ namespace mongo {
}
} // if !L.isNull()
- assert( d->firstExtent == ext );
- assert( d->lastExtent != ext );
+ verify( d->firstExtent == ext );
+ verify( d->lastExtent != ext );
DiskLoc newFirst = e->xnext;
d->firstExtent.writing() = newFirst;
newFirst.ext()->xprev.writing().Null();
@@ -257,7 +257,7 @@ namespace mongo {
result.append("invalidObjects", skipped);
}
- assert( d->firstExtent.ext()->xprev.isNull() );
+ verify( d->firstExtent.ext()->xprev.isNull() );
// indexes will do their own progress meter?
pm.finished();
@@ -373,11 +373,11 @@ namespace mongo {
int pb = 0;
if( cmdObj.hasElement("paddingFactor") ) {
pf = cmdObj["paddingFactor"].Number();
- assert( pf >= 1.0 && pf <= 4.0 );
+ verify( pf >= 1.0 && pf <= 4.0 );
}
if( cmdObj.hasElement("paddingBytes") ) {
pb = (int) cmdObj["paddingBytes"].Number();
- assert( pb >= 0 && pb <= 1024 * 1024 );
+ verify( pb >= 0 && pb <= 1024 * 1024 );
}
bool validate = !cmdObj.hasElement("validate") || cmdObj["validate"].trueValue(); // default is true at the moment
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 798856e9547..18abe49188a 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -75,7 +75,7 @@ namespace mongo {
if ( progressMeterTotal ) {
if ( _progressMeter.isActive() ) {
cout << "about to assert, old _message: " << _message << " new message:" << msg << endl;
- assert( ! _progressMeter.isActive() );
+ verify( ! _progressMeter.isActive() );
}
_progressMeter.reset( progressMeterTotal , secondsBetween );
}
diff --git a/src/mongo/db/cursor.cpp b/src/mongo/db/cursor.cpp
index ac7afc1532b..13944b20da0 100644
--- a/src/mongo/db/cursor.cpp
+++ b/src/mongo/db/cursor.cpp
@@ -59,7 +59,7 @@ namespace mongo {
}
DiskLoc nextLoop( NamespaceDetails *nsd, const DiskLoc &prev ) {
- assert( nsd->capLooped() );
+ verify( nsd->capLooped() );
DiskLoc next = forward()->next( prev );
if ( !next.isNull() )
return next;
@@ -67,7 +67,7 @@ namespace mongo {
}
DiskLoc prevLoop( NamespaceDetails *nsd, const DiskLoc &curr ) {
- assert( nsd->capLooped() );
+ verify( nsd->capLooped() );
DiskLoc prev = reverse()->next( curr );
if ( !prev.isNull() )
return prev;
@@ -96,7 +96,7 @@ namespace mongo {
}
DiskLoc ForwardCappedCursor::next( const DiskLoc &prev ) const {
- assert( nsd );
+ verify( nsd );
if ( !nsd->capLooped() )
return forward()->next( prev );
@@ -134,7 +134,7 @@ namespace mongo {
}
DiskLoc ReverseCappedCursor::next( const DiskLoc &prev ) const {
- assert( nsd );
+ verify( nsd );
if ( !nsd->capLooped() )
return reverse()->next( prev );
diff --git a/src/mongo/db/cursor.h b/src/mongo/db/cursor.h
index 6b2b04cac56..ee97865438e 100644
--- a/src/mongo/db/cursor.h
+++ b/src/mongo/db/cursor.h
@@ -224,7 +224,7 @@ namespace mongo {
}
bool ok() { return !curr.isNull(); }
Record* _current() {
- assert( ok() );
+ verify( ok() );
return curr.rec();
}
BSONObj current() {
diff --git a/src/mongo/db/d_concurrency.cpp b/src/mongo/db/d_concurrency.cpp
index 84c1cf34516..def72841920 100644
--- a/src/mongo/db/d_concurrency.cpp
+++ b/src/mongo/db/d_concurrency.cpp
@@ -105,7 +105,7 @@ namespace mongo {
void QLock::runExclusively(void (*f)(void)) {
dlog(1) << "QLock::runExclusively" << endl;
boost::mutex::scoped_lock lk( m );
- assert( w.n > 0 );
+ verify( w.n > 0 );
greed++; // stop new acquisitions
X.n++;
while( X.n ) {
@@ -183,14 +183,14 @@ namespace mongo {
}
static bool lock_R_try(int ms) {
- assert( threadState() == 0 );
+ verify( threadState() == 0 );
bool got = q.lock_R_try(ms);
if( got )
threadState() = 'R';
return got;
}
static bool lock_W_try(int ms) {
- assert( threadState() == 0 );
+ verify( threadState() == 0 );
bool got = q.lock_W_try(ms);
if( got ) {
threadState() = 'W';
@@ -199,7 +199,7 @@ namespace mongo {
return got;
}
static void lock_W_stop_greed() {
- assert( threadState() == 0 );
+ verify( threadState() == 0 );
threadState() = 'W';
{
Acquiring a('W');
@@ -241,7 +241,7 @@ namespace mongo {
}
static void lock_w() {
char &ts = threadState();
- assert( ts == 0 );
+ verify( ts == 0 );
getDur().commitIfNeeded();
ts = 'w';
Acquiring a('w');
@@ -255,7 +255,7 @@ namespace mongo {
}
static void lock_r() {
char& ts = threadState();
- assert( ts == 0 );
+ verify( ts == 0 );
ts = 'r';
Acquiring a('r');
q.lock_r();
@@ -269,23 +269,23 @@ namespace mongo {
// these are safe for use ACROSS threads. i.e. one thread can lock and
// another unlock
void Lock::ThreadSpanningOp::setWLockedNongreedy() {
- assert( threadState() == 0 ); // as this spans threads the tls wouldn't make sense
+ verify( threadState() == 0 ); // as this spans threads the tls wouldn't make sense
lock_W_stop_greed();
}
void Lock::ThreadSpanningOp::W_to_R() {
- assert( threadState() == 'W' );
+ verify( threadState() == 'W' );
dur::assertNothingSpooled();
q.W_to_R();
threadState() = 'R';
}
void Lock::ThreadSpanningOp::unsetW() { // note there is no unlocking_W() call here
- assert( threadState() == 'W' );
+ verify( threadState() == 'W' );
q.unlock_W();
q.start_greed();
threadState() = 0;
}
void Lock::ThreadSpanningOp::unsetR() {
- assert( threadState() == 'R' || threadState() == 0 );
+ verify( threadState() == 'R' || threadState() == 0 );
q.unlock_R();
q.start_greed();
threadState() = 0;
@@ -491,15 +491,15 @@ namespace mongo {
}
}
void Lock::GlobalWrite::downgrade() {
- assert( !noop );
- assert( threadState() == 'W' );
+ verify( !noop );
+ verify( threadState() == 'W' );
q.W_to_R();
threadState() = 'R';
}
// you will deadlock if 2 threads doing this
bool Lock::GlobalWrite::upgrade() {
- assert( !noop );
- assert( threadState() == 'R' );
+ verify( !noop );
+ verify( threadState() == 'R' );
if( q.R_to_W() ) {
threadState() = 'W';
return true;
@@ -534,18 +534,18 @@ namespace mongo {
case 'R' :
{
error() << "trying to get a w lock after already getting an R lock is not allowed" << endl;
- assert(false);
+ verify(false);
}
case 'r' :
{
error() << "trying to get a w lock after already getting an r lock is not allowed" << endl;
- assert(false);
+ verify(false);
}
return false;
case 'W' :
return true; // lock nothing further
default:
- assert(false);
+ verify(false);
case 'w' :
case 0 :
break;
@@ -559,7 +559,7 @@ namespace mongo {
error() << "can't lock local and admin db at the same time " << (int) db << ' ' << (int) ls.whichNestable << endl;
fassert(16131,false);
}
- assert( ls.nestableCount > 0 );
+ verify( ls.nestableCount > 0 );
}
else {
ls.whichNestable = db;
@@ -741,7 +741,7 @@ namespace mongo {
case 'w' :
return false;
default:
- assert(false);
+ verify(false);
case 0 :
;
}
@@ -753,7 +753,7 @@ namespace mongo {
case 'w':
break;
default:
- assert(false);
+ verify(false);
case 0 :
lock_w();
locked_w = true;
@@ -765,7 +765,7 @@ namespace mongo {
case 'w':
break;
default:
- assert(false);
+ verify(false);
case 0 :
lock_r();
locked_r = true;
@@ -895,6 +895,6 @@ namespace mongo {
}
MongoMutex::MongoMutex() {
static int n = 0;
- assert( ++n == 1 );
+ verify( ++n == 1 );
}
}
diff --git a/src/mongo/db/database.cpp b/src/mongo/db/database.cpp
index 83fe214312a..dd0bd31704e 100644
--- a/src/mongo/db/database.cpp
+++ b/src/mongo/db/database.cpp
@@ -34,7 +34,7 @@ namespace mongo {
Lock::assertAtLeastReadLocked(db->name);
}
else {
- assert( Lock::isLocked() );
+ verify( Lock::isLocked() );
}
}
@@ -150,13 +150,13 @@ namespace mongo {
}
bool Database::openExistingFile( int n ) {
- assert(this);
+ verify(this);
Lock::assertWriteLocked(name);
{
// must not yet be visible to others as we aren't in the db's write lock and
// we will write to _files vector - thus this assert.
bool loaded = dbHolder().__isLoaded(name, path);
- assert( !loaded );
+ verify( !loaded );
}
// additionally must be in the dbholder mutex (no assert for that yet)
@@ -202,7 +202,7 @@ namespace mongo {
// repair purposes yet we do not.
void Database::openAllFiles() {
//log() << "TEMP openallfiles " << path << ' ' << name << endl;
- assert(this);
+ verify(this);
int n = 0;
while( openExistingFile(n) ) {
n++;
@@ -224,7 +224,7 @@ namespace mongo {
// todo: this is called a lot. streamline the common case
MongoDataFile* Database::getFile( int n, int sizeNeeded , bool preallocateOnly) {
- assert(this);
+ verify(this);
DEV assertDbAtLeastReadLocked(this);
namespaceIndex.init();
@@ -240,12 +240,12 @@ namespace mongo {
MongoDataFile* p = 0;
if ( !preallocateOnly ) {
while ( n >= (int) _files.size() ) {
- assert(this);
+ verify(this);
if( !Lock::isWriteLocked(this->name) ) {
log() << "error: getFile() called in a read lock, yet file to return is not yet open" << endl;
log() << " getFile(" << n << ") _files.size:" <<_files.size() << ' ' << fileName(n).string() << endl;
log() << " context ns: " << cc().ns() << " openallfiles:" << _openAllFiles << endl;
- assert(false);
+ verify(false);
}
_files.push_back(0);
}
@@ -368,7 +368,7 @@ namespace mongo {
return true;
}
- assert( cc().database() == this );
+ verify( cc().database() == this );
if ( ! namespaceIndex.details( profileName.c_str() ) ) {
log() << "creating profile collection: " << profileName << endl;
@@ -437,7 +437,7 @@ namespace mongo {
{
SimpleMutex::scoped_lock lk(_m);
DBs& m = _paths[path];
- assert( m[dbname] == 0 );
+ verify( m[dbname] == 0 );
m[dbname] = db;
_size++;
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 13cf7faaf86..6d710724ce7 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -99,7 +99,7 @@ namespace mongo {
struct MyStartupTests {
MyStartupTests() {
- assert( sizeof(OID) == 12 );
+ verify( sizeof(OID) == 12 );
}
} mystartupdbcpp;
@@ -114,12 +114,12 @@ namespace mongo {
sleepsecs(1);
unsigned n = 0;
auto f = [&n](const BSONObj& o) {
- assert( o.valid() );
+ verify( o.valid() );
//cout << o << endl;
n++;
bool testClosingSocketOnError = false;
if( testClosingSocketOnError )
- assert(false);
+ verify(false);
};
DBClientConnection db(false);
db.connect("localhost");
@@ -190,7 +190,7 @@ namespace mongo {
QueryResult *qr = (QueryResult *) header;
long long cursorid = qr->cursorId;
if( cursorid ) {
- assert( dbresponse.exhaust && *dbresponse.exhaust != 0 );
+ verify( dbresponse.exhaust && *dbresponse.exhaust != 0 );
string ns = dbresponse.exhaust; // before reset() free's it...
m.reset();
BufBuilder b(512);
@@ -246,8 +246,8 @@ namespace mongo {
static DBDirectClient db;
if ( h->version == 4 && h->versionMinor == 4 ) {
- assert( PDFILE_VERSION == 4 );
- assert( PDFILE_VERSION_MINOR == 5 );
+ verify( PDFILE_VERSION == 4 );
+ verify( PDFILE_VERSION_MINOR == 5 );
list<string> colls = db.getCollectionNames( dbName );
for ( list<string>::iterator i=colls.begin(); i!=colls.end(); i++) {
@@ -276,7 +276,7 @@ namespace mongo {
Client::GodScope gs;
log(1) << "enter repairDatabases (to check pdfile version #)" << endl;
- //assert(checkNsFilesOnLoad);
+ //verify(checkNsFilesOnLoad);
checkNsFilesOnLoad = false; // we are mainly just checking the header - don't scan the whole .ns file for every db here.
Lock::GlobalWrite lk;
@@ -304,7 +304,7 @@ namespace mongo {
// QUESTION: Repair even if file format is higher version than code?
log() << "\t starting upgrade" << endl;
string errmsg;
- assert( doDBUpgrade( dbName , errmsg , h ) );
+ verify( doDBUpgrade( dbName , errmsg , h ) );
}
else {
log() << "\t Not upgrading, exiting" << endl;
@@ -573,8 +573,6 @@ namespace mongo {
using namespace mongo;
#include <boost/program_options.hpp>
-#undef assert
-#define assert MONGO_assert
namespace po = boost::program_options;
@@ -838,7 +836,7 @@ int main(int argc, char* argv[]) {
}
if (params.count("smallfiles")) {
cmdLine.smallfiles = true;
- assert( dur::DataLimitPerJournalFile >= 128 * 1024 * 1024 );
+ verify( dur::DataLimitPerJournalFile >= 128 * 1024 * 1024 );
dur::DataLimitPerJournalFile = 128 * 1024 * 1024;
}
if (params.count("diaglog")) {
@@ -914,7 +912,7 @@ int main(int argc, char* argv[]) {
dbexit( EXIT_BADOPTIONS );
}
lenForNewNsFiles = x * 1024 * 1024;
- assert(lenForNewNsFiles > 0);
+ verify(lenForNewNsFiles > 0);
}
if (params.count("oplogSize")) {
long long x = params["oplogSize"].as<int>();
@@ -928,7 +926,7 @@ int main(int argc, char* argv[]) {
dbexit( EXIT_BADOPTIONS );
}
cmdLine.oplogSize = x * 1024 * 1024;
- assert(cmdLine.oplogSize > 0);
+ verify(cmdLine.oplogSize > 0);
}
if (params.count("cacheSize")) {
long x = params["cacheSize"].as<long>();
@@ -1201,27 +1199,27 @@ namespace mongo {
sigemptyset( &addrSignals.sa_mask );
addrSignals.sa_flags = SA_SIGINFO;
- assert( sigaction(SIGSEGV, &addrSignals, 0) == 0 );
- assert( sigaction(SIGBUS, &addrSignals, 0) == 0 );
- assert( sigaction(SIGILL, &addrSignals, 0) == 0 );
- assert( sigaction(SIGFPE, &addrSignals, 0) == 0 );
+ verify( sigaction(SIGSEGV, &addrSignals, 0) == 0 );
+ verify( sigaction(SIGBUS, &addrSignals, 0) == 0 );
+ verify( sigaction(SIGILL, &addrSignals, 0) == 0 );
+ verify( sigaction(SIGFPE, &addrSignals, 0) == 0 );
- assert( signal(SIGABRT, abruptQuit) != SIG_ERR );
- assert( signal(SIGQUIT, abruptQuit) != SIG_ERR );
- assert( signal(SIGPIPE, pipeSigHandler) != SIG_ERR );
+ verify( signal(SIGABRT, abruptQuit) != SIG_ERR );
+ verify( signal(SIGQUIT, abruptQuit) != SIG_ERR );
+ verify( signal(SIGPIPE, pipeSigHandler) != SIG_ERR );
setupSIGTRAPforGDB();
sigemptyset( &asyncSignals );
if ( inFork )
- assert( signal( SIGHUP , setupSignals_ignoreHelper ) != SIG_ERR );
+ verify( signal( SIGHUP , setupSignals_ignoreHelper ) != SIG_ERR );
else
sigaddset( &asyncSignals, SIGHUP );
sigaddset( &asyncSignals, SIGINT );
sigaddset( &asyncSignals, SIGTERM );
- assert( pthread_sigmask( SIG_SETMASK, &asyncSignals, 0 ) == 0 );
+ verify( pthread_sigmask( SIG_SETMASK, &asyncSignals, 0 ) == 0 );
boost::thread it( interruptThread );
set_terminate( myterminate );
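
Also worth noting in the db.cpp diff above: the dropped "#undef assert / #define assert MONGO_assert" pair after the boost include. Redefining assert is order-sensitive — the override applies only textually below the #define, and the standard requires <cassert> to redefine assert on every inclusion, so any later include (often indirect) silently restores the NDEBUG-sensitive macro. Calling verify by name sidesteps this. A contrived sketch of the hazard (hypothetical code, not from the tree):

#include <cstdio>
#include <cassert>

inline void mongoAssertFailed(const char* expr) {   // stand-in failure handler
    std::fprintf(stderr, "MONGO_assert( %s ) failed\n", expr);
}
#define MONGO_assert(x) do { if (!(x)) mongoAssertFailed(#x); } while (0)

#undef assert
#define assert MONGO_assert   // bare assert() now reaches the custom handler

// ... code here gets the override ...

#include <cassert>            // a later include redefines assert() and
                              // silently undoes the override for everything
                              // below this point
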
diff --git a/src/mongo/db/db.h b/src/mongo/db/db.h
index 94841024194..7d2928c25ba 100644
--- a/src/mongo/db/db.h
+++ b/src/mongo/db/db.h
@@ -35,7 +35,7 @@ namespace mongo {
dbtemprelease() {
const Client& c = cc();
_context = c.getContext();
- assert( Lock::isLocked() );
+ verify( Lock::isLocked() );
if( Lock::nested() ) {
Lock::nested();
massert(10298 , "can't temprelease nested lock", false);
@@ -44,7 +44,7 @@ namespace mongo {
_context->unlocked();
}
tr.reset(new Lock::TempRelease);
- assert( c.curop() );
+ verify( c.curop() );
c.curop()->yielded();
}
~dbtemprelease() {
@@ -55,7 +55,7 @@ namespace mongo {
};
/** must be write locked
- no assert (and no release) if nested write lock
+ no verify(and no release) if nested write lock
a lot like dbtempreleasecond, eliminate?
*/
struct dbtempreleasewritelock {
@@ -65,13 +65,13 @@ namespace mongo {
dbtempreleasewritelock() {
const Client& c = cc();
_context = c.getContext();
- assert( Lock::isW() );
+ verify( Lock::isW() );
if( Lock::nested() )
return;
if ( _context )
_context->unlocked();
tr.reset(new Lock::TempRelease);
- assert( c.curop() );
+ verify( c.curop() );
c.curop()->yielded();
}
~dbtempreleasewritelock() {
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index cebb50ce38e..293e1d6f429 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -89,7 +89,7 @@ namespace mongo {
CmdResetError() : Command("resetError", false, "reseterror") {}
bool run(const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
LastError *le = lastError.get();
- assert( le );
+ verify( le );
le->reset();
return true;
}
@@ -223,7 +223,7 @@ namespace mongo {
return true;
}
- assert( sprintf( buf , "w block pass: %lld" , ++passes ) < 30 );
+ verify( sprintf( buf , "w block pass: %lld" , ++passes ) < 30 );
c.curop()->setMessage( buf );
sleepmillis(1);
killCurrentOp.checkForInterrupt();
@@ -698,12 +698,12 @@ namespace mongo {
struct DBCommandsUnitTest {
DBCommandsUnitTest() {
- assert( removeBit(1, 0) == 0 );
- assert( removeBit(2, 0) == 1 );
- assert( removeBit(2, 1) == 0 );
- assert( removeBit(255, 1) == 127 );
- assert( removeBit(21, 2) == 9 );
- assert( removeBit(0x4000000000000001ULL, 62) == 1 );
+ verify( removeBit(1, 0) == 0 );
+ verify( removeBit(2, 0) == 1 );
+ verify( removeBit(2, 1) == 0 );
+ verify( removeBit(255, 1) == 127 );
+ verify( removeBit(21, 2) == 9 );
+ verify( removeBit(0x4000000000000001ULL, 62) == 1 );
}
} dbc_unittest;
@@ -1127,7 +1127,7 @@ namespace mongo {
cursor->advance();
BSONElement ne = obj["n"];
- assert(ne.isNumber());
+ verify(ne.isNumber());
int myn = ne.numberInt();
if ( n != myn ) {
log() << "should have chunk: " << n << " have:" << myn << endl;
@@ -1874,7 +1874,7 @@ namespace mongo {
bool retval = false;
if ( c->locktype() == Command::NONE ) {
- assert( !c->lockGlobally() );
+ verify( !c->lockGlobally() );
// we also trust that this won't crash
retval = true;
@@ -1894,7 +1894,7 @@ namespace mongo {
}
else if( c->locktype() != Command::WRITE ) {
// read lock
- assert( ! c->logTheOp() );
+ verify( ! c->logTheOp() );
string ns = c->parseNs(dbname, cmdObj);
scoped_ptr<Lock::GlobalRead> lk;
if( c->lockGlobally() )
diff --git a/src/mongo/db/dbcommands_admin.cpp b/src/mongo/db/dbcommands_admin.cpp
index 20116040feb..223c71cd07f 100644
--- a/src/mongo/db/dbcommands_admin.cpp
+++ b/src/mongo/db/dbcommands_admin.cpp
@@ -440,7 +440,7 @@ namespace mongo {
log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
if( lock ) {
Lock::ThreadSpanningOp::setWLockedNongreedy();
- assert( !locked ); // impossible to get here if locked is true
+ verify( !locked ); // impossible to get here if locked is true
try {
//uassert(12034, "fsync: can't lock while an unlock is pending", !unlockRequested);
uassert(12032, "fsync: sync option must be true when using lock", sync);
@@ -458,7 +458,7 @@ namespace mongo {
Lock::ThreadSpanningOp::unsetR();
throw;
}
- assert( !locked );
+ verify( !locked );
locked = true;
log() << "db is now locked for snapshotting, no writes allowed. db.fsyncUnlock() to unlock" << endl;
log() << " For more info see " << FSyncCommand::url() << endl;
diff --git a/src/mongo/db/dbcommands_generic.cpp b/src/mongo/db/dbcommands_generic.cpp
index 4e9e4cdb7ed..accc6fe262e 100644
--- a/src/mongo/db/dbcommands_generic.cpp
+++ b/src/mongo/db/dbcommands_generic.cpp
@@ -197,13 +197,13 @@ namespace mongo {
return false;
}
int x = (int) cmdObj["journalCommitInterval"].Number();
- assert( x > 1 && x < 500 );
+ verify( x > 1 && x < 500 );
cmdLine.journalCommitInterval = x;
log() << "setParameter journalCommitInterval=" << x << endl;
s++;
}
if( cmdObj.hasElement("notablescan") ) {
- assert( !cmdLine.isMongos() );
+ verify( !cmdLine.isMongos() );
if( s == 0 )
result.append("was", cmdLine.noTableScan);
cmdLine.noTableScan = cmdObj["notablescan"].Bool();
@@ -216,7 +216,7 @@ namespace mongo {
s++;
}
if( cmdObj.hasElement("syncdelay") ) {
- assert( !cmdLine.isMongos() );
+ verify( !cmdLine.isMongos() );
if( s == 0 )
result.append("was", cmdLine.syncdelay );
cmdLine.syncdelay = cmdObj["syncdelay"].Number();
@@ -233,7 +233,7 @@ namespace mongo {
result.append("was", replApplyBatchSize );
BSONElement e = cmdObj["replApplyBatchSize"];
ParameterValidator * v = ParameterValidator::get( e.fieldName() );
- assert( v );
+ verify( v );
if ( ! v->isValid( e , errmsg ) )
return false;
replApplyBatchSize = e.numberInt();
@@ -386,7 +386,7 @@ namespace mongo {
log() << "terminating, shutdown command received" << endl;
dbexit( EXIT_CLEAN , "shutdown called" , true ); // this never returns
- assert(0);
+ verify(0);
return true;
}
diff --git a/src/mongo/db/dbeval.cpp b/src/mongo/db/dbeval.cpp
index 67e110da71e..4b27cf881fc 100644
--- a/src/mongo/db/dbeval.cpp
+++ b/src/mongo/db/dbeval.cpp
@@ -51,9 +51,9 @@ namespace mongo {
code = e.codeWScopeCode();
break;
default:
- assert(0);
+ verify(0);
}
- assert( code );
+ verify( code );
if ( ! globalScriptEngine ) {
errmsg = "db side execution is disabled";
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 70372d5b868..e34027d01e2 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -97,7 +97,7 @@ namespace mongo {
bool * nsFound , bool * indexFound ) {
Lock::assertAtLeastReadLocked(ns);
Database *database = c.database();
- assert( database );
+ verify( database );
NamespaceDetails *d = database->namespaceIndex.details(ns);
if ( ! d )
return false;
@@ -122,7 +122,7 @@ namespace mongo {
}
DiskLoc Helpers::findById(NamespaceDetails *d, BSONObj idquery) {
- assert(d);
+ verify(d);
int idxNo = d->findIdIndex();
uassert(13430, "no _id index", idxNo>=0);
IndexDetails& i = d->idx( idxNo );
@@ -166,7 +166,7 @@ namespace mongo {
void Helpers::upsert( const string& ns , const BSONObj& o, bool fromMigrate ) {
BSONElement e = o["_id"];
- assert( e.type() );
+ verify( e.type() );
BSONObj id = e.wrap();
OpDebug debug;
@@ -206,7 +206,7 @@ namespace mongo {
BSONObj keya , keyb;
BSONObj minClean = toKeyFormat( min , keya );
BSONObj maxClean = toKeyFormat( max , keyb );
- assert( keya == keyb );
+ verify( keya == keyb );
Client::Context ctx(ns);
@@ -218,7 +218,7 @@ namespace mongo {
return 0;
int ii = nsd->findIndexByKeyPattern( keya );
- assert( ii >= 0 );
+ verify( ii >= 0 );
IndexDetails& i = nsd->idx( ii );
@@ -275,7 +275,7 @@ namespace mongo {
_root /= a;
if ( b.size() )
_root /= b;
- assert( a.size() || b.size() );
+ verify( a.size() || b.size() );
_file = _root;
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index ad5aea2cdff..9a8f1d08864 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -217,7 +217,7 @@ namespace mongo {
}
void markReset() {
- assert( mark );
+ verify( mark );
nextjsobj = mark;
}
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
index eb19ba3be6c..a9818dc29fb 100644
--- a/src/mongo/db/dbwebserver.cpp
+++ b/src/mongo/db/dbwebserver.cpp
@@ -35,8 +35,6 @@
#include "../util/admin_access.h"
#include "dbwebserver.h"
#include <boost/date_time/posix_time/posix_time.hpp>
-#undef assert
-#define assert MONGO_assert
namespace mongo {
@@ -407,8 +405,8 @@ namespace mongo {
string cmd = commands[i];
Command * c = Command::findCommand( cmd );
- assert( c );
- assert( c->locktype() == 0 );
+ verify( c );
+ verify( c->locktype() == 0 );
BSONObj co;
{
@@ -498,9 +496,9 @@ namespace mongo {
vector<string>& headers, const SockAddr &from ) {
string cmd;
bool text = false;
- assert( _cmd( url , cmd , text, params ) );
+ verify( _cmd( url , cmd , text, params ) );
Command * c = _cmd( cmd );
- assert( c );
+ verify( c );
BSONObj cmdObj = BSON( cmd << 1 );
Client& client = cc();
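
With the call sites converted, the per-file rebinding of the standard macro
(removed above, and again below in dur_journal.cpp, jsobj.cpp, and json.cpp)
has nothing left to serve: these translation units no longer say assert at
all. The retired pattern, for reference:

    // Previously placed after the last system #include so that bare
    // assert() calls in this file would hit mongo's handler instead of
    // the C library's.
    #undef assert
    #define assert MONGO_assert
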
diff --git a/src/mongo/db/diskloc.h b/src/mongo/db/diskloc.h
index 5295df3e260..0b7e3334312 100644
--- a/src/mongo/db/diskloc.h
+++ b/src/mongo/db/diskloc.h
@@ -69,7 +69,7 @@ namespace mongo {
_a = -1;
ofs = 0; /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
}
- void assertOk() { assert(!isNull()); }
+ void assertOk() { verify(!isNull()); }
void setInvalid() {
_a = -2;
ofs = 0;
@@ -96,7 +96,7 @@ namespace mongo {
}
void inc(int amt) {
- assert( !isNull() );
+ verify( !isNull() );
ofs += amt;
}
@@ -113,7 +113,7 @@ namespace mongo {
const DiskLoc& operator=(const DiskLoc& b) {
_a=b._a;
ofs = b.ofs;
- //assert(ofs!=0);
+ //verify(ofs!=0);
return *this;
}
int compare(const DiskLoc& b) const {
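
The DiskLoc guards above all defend one invariant: the (file number, offset)
pair with _a == -1 is the null sentinel and must never be advanced or
dereferenced. Condensed, using the field names from this header (the full
class also carries the -2 "invalid" state):

    struct DiskLocSketch {
        int _a;    // data-file number; -1 is the null sentinel
        int ofs;   // byte offset within that file
        bool isNull() const { return _a == -1; }
        void inc(int amt) {
            verify(!isNull());   // advancing null would corrupt the location
            ofs += amt;
        }
    };
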
diff --git a/src/mongo/db/dur.cpp b/src/mongo/db/dur.cpp
index 09ab9ebfb35..d3b29019d3c 100644
--- a/src/mongo/db/dur.cpp
+++ b/src/mongo/db/dur.cpp
@@ -188,12 +188,12 @@ namespace mongo {
DurableInterface* DurableInterface::_impl = nonDurableImpl;
void DurableInterface::enableDurability() {
- assert(_impl == nonDurableImpl);
+ verify(_impl == nonDurableImpl);
_impl = durableImpl;
}
void DurableInterface::disableDurability() {
- assert(_impl == durableImpl);
+ verify(_impl == durableImpl);
massert(13616, "can't disable durability with pending writes", !commitJob.hasWritten());
_impl = nonDurableImpl;
}
@@ -337,7 +337,7 @@ namespace mongo {
static int n;
++n;
- assert(debug && cmdLine.dur);
+ verify(debug && cmdLine.dur);
if (commitJob.writes().empty())
return;
const WriteIntent &i = commitJob.lastWrite();
@@ -386,7 +386,7 @@ namespace mongo {
_bytes += mmf->length();
- assert( mmf->length() == (unsigned) mmf->length() );
+ verify( mmf->length() == (unsigned) mmf->length() );
if (memcmp(p, w, (unsigned) mmf->length()) == 0)
return; // next file
@@ -457,7 +457,7 @@ namespace mongo {
LOG(4) << "journal REMAPPRIVATEVIEW" << endl;
d.dbMutex.assertWriteLocked();
- assert( !commitJob.hasWritten() );
+ verify( !commitJob.hasWritten() );
// we want to remap all private views about every 2 seconds. there could be ~1000 views so
// we do a little each pass; beyond the remap time, more significantly, there will be copy on write
@@ -505,7 +505,7 @@ namespace mongo {
dassert( i != e );
if( (*i)->isMongoMMF() ) {
MongoMMF *mmf = (MongoMMF*) *i;
- assert(mmf);
+ verify(mmf);
if( mmf->willNeedRemap() ) {
mmf->willNeedRemap() = false;
mmf->remapThePrivateView();
@@ -535,7 +535,7 @@ namespace mongo {
unspoolWriteIntents(); // in case we were doing some writing ourself (likely impossible with limitedlocks version)
AlignedBuilder &ab = __theBuilder;
- assert( !d.dbMutex.atLeastReadLocked() );
+ verify( !d.dbMutex.atLeastReadLocked() );
// do we need this to be greedy, so that it can start working fairly soon?
// probably: as this is a read lock, it wouldn't change anything if only reads anyway.
@@ -560,7 +560,7 @@ namespace mongo {
unsigned abLen = ab.len();
commitJob.committingReset(); // must be reset before allowing anyone to write
- DEV assert( !commitJob.hasWritten() );
+ DEV verify( !commitJob.hasWritten() );
// release the readlock -- allowing others to now write while we are writing to the journal (etc.)
lk1.reset();
@@ -568,14 +568,14 @@ namespace mongo {
// ****** now other threads can do writes ******
WRITETOJOURNAL(h, ab);
- assert( abLen == ab.len() ); // a check that no one touched the builder while we were doing work. if so, our locking is wrong.
+ verify( abLen == ab.len() ); // a check that no one touched the builder while we were doing work. if so, our locking is wrong.
// data is now in the journal, which is sufficient for acknowledging getLastError.
// (ok to crash after that)
commitJob.committingNotifyCommitted();
WRITETODATAFILES(h, ab);
- assert( abLen == ab.len() ); // check again wasn't modded
+ verify( abLen == ab.len() ); // check again wasn't modded
ab.reset();
// can't : d.dbMutex._remapPrivateViewRequested = true;
@@ -652,7 +652,7 @@ namespace mongo {
// remapping private views must occur after WRITETODATAFILES otherwise
// we wouldn't see newly written data on reads.
//
- DEV assert( !commitJob.hasWritten() );
+ DEV verify( !commitJob.hasWritten() );
if( !Lock::isW() ) {
// REMAPPRIVATEVIEW needs done in a write lock (as there is a short window during remapping when each view
// might not exist) thus we do it later.
@@ -745,7 +745,7 @@ namespace mongo {
getDur().commitIfNeeded(true);
}
else {
- assert( inShutdown() );
+ verify( inShutdown() );
if( commitJob.hasWritten() ) {
log() << "journal warning files are closing outside locks with writes pending" << endl;
}
@@ -872,7 +872,7 @@ namespace mongo {
MongoFile::flushAll(true);
journalCleanup();
- assert(!haveJournalFiles()); // Double check post-conditions
+ verify(!haveJournalFiles()); // Double check post-conditions
}
} // namespace dur
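
The dur.cpp hunks above all protect the same group-commit protocol: snapshot
the log-buffer length, release the read lock, journal, acknowledge, then
apply to the data files; if the builder's length changed in between, the
locking is wrong. In outline, with WRITETOJOURNAL and WRITETODATAFILES
standing in for the real helpers:

    void groupCommitSketch(JSectHeader& h, AlignedBuilder& ab) {
        unsigned abLen = ab.len();   // snapshot before writers are let back in
        // ... read lock released here; other threads may now write ...
        WRITETOJOURNAL(h, ab);       // durable once this returns
        verify(abLen == ab.len());   // builder untouched, else locking is wrong
        // safe to acknowledge getLastError: the journal has the data
        WRITETODATAFILES(h, ab);
        verify(abLen == ab.len());
        ab.reset();
    }
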
diff --git a/src/mongo/db/dur_commitjob.cpp b/src/mongo/db/dur_commitjob.cpp
index b135827036e..813de2629f9 100644
--- a/src/mongo/db/dur_commitjob.cpp
+++ b/src/mongo/db/dur_commitjob.cpp
@@ -75,7 +75,7 @@ namespace mongo {
log() << "me:" << tlIntents.get()->n_informational() << endl;
else
log() << "no tlIntent for my thread" << endl;
- assert(false);
+ verify(false);
}
#endif
}
diff --git a/src/mongo/db/dur_commitjob.h b/src/mongo/db/dur_commitjob.h
index dd705cc92a7..e3aafbe06e6 100644
--- a/src/mongo/db/dur_commitjob.h
+++ b/src/mongo/db/dur_commitjob.h
@@ -127,7 +127,7 @@ namespace mongo {
*/
class CommitJob : boost::noncopyable {
void _committingReset();
- ~CommitJob(){ assert(!"shouldn't destroy CommitJob!"); }
+ ~CommitJob(){ verify(!"shouldn't destroy CommitJob!"); }
/** record/note an intent to write */
void note(void* p, int len);
diff --git a/src/mongo/db/dur_journal.cpp b/src/mongo/db/dur_journal.cpp
index 7f1e4351c46..9e767816dca 100644
--- a/src/mongo/db/dur_journal.cpp
+++ b/src/mongo/db/dur_journal.cpp
@@ -28,8 +28,6 @@
#include "../util/net/listen.h" // getelapsedtimemillis
#include <boost/static_assert.hpp>
#include <boost/filesystem.hpp>
-#undef assert
-#define assert MONGO_assert
#include "../util/mongoutils/str.h"
#include "dur_journalimpl.h"
#include "../util/file.h"
@@ -96,7 +94,7 @@ namespace mongo {
(2b) refuse to do a recovery startup if that is there without manual override.
*/
log() << "journaling failure/error: " << msg << endl;
- assert(false);
+ verify(false);
}
JSectFooter::JSectFooter() {
@@ -214,7 +212,7 @@ namespace mongo {
log() << "error removing journal files " << e.what() << endl;
throw;
}
- assert(!haveJournalFiles());
+ verify(!haveJournalFiles());
flushMyDirectory(getJournalDir() / "file"); // flushes parent of argument (in this case journal dir)
@@ -293,7 +291,7 @@ namespace mongo {
log() << "preallocating a journal file " << p.string() << endl;
const unsigned BLKSZ = 1024 * 1024;
- assert( len % BLKSZ == 0 );
+ verify( len % BLKSZ == 0 );
AlignedBuilder b(BLKSZ);
memset((void*)b.buf(), 0, BLKSZ);
@@ -302,21 +300,21 @@ namespace mongo {
File f;
f.open( p.string().c_str() , /*read-only*/false , /*direct-io*/false );
- assert( f.is_open() );
+ verify( f.is_open() );
fileofs loc = 0;
while ( loc < len ) {
f.write( loc , b.buf() , BLKSZ );
loc += BLKSZ;
m.hit(BLKSZ);
}
- assert( loc == len );
+ verify( loc == len );
f.fsync();
}
const int NUM_PREALLOC_FILES = 3;
inline boost::filesystem::path preallocPath(int n) {
- assert(n >= 0);
- assert(n < NUM_PREALLOC_FILES);
+ verify(n >= 0);
+ verify(n < NUM_PREALLOC_FILES);
string fn = str::stream() << "prealloc." << n;
return getJournalDir() / fn;
}
@@ -447,7 +445,7 @@ namespace mongo {
void Journal::_open() {
_curFileId = 0;
- assert( _curLogFile == 0 );
+ verify( _curLogFile == 0 );
boost::filesystem::path fname = getFilePathFor(_nextFileNumber);
// if we have a prealloced file, use it
@@ -476,7 +474,7 @@ namespace mongo {
{
JHeader h(fname.string());
_curFileId = h.fileId;
- assert(_curFileId);
+ verify(_curFileId);
AlignedBuilder b(8192);
b.appendStruct(h);
_curLogFile->synchronousAppend(b.buf(), b.len());
@@ -484,13 +482,13 @@ namespace mongo {
}
void Journal::init() {
- assert( _curLogFile == 0 );
+ verify( _curLogFile == 0 );
MongoFile::notifyPreFlush = preFlush;
MongoFile::notifyPostFlush = postFlush;
}
void Journal::open() {
- assert( MongoFile::notifyPreFlush == preFlush );
+ verify( MongoFile::notifyPreFlush == preFlush );
SimpleMutex::scoped_lock lk(_curLogFileMutex);
_open();
}
@@ -527,7 +525,7 @@ namespace mongo {
LSNFile L;
File f;
f.open(lsnPath().string().c_str());
- assert(f.is_open());
+ verify(f.is_open());
if( f.len() == 0 ) {
// this could be 'normal' if we crashed at the right moment
log() << "info lsn file is zero bytes long" << endl;
@@ -700,15 +698,15 @@ namespace mongo {
size_t compressedLength = 0;
rawCompress(uncompressed.buf(), uncompressed.len(), b.cur(), &compressedLength);
- assert( compressedLength < 0xffffffff );
- assert( compressedLength < max );
+ verify( compressedLength < 0xffffffff );
+ verify( compressedLength < max );
b.skip(compressedLength);
// footer
unsigned L = 0xffffffff;
{
// pad to alignment, and set the total section length in the JSectHeader
- assert( 0xffffe000 == (~(Alignment-1)) );
+ verify( 0xffffe000 == (~(Alignment-1)) );
unsigned lenUnpadded = b.len() + sizeof(JSectFooter);
L = (lenUnpadded + Alignment-1) & (~(Alignment-1));
dassert( L >= lenUnpadded );
@@ -727,12 +725,12 @@ namespace mongo {
SimpleMutex::scoped_lock lk(_curLogFileMutex);
// must already be open -- so that _curFileId is correct for previous buffer building
- assert( _curLogFile );
+ verify( _curLogFile );
stats.curr->_uncompressedBytes += uncompressed.len();
unsigned w = b.len();
_written += w;
- assert( w <= L );
+ verify( w <= L );
stats.curr->_journaledBytes += L;
_curLogFile->synchronousAppend((const void *) b.buf(), L);
_rotate();
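
The verify( 0xffffe000 == (~(Alignment-1)) ) check above pins Alignment at
0x2000 (8 KB): ~(0x2000 - 1) is exactly 0xffffe000 for a 32-bit unsigned.
Section lengths are then rounded up to that boundary with the usual mask
trick, as in this worked example:

    const unsigned Alignment = 0x2000;                 // 8 KB
    unsigned padded(unsigned lenUnpadded) {
        return (lenUnpadded + Alignment - 1) & ~(Alignment - 1);
    }
    // padded(1) == 0x2000, padded(0x2000) == 0x2000, padded(0x2001) == 0x4000
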
diff --git a/src/mongo/db/dur_preplogbuffer.cpp b/src/mongo/db/dur_preplogbuffer.cpp
index b22ce6ce4f9..46f55e7a45d 100644
--- a/src/mongo/db/dur_preplogbuffer.cpp
+++ b/src/mongo/db/dur_preplogbuffer.cpp
@@ -82,7 +82,7 @@ namespace mongo {
JEntry e;
e.len = min(i->length(), (unsigned)(mmf->length() - ofs)); // don't write past end of file
- assert( ofs <= 0x80000000 );
+ verify( ofs <= 0x80000000 );
e.ofs = (unsigned) ofs;
e.setFileNo( mmf->fileSuffixNo() );
if( mmf->relativePath() == local ) {
@@ -128,7 +128,7 @@ namespace mongo {
assertNothingSpooled();
const vector<WriteIntent>& _intents = commitJob.getIntentsSorted();
- assert( !_intents.empty() );
+ verify( !_intents.empty() );
WriteIntent last;
for( vector<WriteIntent>::const_iterator i = _intents.begin(); i != _intents.end(); i++ ) {
@@ -160,7 +160,7 @@ namespace mongo {
@return partially populated sectheader and _ab set
*/
static void _PREPLOGBUFFER(JSectHeader& h, AlignedBuilder& bb) {
- assert( cmdLine.dur );
+ verify( cmdLine.dur );
assertLockedForCommitting();
resetLogBuffer(h, bb); // adds JSectHeader
diff --git a/src/mongo/db/dur_recover.cpp b/src/mongo/db/dur_recover.cpp
index 4ccc75dd60c..66b6e411cb8 100644
--- a/src/mongo/db/dur_recover.cpp
+++ b/src/mongo/db/dur_recover.cpp
@@ -106,7 +106,7 @@ namespace mongo {
_lastDbName(0)
, _doDurOps(doDurOpsRecovering)
{
- assert( doDurOpsRecovering );
+ verify( doDurOpsRecovering );
bool ok = uncompress((const char *)compressed, compressedLen, &_uncompressed);
if( !ok ) {
// it should always be ok (i think?) as there is a previous check to see that the JSectFooter is ok
@@ -114,7 +114,7 @@ namespace mongo {
msgasserted(15874, "couldn't uncompress journal section");
}
const char *p = _uncompressed.c_str();
- assert( compressedLen == _h.sectionLen() - sizeof(JSectFooter) - sizeof(JSectHeader) );
+ verify( compressedLen == _h.sectionLen() - sizeof(JSectFooter) - sizeof(JSectHeader) );
_entries = auto_ptr<BufReader>( new BufReader(p, _uncompressed.size()) );
}
@@ -142,7 +142,7 @@ namespace mongo {
switch( lenOrOpCode ) {
case JEntry::OpCode_Footer: {
- assert( false );
+ verify( false );
}
case JEntry::OpCode_FileCreated:
@@ -172,11 +172,11 @@ namespace mongo {
}
// JEntry - a basic write
- assert( lenOrOpCode && lenOrOpCode < JEntry::OpCode_Min );
+ verify( lenOrOpCode && lenOrOpCode < JEntry::OpCode_Min );
_entries->rewind(4);
e.e = (JEntry *) _entries->skip(sizeof(JEntry));
e.dbName = e.e->isLocalDbContext() ? "local" : _lastDbName;
- assert( e.e->len == lenOrOpCode );
+ verify( e.e->len == lenOrOpCode );
_entries->skip(e.e->len);
}
@@ -185,7 +185,7 @@ namespace mongo {
static string fileName(const char* dbName, int fileNo) {
stringstream ss;
ss << dbName << '.';
- assert( fileNo >= 0 );
+ verify( fileNo >= 0 );
if( fileNo == JEntry::DotNsSuffix )
ss << "ns";
else
@@ -216,9 +216,9 @@ namespace mongo {
void RecoveryJob::write(const ParsedJournalEntry& entry) {
//TODO(mathias): look into making some of these dasserts
- assert(entry.e);
- assert(entry.dbName);
- assert(strnlen(entry.dbName, MaxDatabaseNameLen) < MaxDatabaseNameLen);
+ verify(entry.e);
+ verify(entry.dbName);
+ verify(strnlen(entry.dbName, MaxDatabaseNameLen) < MaxDatabaseNameLen);
const string fn = fileName(entry.dbName, entry.e->getFileNo());
MongoFile* file;
@@ -229,23 +229,23 @@ namespace mongo {
MongoMMF* mmf;
if (file) {
- assert(file->isMongoMMF());
+ verify(file->isMongoMMF());
mmf = (MongoMMF*)file;
}
else {
if( !_recovering ) {
log() << "journal error applying writes, file " << fn << " is not open" << endl;
- assert(false);
+ verify(false);
}
boost::shared_ptr<MongoMMF> sp (new MongoMMF);
- assert(sp->open(fn, false));
+ verify(sp->open(fn, false));
_mmfs.push_back(sp);
mmf = sp.get();
}
if ((entry.e->ofs + entry.e->len) <= mmf->length()) {
- assert(mmf->view_write());
- assert(entry.e->srcData());
+ verify(mmf->view_write());
+ verify(entry.e->srcData());
void* dest = (char*)mmf->view_write() + entry.e->ofs;
memcpy(dest, entry.e->srcData(), entry.e->len);
@@ -353,7 +353,7 @@ namespace mongo {
// after the entries check the footer checksum
if( _recovering ) {
- assert( ((const char *)h) + sizeof(JSectHeader) == p );
+ verify( ((const char *)h) + sizeof(JSectHeader) == p );
if( !f->checkHash(h, len + sizeof(JSectHeader)) ) {
msgasserted(13594, "journal checksum doesn't match");
}
@@ -482,7 +482,7 @@ namespace mongo {
}
void _recover() {
- assert( cmdLine.dur );
+ verify( cmdLine.dur );
boost::filesystem::path p = getJournalDir();
if( !exists(p) ) {
@@ -532,10 +532,10 @@ namespace mongo {
char x;
BufReaderY y;
r.read(x); //cout << x; // a
- assert( x == 'a' );
+ verify( x == 'a' );
r.read(y);
r.read(x);
- assert( x == 'b' );
+ verify( x == 'b' );
}
} brunittest;
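
The recovery hunks above revolve around one framing rule: each journal entry
opens with four bytes holding either a small length (an ordinary JEntry
write) or an opcode at or above JEntry::OpCode_Min. An abridged sketch of
that dispatch; the opcode cases are elided (a footer reaching this reader is
a bug, hence the verify(false) above):

    void nextEntrySketch(BufReader& entries, ParsedJournalEntry& e) {
        unsigned lenOrOpCode;
        entries.read(lenOrOpCode);
        if (lenOrOpCode >= JEntry::OpCode_Min)
            return;                    // file-created / db-context / etc.
        verify(lenOrOpCode != 0);      // zero length means framing corruption
        entries.rewind(4);             // the length is part of the JEntry
        e.e = (JEntry*)entries.skip(sizeof(JEntry));
        verify(e.e->len == lenOrOpCode);
        entries.skip(e.e->len);        // payload follows the header
    }
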
diff --git a/src/mongo/db/explain.cpp b/src/mongo/db/explain.cpp
index d4444ca6aba..e16781f292a 100644
--- a/src/mongo/db/explain.cpp
+++ b/src/mongo/db/explain.cpp
@@ -178,17 +178,17 @@ namespace mongo {
ret = *i;
}
}
- assert( ret );
+ verify( ret );
return *ret;
}
void ExplainQueryInfo::noteIterate( bool match, bool loadedRecord, bool chunkSkip ) {
- assert( !_clauses.empty() );
+ verify( !_clauses.empty() );
_clauses.back()->noteIterate( match, loadedRecord, chunkSkip );
}
void ExplainQueryInfo::reviseN( long long n ) {
- assert( !_clauses.empty() );
+ verify( !_clauses.empty() );
_clauses.back()->reviseN( n );
}
diff --git a/src/mongo/db/extsort.cpp b/src/mongo/db/extsort.cpp
index 0bedcd1f891..3242f3978f7 100644
--- a/src/mongo/db/extsort.cpp
+++ b/src/mongo/db/extsort.cpp
@@ -236,7 +236,7 @@ namespace mongo {
}
- assert( slot >= 0 );
+ verify( slot >= 0 );
_stash[slot].second = false;
return best;
@@ -248,7 +248,7 @@ namespace mongo {
unsigned long long length;
_buf = (char*)_file.map( file.c_str() , length , MemoryMappedFile::SEQUENTIAL );
massert( 10308 , "mmap failed" , _buf );
- assert( length == (unsigned long long)boost::filesystem::file_size( file ) );
+ verify( length == (unsigned long long)boost::filesystem::file_size( file ) );
_end = _buf + length;
}
BSONObjExternalSorter::FileIterator::~FileIterator() {}
diff --git a/src/mongo/db/geo/2d.cpp b/src/mongo/db/geo/2d.cpp
index 642cff4987c..0ba1250c77b 100644
--- a/src/mongo/db/geo/2d.cpp
+++ b/src/mongo/db/geo/2d.cpp
@@ -355,7 +355,7 @@ namespace mongo {
unsigned _convert( double in ) const {
uassert( 13027 , str::stream() << "point not in interval of [ " << _min << ", " << _max << " )", in < _max && in >= _min );
in -= _min;
- assert( in >= 0 );
+ verify( in >= 0 );
return (unsigned)(in * _scaling);
}
@@ -489,8 +489,8 @@ namespace mongo {
}
bool mid( double amin , double amax , double bmin , double bmax , bool min , double& res ) const {
- assert( amin <= amax );
- assert( bmin <= bmax );
+ verify( amin <= amax );
+ verify( bmin <= bmax );
if ( amin < bmin ) {
if ( amax < bmin )
@@ -858,7 +858,7 @@ namespace mongo {
}
DiskLoc loc() const {
- assert( ! _dirty );
+ verify( ! _dirty );
return _loc;
}
@@ -888,8 +888,8 @@ namespace mongo {
// Definitely need to re-find our current max/min locations too
bool unDirty( const Geo2dType* g, DiskLoc& oldLoc ){
- assert( _dirty );
- assert( ! _id.isEmpty() );
+ verify( _dirty );
+ verify( ! _id.isEmpty() );
oldLoc = _loc;
_loc = DiskLoc();
@@ -952,9 +952,9 @@ namespace mongo {
bool makeDirty(){
if( ! _dirty ){
- assert( ! obj()["_id"].eoo() );
- assert( ! _bucket.isNull() );
- assert( _pos >= 0 );
+ verify( ! obj()["_id"].eoo() );
+ verify( ! _bucket.isNull() );
+ verify( _pos >= 0 );
if( _id.isEmpty() ){
_id = obj()["_id"].wrap( "" ).getOwned();
@@ -1398,7 +1398,7 @@ namespace mongo {
LOG( CDEBUG ) << "Undirtying stack point with id " << i->_id << endl;
if( i->makeDirty() ) _nDirtied++;
- assert( i->isDirty() );
+ verify( i->isDirty() );
}
// Check current item
@@ -1469,7 +1469,7 @@ namespace mongo {
_nRemovedOnYield++;
_found--;
- assert( _found >= 0 );
+ verify( _found >= 0 );
// Can't find our key again, remove
i = _stack.erase( i );
@@ -1504,9 +1504,9 @@ namespace mongo {
_noted = false;
}
- virtual Record* _current() { assert(ok()); LOG( CDEBUG + 1 ) << "_current " << _cur._loc.obj()["_id"] << endl; return _cur._loc.rec(); }
- virtual BSONObj current() { assert(ok()); LOG( CDEBUG + 1 ) << "current " << _cur._o << endl; return _cur._o; }
- virtual DiskLoc currLoc() { assert(ok()); LOG( CDEBUG + 1 ) << "currLoc " << _cur._loc << endl; return _cur._loc; }
+ virtual Record* _current() { verify(ok()); LOG( CDEBUG + 1 ) << "_current " << _cur._loc.obj()["_id"] << endl; return _cur._loc.rec(); }
+ virtual BSONObj current() { verify(ok()); LOG( CDEBUG + 1 ) << "current " << _cur._o << endl; return _cur._o; }
+ virtual DiskLoc currLoc() { verify(ok()); LOG( CDEBUG + 1 ) << "currLoc " << _cur._loc << endl; return _cur._loc; }
virtual BSONObj currKey() const { return _cur._key; }
virtual CoveredIndexMatcher* matcher() const {
@@ -1536,11 +1536,11 @@ namespace mongo {
if( maxToAdd < 0 ) maxToAdd = maxToCheck;
int maxFound = _foundInExp + maxToCheck;
- assert( maxToCheck > 0 );
- assert( maxFound > 0 );
- assert( _found <= 0x7fffffff ); // conversion to int
+ verify( maxToCheck > 0 );
+ verify( maxFound > 0 );
+ verify( _found <= 0x7fffffff ); // conversion to int
int maxAdded = static_cast<int>(_found) + maxToAdd;
- assert( maxAdded >= 0 ); // overflow check
+ verify( maxAdded >= 0 ); // overflow check
bool isNeighbor = _centerPrefix.constrains();
@@ -1682,7 +1682,7 @@ namespace mongo {
}
// Make sure we've got a reasonable center
- assert( _centerPrefix.constrains() );
+ verify( _centerPrefix.constrains() );
GeoHash _neighborPrefix = _centerPrefix;
_neighborPrefix.move( i, j );
@@ -1727,9 +1727,9 @@ namespace mongo {
// Restart our search from a diff box.
_state = START;
- assert( ! onlyExpand );
+ verify( ! onlyExpand );
- assert( _found <= 0x7fffffff );
+ verify( _found <= 0x7fffffff );
fillStack( maxFound - _foundInExp, maxAdded - static_cast<int>(_found) );
// When we return from the recursive fillStack call, we'll either have checked enough points or
@@ -1738,12 +1738,12 @@ namespace mongo {
// If we're maxed out on points, return
if( _foundInExp >= maxFound || _found >= maxAdded ) {
// Make sure we'll come back to add more points
- assert( _state == DOING_EXPAND );
+ verify( _state == DOING_EXPAND );
return;
}
// Otherwise we must be finished to return
- assert( _state == DONE );
+ verify( _state == DONE );
return;
}
@@ -1817,7 +1817,7 @@ namespace mongo {
// if the exact checks are more expensive.
bool needExact = true;
if( expensiveExact ){
- assert( false );
+ verify( false );
KeyResult result = approxKeyCheck( p, d );
if( result == BAD ) continue;
else if( result == GOOD ) needExact = false;
@@ -1939,9 +1939,9 @@ namespace mongo {
checkEarthBounds( p );
d = spheredist_deg( _near, p );
break;
- default: assert( false );
+ default: verify( false );
}
- assert( d >= 0 );
+ verify( d >= 0 );
GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near.toString()
<< "\t" << p.toString() << "\t" << d
@@ -1970,7 +1970,7 @@ namespace mongo {
d = spheredist_deg( _near, p );
within = ( d <= _maxDistance );
break;
- default: assert( false );
+ default: verify( false );
}
return within;
@@ -2013,7 +2013,7 @@ namespace mongo {
GEODEBUG( "\t\tInserted new point " << newPoint.toString() << " approx : " << keyD );
- assert( _max > 0 );
+ verify( _max > 0 );
Holder::iterator lastPtIt = _points.end();
lastPtIt--;
@@ -2050,7 +2050,7 @@ namespace mongo {
GEODEBUG( "\t\tNot erasing point " << startErase->toString() );
numToErase--;
startErase++;
- assert( startErase != _points.end() || numToErase == 0 );
+ verify( startErase != _points.end() || numToErase == 0 );
}
if( _uniqueDocs ){
@@ -2092,7 +2092,7 @@ namespace mongo {
_type(type)
{
- assert( g->getDetails() );
+ verify( g->getDetails() );
_nscanned = 0;
_found = 0;
@@ -2108,7 +2108,7 @@ namespace mongo {
_scanDistance = computeXScanDistance( startPt._y, rad2deg( _maxDistance ) + _spec->_error );
}
- assert( _scanDistance > 0 );
+ verify( _scanDistance > 0 );
}
@@ -2143,7 +2143,7 @@ namespace mongo {
{
do {
long long f = found();
- assert( f <= 0x7fffffff );
+ verify( f <= 0x7fffffff );
fillStack( maxPointsHeuristic, _numWanted - static_cast<int>(f) , true );
processExtraPoints();
} while( _state != DONE && _state != DONE_NEIGHBOR &&
@@ -2182,7 +2182,7 @@ namespace mongo {
// Enough found, but need to search neighbor boxes
farDist = std::min( _scanDistance, computeXScanDistance( _near._y, rad2deg( farDist ) ) + 2 * _spec->_error );
}
- assert( farDist >= 0 );
+ verify( farDist >= 0 );
GEODEBUGPRINT( farDist );
// Find the box that includes all the points we need to return
@@ -2317,7 +2317,7 @@ namespace mongo {
GEODEBUG( "\t\tEnding search at point " << ( _points.size() == 0 ? "(beginning)" : maybePointIt->toString() ) );
int numToAddBack = erased - numToErase;
- assert( numToAddBack >= 0 );
+ verify( numToAddBack >= 0 );
GEODEBUG( "\t\tNum tested valid : " << tested.size() << " erased : " << erased << " added back : " << numToAddBack );
@@ -2440,9 +2440,9 @@ namespace mongo {
return _cur != _end;
}
- virtual Record* _current() { assert(ok()); return _cur->_loc.rec(); }
- virtual BSONObj current() { assert(ok()); return _cur->_o; }
- virtual DiskLoc currLoc() { assert(ok()); return _cur->_loc; }
+ virtual Record* _current() { verify(ok()); return _cur->_loc.rec(); }
+ virtual BSONObj current() { verify(ok()); return _cur->_o; }
+ virtual DiskLoc currLoc() { verify(ok()); return _cur->_loc; }
virtual bool advance() {
if( ok() ){
_cur++;
@@ -2570,7 +2570,7 @@ namespace mongo {
error = _g->_errorSphere;
break;
}
- default: assert( false );
+ default: verify( false );
}
// If our distance is in the error bounds...
@@ -2589,7 +2589,7 @@ namespace mongo {
checkEarthBounds( p );
if( spheredist_deg( _startPt , p ) <= _maxDistance ) return true;
break;
- default: assert( false );
+ default: verify( false );
}
return false;
@@ -2898,12 +2898,12 @@ namespace mongo {
IndexDetails& id = d->idx( geoIdx );
Geo2dType * g = (Geo2dType*)id.getSpec().getType();
- assert( &id == g->getDetails() );
+ verify( &id == g->getDetails() );
int numWanted = 100;
if ( cmdObj["num"].isNumber() ) {
numWanted = cmdObj["num"].numberInt();
- assert( numWanted >= 0 );
+ verify( numWanted >= 0 );
}
bool uniqueDocs = false;
@@ -3019,7 +3019,7 @@ namespace mongo {
IndexDetails& id = d->idx( geoIdx );
Geo2dType * g = (Geo2dType*)id.getSpec().getType();
- assert( &id == g->getDetails() );
+ verify( &id == g->getDetails() );
int max = 100000;
@@ -3048,12 +3048,12 @@ namespace mongo {
return (int)(.5+(d*1000));
}
-#define GEOHEQ(a,b) if ( a.toString() != b ){ cout << "[" << a.toString() << "] != [" << b << "]" << endl; assert( a == GeoHash(b) ); }
+#define GEOHEQ(a,b) if ( a.toString() != b ){ cout << "[" << a.toString() << "] != [" << b << "]" << endl; verify( a == GeoHash(b) ); }
void run() {
- assert( ! GeoHash::isBitSet( 0 , 0 ) );
- assert( ! GeoHash::isBitSet( 0 , 31 ) );
- assert( GeoHash::isBitSet( 1 , 31 ) );
+ verify( ! GeoHash::isBitSet( 0 , 0 ) );
+ verify( ! GeoHash::isBitSet( 0 , 31 ) );
+ verify( GeoHash::isBitSet( 1 , 31 ) );
IndexSpec i( BSON( "loc" << "2d" ) );
Geo2dType g( &geo2dplugin , &i );
@@ -3063,10 +3063,10 @@ namespace mongo {
BSONObj in = BSON( "x" << x << "y" << y );
GeoHash h = g._hash( in );
BSONObj out = g._unhash( h );
- assert( round(x) == round( out["x"].number() ) );
- assert( round(y) == round( out["y"].number() ) );
- assert( round( in["x"].number() ) == round( out["x"].number() ) );
- assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ verify( round(x) == round( out["x"].number() ) );
+ verify( round(y) == round( out["y"].number() ) );
+ verify( round( in["x"].number() ) == round( out["x"].number() ) );
+ verify( round( in["y"].number() ) == round( out["y"].number() ) );
}
{
@@ -3075,10 +3075,10 @@ namespace mongo {
BSONObj in = BSON( "x" << x << "y" << y );
GeoHash h = g._hash( in );
BSONObj out = g._unhash( h );
- assert( round(x) == round( out["x"].number() ) );
- assert( round(y) == round( out["y"].number() ) );
- assert( round( in["x"].number() ) == round( out["x"].number() ) );
- assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ verify( round(x) == round( out["x"].number() ) );
+ verify( round(y) == round( out["y"].number() ) );
+ verify( round( in["x"].number() ) == round( out["x"].number() ) );
+ verify( round( in["y"].number() ) == round( out["y"].number() ) );
}
{
@@ -3102,26 +3102,26 @@ namespace mongo {
{
Box b( 5 , 5 , 2 );
- assert( "(5,5) -->> (7,7)" == b.toString() );
+ verify( "(5,5) -->> (7,7)" == b.toString() );
}
{
GeoHash a = g.hash( 1 , 1 );
GeoHash b = g.hash( 4 , 5 );
- assert( 5 == (int)(g.distance( a , b ) ) );
+ verify( 5 == (int)(g.distance( a , b ) ) );
a = g.hash( 50 , 50 );
b = g.hash( 42 , 44 );
- assert( round(10) == round(g.distance( a , b )) );
+ verify( round(10) == round(g.distance( a , b )) );
}
{
GeoHash x("0000");
- assert( 0 == x.getHash() );
+ verify( 0 == x.getHash() );
x.init( 0 , 1 , 32 );
GEOHEQ( x , "0000000000000000000000000000000000000000000000000000000000000001" )
-
- assert( GeoHash( "1100").hasPrefix( GeoHash( "11" ) ) );
- assert( ! GeoHash( "1000").hasPrefix( GeoHash( "11" ) ) );
+
+ verify( GeoHash( "1100").hasPrefix( GeoHash( "11" ) ) );
+ verify( ! GeoHash( "1000").hasPrefix( GeoHash( "11" ) ) );
}
{
@@ -3153,8 +3153,8 @@ namespace mongo {
cout << "b: " << ob.hexDump() << endl;
cout << "c: " << oc.hexDump() << endl;
*/
- assert( oa.woCompare( ob ) < 0 );
- assert( oa.woCompare( oc ) < 0 );
+ verify( oa.woCompare( ob ) < 0 );
+ verify( oa.woCompare( oc ) < 0 );
}
@@ -3171,32 +3171,32 @@ namespace mongo {
{
GeoHash prefix( "110011000000" );
GeoHash entry( "1100110000011100000111000001110000011100000111000001000000000000" );
- assert( ! entry.hasPrefix( prefix ) );
+ verify( ! entry.hasPrefix( prefix ) );
entry = GeoHash("1100110000001100000111000001110000011100000111000001000000000000");
- assert( entry.toString().find( prefix.toString() ) == 0 );
- assert( entry.hasPrefix( GeoHash( "1100" ) ) );
- assert( entry.hasPrefix( prefix ) );
+ verify( entry.toString().find( prefix.toString() ) == 0 );
+ verify( entry.hasPrefix( GeoHash( "1100" ) ) );
+ verify( entry.hasPrefix( prefix ) );
}
{
GeoHash a = g.hash( 50 , 50 );
GeoHash b = g.hash( 48 , 54 );
- assert( round( 4.47214 ) == round( g.distance( a , b ) ) );
+ verify( round( 4.47214 ) == round( g.distance( a , b ) ) );
}
{
Box b( Point( 29.762283 , -95.364271 ) , Point( 29.764283000000002 , -95.36227099999999 ) );
- assert( b.inside( 29.763 , -95.363 ) );
- assert( ! b.inside( 32.9570255 , -96.1082497 ) );
- assert( ! b.inside( 32.9570255 , -96.1082497 , .01 ) );
+ verify( b.inside( 29.763 , -95.363 ) );
+ verify( ! b.inside( 32.9570255 , -96.1082497 ) );
+ verify( ! b.inside( 32.9570255 , -96.1082497 , .01 ) );
}
{
GeoHash a( "11001111" );
- assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11") ) );
- assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11110000") ) );
+ verify( GeoHash( "11" ) == a.commonPrefix( GeoHash("11") ) );
+ verify( GeoHash( "11" ) == a.commonPrefix( GeoHash("11110000") ) );
}
{
@@ -3209,8 +3209,8 @@ namespace mongo {
GeoHash h( x , y );
unsigned a,b;
h.unhash_slow( a,b );
- assert( a == x );
- assert( b == y );
+ verify( a == x );
+ verify( b == y );
}
//cout << "slow: " << t.millis() << endl;
}
@@ -3223,8 +3223,8 @@ namespace mongo {
GeoHash h( x , y );
unsigned a,b;
h.unhash_fast( a,b );
- assert( a == x );
- assert( b == y );
+ verify( a == x );
+ verify( b == y );
}
//cout << "fast: " << t.millis() << endl;
}
@@ -3242,8 +3242,8 @@ namespace mongo {
double dist2 = spheredist_deg(LAX, BNA);
// target is 0.45306
- assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
- assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
+ verify( 0.45305 <= dist1 && dist1 <= 0.45307 );
+ verify( 0.45305 <= dist2 && dist2 <= 0.45307 );
}
{
Point BNA (-1.5127, 0.6304);
@@ -3253,32 +3253,32 @@ namespace mongo {
double dist2 = spheredist_rad(LAX, BNA);
// target is 0.45306
- assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
- assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
+ verify( 0.45305 <= dist1 && dist1 <= 0.45307 );
+ verify( 0.45305 <= dist2 && dist2 <= 0.45307 );
}
{
Point JFK (-73.77694444, 40.63861111 );
Point LAX (-118.40, 33.94);
double dist = spheredist_deg(JFK, LAX) * EARTH_RADIUS_MILES;
- assert( dist > 2469 && dist < 2470 );
+ verify( dist > 2469 && dist < 2470 );
}
{
Point BNA (-86.67, 36.12);
Point LAX (-118.40, 33.94);
Point JFK (-73.77694444, 40.63861111 );
- assert( spheredist_deg(BNA, BNA) < 1e-6);
- assert( spheredist_deg(LAX, LAX) < 1e-6);
- assert( spheredist_deg(JFK, JFK) < 1e-6);
+ verify( spheredist_deg(BNA, BNA) < 1e-6);
+ verify( spheredist_deg(LAX, LAX) < 1e-6);
+ verify( spheredist_deg(JFK, JFK) < 1e-6);
Point zero (0, 0);
Point antizero (0,-180);
// these were known to cause NaN
- assert( spheredist_deg(zero, zero) < 1e-6);
- assert( fabs(M_PI-spheredist_deg(zero, antizero)) < 1e-6);
- assert( fabs(M_PI-spheredist_deg(antizero, zero)) < 1e-6);
+ verify( spheredist_deg(zero, zero) < 1e-6);
+ verify( fabs(M_PI-spheredist_deg(zero, antizero)) < 1e-6);
+ verify( fabs(M_PI-spheredist_deg(antizero, zero)) < 1e-6);
}
}
}
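
The GeoHash unit tests above assume a plain bit interleaving: bit i of x,
counted from the most significant end, lands at hash position 2i and bit i of
y at 2i+1, which is why init(0, 1, 32) ends in ...01. A sketch consistent
with those checks (the real implementation uses precomputed mask tables):

    unsigned long long interleave(unsigned x, unsigned y, unsigned bits) {
        unsigned long long hash = 0;
        for (unsigned i = 0; i < bits; i++) {
            unsigned srcMask = 1u << (bits - 1 - i);
            if (x & srcMask) hash |= 1ULL << (63 - 2 * i);        // even slots
            if (y & srcMask) hash |= 1ULL << (63 - (2 * i + 1));  // odd slots
        }
        return hash;
    }
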
diff --git a/src/mongo/db/geo/core.h b/src/mongo/db/geo/core.h
index bdbf1af48f6..27a1d0f1c2d 100644
--- a/src/mongo/db/geo/core.h
+++ b/src/mongo/db/geo/core.h
@@ -98,7 +98,7 @@ namespace mongo {
if ( e.type() == BinData ) {
int len = 0;
_copy( (char*)&_hash , e.binData( len ) );
- assert( len == 8 );
+ verify( len == 8 );
_bits = bits;
}
else {
@@ -123,7 +123,7 @@ namespace mongo {
}
void init( unsigned x , unsigned y , unsigned bits ) {
- assert( bits <= 32 );
+ verify( bits <= 32 );
_hash = 0;
_bits = bits;
for ( unsigned i=0; i<bits; i++ ) {
@@ -172,7 +172,7 @@ namespace mongo {
}
bool hasPrefix( const GeoHash& other ) const {
- assert( other._bits <= _bits );
+ verify( other._bits <= _bits );
if ( other._bits == 0 )
return true;
long long x = other._hash ^ _hash;
@@ -203,7 +203,7 @@ namespace mongo {
}
void setBit( unsigned pos , bool one ) {
- assert( pos < _bits * 2 );
+ verify( pos < _bits * 2 );
if ( one )
_hash |= geoBitSets.masks64[pos];
else if ( _hash & geoBitSets.masks64[pos] )
@@ -215,12 +215,12 @@ namespace mongo {
}
bool getBitX( unsigned pos ) const {
- assert( pos < 32 );
+ verify( pos < 32 );
return getBit( pos * 2 );
}
bool getBitY( unsigned pos ) const {
- assert( pos < 32 );
+ verify( pos < 32 );
return getBit( ( pos * 2 ) + 1 );
}
@@ -228,7 +228,7 @@ namespace mongo {
BSONObjBuilder b(20);
append( b , name );
BSONObj o = b.obj();
- if( ! strlen( name ) ) assert( o.objsize() == 20 );
+ if( ! strlen( name ) ) verify( o.objsize() == 20 );
return o;
}
@@ -258,7 +258,7 @@ namespace mongo {
}
void move( int x , int y ) {
- assert( _bits );
+ verify( _bits );
_move( 0 , x );
_move( 1 , y );
}
@@ -266,7 +266,7 @@ namespace mongo {
void _move( unsigned offset , int d ) {
if ( d == 0 )
return;
- assert( d <= 1 && d>= -1 ); // TEMP
+ verify( d <= 1 && d>= -1 ); // TEMP
bool from, to;
if ( d > 0 ) {
@@ -299,7 +299,7 @@ namespace mongo {
pos -= 2;
}
- assert(0);
+ verify(0);
}
GeoHash& operator=(const GeoHash& h) {
@@ -324,7 +324,7 @@ namespace mongo {
GeoHash& operator+=( const char * s ) {
unsigned pos = _bits * 2;
_bits += strlen(s) / 2;
- assert( _bits <= 32 );
+ verify( _bits <= 32 );
while ( s[0] ) {
if ( s[0] == '1' )
setBit( pos , 1 );
@@ -532,7 +532,7 @@ namespace mongo {
if (cross_prod >= 1 || cross_prod <= -1) {
// fun with floats
- assert( fabs(cross_prod)-1 < 1e-6 );
+ verify( fabs(cross_prod)-1 < 1e-6 );
return cross_prod > 0 ? 0 : M_PI;
}
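
The cross_prod clamp above belongs to a spherical law-of-cosines distance:
the dot product of two unit vectors can drift just past [-1, 1] through
rounding, and acos would then return NaN. A sketch of the surrounding
computation, assuming a point carries (longitude, latitude) in radians as
spheredist_rad expects:

    double spheredistSketch(const Point& p1, const Point& p2) {
        double cross_prod = sin(p1._y) * sin(p2._y) +
                            cos(p1._y) * cos(p2._y) * cos(p1._x - p2._x);
        if (cross_prod >= 1 || cross_prod <= -1) {
            // fun with floats: rounding pushed the dot product out of range
            verify(fabs(cross_prod) - 1 < 1e-6);
            return cross_prod > 0 ? 0 : M_PI;
        }
        return acos(cross_prod);   // great-circle distance in radians
    }
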
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index 104665087f6..7fe646e62a0 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -149,7 +149,7 @@ namespace mongo {
}
- assert( _other.size() == 1 );
+ verify( _other.size() == 1 );
BSONElementSet all;
obj.getFieldsDotted( _other[0] , all );
@@ -167,7 +167,7 @@ namespace mongo {
shared_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const {
shared_ptr<Cursor> c;
- assert(0);
+ verify(0);
return c;
}
@@ -290,7 +290,7 @@ namespace mongo {
IndexDetails& id = d->idx( idxNum );
GeoHaystackSearchIndex * si = (GeoHaystackSearchIndex*)id.getSpec().getType();
- assert( &id == si->getDetails() );
+ verify( &id == si->getDetails() );
BSONElement n = cmdObj["near"];
BSONElement maxDistance = cmdObj["maxDistance"];
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
index 7c6551549ec..2781b07e592 100644
--- a/src/mongo/db/index.cpp
+++ b/src/mongo/db/index.cpp
@@ -237,7 +237,7 @@ namespace mongo {
void setDifference(BSONObjSet &l, BSONObjSet &r, vector<BSONObj*> &diff) {
// l and r must use the same ordering spec.
- assert( l.key_comp().order() == r.key_comp().order() );
+ verify( l.key_comp().order() == r.key_comp().order() );
BSONObjSet::iterator i = l.begin();
BSONObjSet::iterator j = r.begin();
while ( 1 ) {
@@ -342,7 +342,7 @@ namespace mongo {
}
sourceCollection = nsdetails(sourceNS.c_str());
tlog() << "info: creating collection " << sourceNS << " on add index" << endl;
- assert( sourceCollection );
+ verify( sourceCollection );
}
if ( sourceCollection->findIndexByName(name) >= 0 ) {
@@ -438,7 +438,7 @@ namespace mongo {
keyPattern = info["key"].embeddedObjectUserCheck();
if ( keyPattern.objsize() == 0 ) {
out() << info.toString() << endl;
- assert(false);
+ verify(false);
}
_init();
}
diff --git a/src/mongo/db/index.h b/src/mongo/db/index.h
index 4418f2ad382..8fb0478cd57 100644
--- a/src/mongo/db/index.h
+++ b/src/mongo/db/index.h
@@ -127,7 +127,7 @@ namespace mongo {
string s;
s.reserve(Namespace::MaxNsLen);
s = io.getStringField("ns");
- assert( !s.empty() );
+ verify( !s.empty() );
s += ".$";
s += io.getStringField("name");
return s;
diff --git a/src/mongo/db/indexkey.cpp b/src/mongo/db/indexkey.cpp
index be2df6bed39..2eae7c776a4 100644
--- a/src/mongo/db/indexkey.cpp
+++ b/src/mongo/db/indexkey.cpp
@@ -75,7 +75,7 @@ namespace mongo {
}
void IndexSpec::_init() {
- assert( keyPattern.objsize() );
+ verify( keyPattern.objsize() );
// some basics
_nFields = keyPattern.nFields();
@@ -233,7 +233,7 @@ namespace mongo {
}
else {
// nonterminal array element to expand, so recurse
- assert( !arrElt.eoo() );
+ verify( !arrElt.eoo() );
BSONObjIterator i( arrElt.embeddedObject() );
if ( i.more() ) {
while( i.more() ) {
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index fcd30364782..b471bcf0ff2 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -118,14 +118,14 @@ namespace mongo {
*getDur().writing( reinterpret_cast< double * >( value() ) ) = d;
else if ( _element.type() == NumberInt )
*getDur().writing( reinterpret_cast< int * >( value() ) ) = (int) d;
- else assert(0);
+ else verify(0);
}
void BSONElementManipulator::SetLong(long long n) {
- assert( _element.type() == NumberLong );
+ verify( _element.type() == NumberLong );
*getDur().writing( reinterpret_cast< long long * >(value()) ) = n;
}
void BSONElementManipulator::SetInt(int n) {
- assert( _element.type() == NumberInt );
+ verify( _element.type() == NumberInt );
getDur().writingInt( *reinterpret_cast< int * >( value() ) ) = n;
}
/* dur:: version */
@@ -157,12 +157,12 @@ namespace mongo {
scoped_ptr<Matcher> m(new Matcher(q.query));
for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
Client *c = *i;
- assert( c );
+ verify( c );
CurOp* co = c->curop();
if ( c == &me && !co ) {
continue;
}
- assert( co );
+ verify( co );
if( all || co->displayInCurop() ) {
BSONObj info = co->infoNoauth();
if ( all || m->matches( info )) {
@@ -240,7 +240,7 @@ namespace mongo {
try {
dbresponse.exhaust = runQuery(m, q, op, *resp);
- assert( !resp->empty() );
+ verify( !resp->empty() );
}
catch ( SendStaleConfigException& e ){
ex.reset( new SendStaleConfigException( e.getns(), e.getInfo().msg, e.getVersionReceived(), e.getVersionWanted() ) );
@@ -489,7 +489,7 @@ namespace mongo {
if ( n > 2000 ) {
log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
- assert( n < 30000 );
+ verify( n < 30000 );
}
int found = ClientCursor::erase(n, (long long *) x);
@@ -507,10 +507,10 @@ namespace mongo {
assertInWriteLock();
Client::Context * ctx = cc().getContext();
- assert( ctx );
- assert( ctx->inDB( db , path ) );
+ verify( ctx );
+ verify( ctx->inDB( db , path ) );
Database *database = ctx->db();
- assert( database->name == db );
+ verify( database->name == db );
oplogCheckCloseDatabase( database ); // oplog caches some things, dirty its caches
@@ -537,12 +537,12 @@ namespace mongo {
int flags = d.pullInt();
BSONObj query = d.nextJsObj();
- assert( d.moreJSObjs() );
- assert( query.objsize() < m.header()->dataLen() );
+ verify( d.moreJSObjs() );
+ verify( query.objsize() < m.header()->dataLen() );
BSONObj toupdate = d.nextJsObj();
uassert( 10055 , "update object too large", toupdate.objsize() <= BSONObjMaxUserSize);
- assert( toupdate.objsize() < m.header()->dataLen() );
- assert( query.objsize() + toupdate.objsize() < m.header()->dataLen() );
+ verify( toupdate.objsize() < m.header()->dataLen() );
+ verify( query.objsize() + toupdate.objsize() < m.header()->dataLen() );
bool upsert = flags & UpdateOption_Upsert;
bool multi = flags & UpdateOption_Multi;
bool broadcast = flags & UpdateOption_Broadcast;
@@ -582,7 +582,7 @@ namespace mongo {
int flags = d.pullInt();
bool justOne = flags & RemoveOption_JustOne;
bool broadcast = flags & RemoveOption_Broadcast;
- assert( d.moreJSObjs() );
+ verify( d.moreJSObjs() );
BSONObj pattern = d.nextJsObj();
op.debug().query = pattern;
@@ -838,7 +838,7 @@ namespace mongo {
lastError.startRequest( toSend, lastError._get() );
DbResponse dbResponse;
assembleResponse( toSend, dbResponse , _clientHost );
- assert( dbResponse.response );
+ verify( dbResponse.response );
dbResponse.response->concat(); // can get rid of this if we make response handling smarter
response = *dbResponse.response;
getDur().commitIfNeeded();
@@ -859,7 +859,7 @@ namespace mongo {
//if ( ! query.obj.isEmpty() || nToReturn != 0 || nToSkip != 0 || fieldsToReturn || queryOptions )
return DBClientBase::query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
//
- //assert( query.obj.isEmpty() );
+ //verify( query.obj.isEmpty() );
//throw UserException( (string)"yay:" + ns );
}
@@ -1055,9 +1055,9 @@ namespace mongo {
string s = ss.str();
const char * data = s.c_str();
#ifdef _WIN32
- assert ( _write( fd, data, strlen( data ) ) );
+ verify( _write( fd, data, strlen( data ) ) );
#else
- assert ( write( fd, data, strlen( data ) ) );
+ verify( write( fd, data, strlen( data ) ) );
#endif
}
@@ -1191,7 +1191,7 @@ namespace mongo {
}
void DiagLog::openFile() {
- assert( f == 0 );
+ verify( f == 0 );
stringstream ss;
ss << dbpath << "/diaglog." << hex << time(0);
string name = ss.str();
@@ -1238,7 +1238,7 @@ namespace mongo {
OCCASIONALLY log = true;
if ( log ) {
scoped_lock lk(mutex);
- assert( f );
+ verify( f );
f->write(data,len);
}
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 7e1d19ce2f3..fd80d6f8f10 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -33,7 +33,7 @@ namespace mongo {
assertInWriteLock();
Database *db = c.database();
- DEV assert( db );
+ DEV verify( db );
const char *ns = db->profileName.c_str();
// build object
diff --git a/src/mongo/db/jsobj.cpp b/src/mongo/db/jsobj.cpp
index f418f5d034c..627d346445e 100644
--- a/src/mongo/db/jsobj.cpp
+++ b/src/mongo/db/jsobj.cpp
@@ -34,8 +34,6 @@
#include "jsobjmanipulator.h"
#include "../util/optime.h"
#include <boost/static_assert.hpp>
-#undef assert
-#define assert MONGO_assert
// make sure our assumptions are valid
BOOST_STATIC_ASSERT( sizeof(short) == 2 );
@@ -372,7 +370,7 @@ namespace mongo {
}
log() << "compareDottedFieldNames ERROR l: " << l << " r: " << r << " TOO MANY LOOPS" << endl;
- assert(0);
+ verify(0);
return SAME; // will never get here
}
@@ -732,7 +730,7 @@ namespace mongo {
returns n added not counting _id unless requested.
*/
int BSONObj::addFields(BSONObj& from, set<string>& fields) {
- assert( isEmpty() && !isOwned() ); /* partial implementation for now... */
+ verify( isEmpty() && !isOwned() ); /* partial implementation for now... */
BSONObjBuilder b;
@@ -952,8 +950,8 @@ namespace mongo {
c.appendRegex("x", "goo");
BSONObj p = c.done();
- assert( !o.binaryEqual( p ) );
- assert( o.woCompare( p ) < 0 );
+ verify( !o.binaryEqual( p ) );
+ verify( o.woCompare( p ) < 0 );
}
void testoid() {
@@ -964,10 +962,10 @@ namespace mongo {
OID b;
// goes with sleep above...
// b.init();
- // assert( memcmp(id.getData(), b.getData(), 12) < 0 );
+ // verify( memcmp(id.getData(), b.getData(), 12) < 0 );
b.init( id.str() );
- assert( b == id );
+ verify( b == id );
}
void testbounds() {
@@ -982,15 +980,15 @@ namespace mongo {
b.append( "x" , numeric_limits<double>::max() );
r = b.obj();
}
- assert( l.woCompare( r ) < 0 );
- assert( r.woCompare( l ) > 0 );
+ verify( l.woCompare( r ) < 0 );
+ verify( r.woCompare( l ) > 0 );
{
BSONObjBuilder b;
b.append( "x" , numeric_limits<int>::max() );
l = b.obj();
}
- assert( l.woCompare( r ) < 0 );
- assert( r.woCompare( l ) > 0 );
+ verify( l.woCompare( r ) < 0 );
+ verify( r.woCompare( l ) > 0 );
}
void testorder() {
@@ -999,12 +997,12 @@ namespace mongo {
{ BSONObjBuilder b; b.append( "x" , (long long)2 ); x = b.obj(); }
{ BSONObjBuilder b; b.append( "x" , (int)3 ); y = b.obj(); }
{ BSONObjBuilder b; b.append( "x" , (long long)4 ); z = b.obj(); }
- assert( x.woCompare( y ) < 0 );
- assert( x.woCompare( z ) < 0 );
- assert( y.woCompare( x ) > 0 );
- assert( z.woCompare( x ) > 0 );
- assert( y.woCompare( z ) < 0 );
- assert( z.woCompare( y ) > 0 );
+ verify( x.woCompare( y ) < 0 );
+ verify( x.woCompare( z ) < 0 );
+ verify( y.woCompare( x ) > 0 );
+ verify( z.woCompare( x ) > 0 );
+ verify( y.woCompare( z ) < 0 );
+ verify( z.woCompare( y ) > 0 );
}
{
@@ -1015,36 +1013,36 @@ namespace mongo {
{ BSONObjBuilder b; b.appendNull( "x" ); n = b.obj(); }
{ BSONObjBuilder b; u = b.obj(); }
- assert( ll.woCompare( u ) == d.woCompare( u ) );
- assert( ll.woCompare( u ) == i.woCompare( u ) );
+ verify( ll.woCompare( u ) == d.woCompare( u ) );
+ verify( ll.woCompare( u ) == i.woCompare( u ) );
BSONObj k = BSON( "x" << 1 );
- assert( ll.woCompare( u , k ) == d.woCompare( u , k ) );
- assert( ll.woCompare( u , k ) == i.woCompare( u , k ) );
+ verify( ll.woCompare( u , k ) == d.woCompare( u , k ) );
+ verify( ll.woCompare( u , k ) == i.woCompare( u , k ) );
- assert( u.woCompare( ll ) == u.woCompare( d ) );
- assert( u.woCompare( ll ) == u.woCompare( i ) );
- assert( u.woCompare( ll , k ) == u.woCompare( d , k ) );
- assert( u.woCompare( ll , k ) == u.woCompare( d , k ) );
+ verify( u.woCompare( ll ) == u.woCompare( d ) );
+ verify( u.woCompare( ll ) == u.woCompare( i ) );
+ verify( u.woCompare( ll , k ) == u.woCompare( d , k ) );
+ verify( u.woCompare( ll , k ) == u.woCompare( d , k ) );
- assert( i.woCompare( n ) == d.woCompare( n ) );
+ verify( i.woCompare( n ) == d.woCompare( n ) );
- assert( ll.woCompare( n ) == d.woCompare( n ) );
- assert( ll.woCompare( n ) == i.woCompare( n ) );
- assert( ll.woCompare( n , k ) == d.woCompare( n , k ) );
- assert( ll.woCompare( n , k ) == i.woCompare( n , k ) );
+ verify( ll.woCompare( n ) == d.woCompare( n ) );
+ verify( ll.woCompare( n ) == i.woCompare( n ) );
+ verify( ll.woCompare( n , k ) == d.woCompare( n , k ) );
+ verify( ll.woCompare( n , k ) == i.woCompare( n , k ) );
- assert( n.woCompare( ll ) == n.woCompare( d ) );
- assert( n.woCompare( ll ) == n.woCompare( i ) );
- assert( n.woCompare( ll , k ) == n.woCompare( d , k ) );
- assert( n.woCompare( ll , k ) == n.woCompare( d , k ) );
+ verify( n.woCompare( ll ) == n.woCompare( d ) );
+ verify( n.woCompare( ll ) == n.woCompare( i ) );
+ verify( n.woCompare( ll , k ) == n.woCompare( d , k ) );
+ verify( n.woCompare( ll , k ) == n.woCompare( d , k ) );
}
{
BSONObj l,r;
{ BSONObjBuilder b; b.append( "x" , "eliot" ); l = b.obj(); }
{ BSONObjBuilder b; b.appendSymbol( "x" , "eliot" ); r = b.obj(); }
- assert( l.woCompare( r ) == 0 );
- assert( r.woCompare( l ) == 0 );
+ verify( l.woCompare( r ) == 0 );
+ verify( r.woCompare( l ) == 0 );
}
}
@@ -1057,11 +1055,11 @@ namespace mongo {
BSONObj a = A.done();
BSONObj b = B.done();
BSONObj c = C.done();
- assert( !a.binaryEqual( b ) ); // comments on operator==
+ verify( !a.binaryEqual( b ) ); // comments on operator==
int cmp = a.woCompare(b);
- assert( cmp == 0 );
+ verify( cmp == 0 );
cmp = a.woCompare(c);
- assert( cmp < 0 );
+ verify( cmp < 0 );
testoid();
testbounds();
testorder();
@@ -1226,9 +1224,9 @@ namespace mongo {
BSONObjIterator i( o );
while ( i.more() ) {
_fields[x++] = i.next().rawdata();
- assert( _fields[x-1] );
+ verify( _fields[x-1] );
}
- assert( x == _nfields );
+ verify( x == _nfields );
std::sort( _fields , _fields + _nfields , cmp );
_cur = 0;
}
diff --git a/src/mongo/db/jsobjmanipulator.h b/src/mongo/db/jsobjmanipulator.h
index 880fde8b409..05666409e62 100644
--- a/src/mongo/db/jsobjmanipulator.h
+++ b/src/mongo/db/jsobjmanipulator.h
@@ -30,7 +30,7 @@ namespace mongo {
public:
BSONElementManipulator( const BSONElement &element ) :
_element( element ) {
- assert( !_element.eoo() );
+ verify( !_element.eoo() );
}
/** Replace a Timestamp type with a Date type initialized to
OpTime::now().asDate()
@@ -43,16 +43,16 @@ namespace mongo {
void setNumber(double d) {
if ( _element.type() == NumberDouble ) *reinterpret_cast< double * >( value() ) = d;
else if ( _element.type() == NumberInt ) *reinterpret_cast< int * >( value() ) = (int) d;
- else assert(0);
+ else verify(0);
}
void SetNumber(double d);
void setLong(long long n) {
- assert( _element.type() == NumberLong );
+ verify( _element.type() == NumberLong );
*reinterpret_cast< long long * >( value() ) = n;
}
void SetLong(long long n);
void setInt(int n) {
- assert( _element.type() == NumberInt );
+ verify( _element.type() == NumberInt );
*reinterpret_cast< int * >( value() ) = n;
}
void SetInt(int n);
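
The manipulator guards above matter because each setter is a raw in-place
patch of the BSON buffer: the payload bytes are overwritten through value()
while the element's type byte stays put, so the stored type must already
match the value being poked in. Condensed:

    // Overwrite a NumberLong element's 8 payload bytes in place; value()
    // points just past the type byte and field name.
    void setLongSketch(const BSONElement& element, long long n) {
        verify(element.type() == NumberLong);   // type byte is not rewritten
        *reinterpret_cast<long long*>(const_cast<char*>(element.value())) = n;
    }
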
diff --git a/src/mongo/db/json.cpp b/src/mongo/db/json.cpp
index f27ccbf896e..265f53e161a 100644
--- a/src/mongo/db/json.cpp
+++ b/src/mongo/db/json.cpp
@@ -29,8 +29,6 @@
#include <boost/spirit/utility/loops.hpp>
#include <boost/spirit/utility/lists.hpp>
#endif
-#undef assert
-#define assert MONGO_assert
#include "json.h"
#include "../bson/util/builder.h"
@@ -191,7 +189,7 @@ namespace mongo {
o = '\v';
break;
default:
- assert( false );
+ verify( false );
}
b.ss << o;
}
@@ -642,7 +640,7 @@ namespace mongo {
msgasserted(10340, "Failure parsing JSON string near: " + string( result.stop, limit ));
}
BSONObj ret = b.pop();
- assert( b.empty() );
+ verify( b.empty() );
return ret;
}
diff --git a/src/mongo/db/key.cpp b/src/mongo/db/key.cpp
index 47449986d21..f7deb79927c 100644
--- a/src/mongo/db/key.cpp
+++ b/src/mongo/db/key.cpp
@@ -124,7 +124,7 @@ namespace mongo {
}
default:
out() << "oldCompareElementValues: bad type " << (int) l.type() << endl;
- assert(false);
+ verify(false);
}
return -1;
}
@@ -314,7 +314,7 @@ namespace mongo {
long long m = 2LL << 52;
DEV {
long long d = m-1;
- assert( ((long long) ((double) -d)) == -d );
+ verify( ((long long) ((double) -d)) == -d );
}
if( n >= m || n <= -m ) {
// can't represent exactly as a double
@@ -351,7 +351,7 @@ namespace mongo {
}
BSONObj KeyV1::toBson() const {
- assert( _keyData != 0 );
+ verify( _keyData != 0 );
if( !isCompactFormat() )
return bson();
@@ -413,7 +413,7 @@ namespace mongo {
p += sizeof(double);
break;
default:
- assert(false);
+ verify(false);
}
if( (bits & cHASMORE) == 0 )
@@ -577,7 +577,7 @@ namespace mongo {
sz = ((unsigned) p[1]) + 2;
}
else {
- assert( type == cbindata );
+ verify( type == cbindata );
sz = binDataCodeToLength(p[1]) + 2;
}
}
@@ -655,7 +655,7 @@ namespace mongo {
case cmaxkey:
break;
default:
- assert(false);
+ verify(false);
}
if( (lval&cHASMORE) == 0 )
break;
@@ -671,7 +671,7 @@ namespace mongo {
a[1] = 0;
b[0] = 3;
b[1] = 0;
- assert( strcmp(a,b)>0 && memcmp(a,b,2)>0 );
+ verify( strcmp(a,b)>0 && memcmp(a,b,2)>0 );
}
} cunittest;
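
The 2LL << 52 bound above is the 53-bit significand limit of an IEEE-754
double: every integer of magnitude below 2^53 survives a round trip through
double exactly, and the DEV block checks that for the largest such value. A
worked check:

    long long m = 2LL << 52;                       // 2^53
    verify((long long)(double)(m - 1) == m - 1);   // exact round trip
    verify((long long)(double)(m + 1) != m + 1);   // rounds to 2^53 itself
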
diff --git a/src/mongo/db/lasterror.cpp b/src/mongo/db/lasterror.cpp
index dbd3f1815d4..cf107462b78 100644
--- a/src/mongo/db/lasterror.cpp
+++ b/src/mongo/db/lasterror.cpp
@@ -130,7 +130,7 @@ namespace mongo {
void prepareErrForNewRequest( Message &m, LastError * err ) {
// a killCursors message shouldn't affect last error
- assert( err );
+ verify( err );
if ( m.operation() == dbKillCursors ) {
err->disabled = true;
}
@@ -141,7 +141,7 @@ namespace mongo {
}
LastError * LastErrorHolder::startRequest( Message& m , LastError * le ) {
- assert( le );
+ verify( le );
prepareErrForNewRequest( m, le );
return le;
}
diff --git a/src/mongo/db/lasterror.h b/src/mongo/db/lasterror.h
index b47a936caeb..9605bbfcf05 100644
--- a/src/mongo/db/lasterror.h
+++ b/src/mongo/db/lasterror.h
@@ -114,7 +114,7 @@ namespace mongo {
LastError * le = get(false);
if ( ! le ) {
error() << " no LastError!" << endl;
- assert( le );
+ verify( le );
}
return le;
}
diff --git a/src/mongo/db/matcher.cpp b/src/mongo/db/matcher.cpp
index 897af960c22..df57d4dd208 100755
--- a/src/mongo/db/matcher.cpp
+++ b/src/mongo/db/matcher.cpp
@@ -168,12 +168,12 @@ namespace mongo {
}
int ElementMatcher::inverseOfNegativeCompareOp() const {
- assert( negativeCompareOp() );
+ verify( negativeCompareOp() );
return _compareOp == BSONObj::NE ? BSONObj::Equality : BSONObj::opIN;
}
bool ElementMatcher::negativeCompareOpContainsNull() const {
- assert( negativeCompareOp() );
+ verify( negativeCompareOp() );
return (_compareOp == BSONObj::NE && _toMatch.type() != jstNULL) ||
(_compareOp == BSONObj::NIN && _myset->count( staticNull.firstElement()) == 0 );
}
@@ -547,7 +547,7 @@ namespace mongo {
}
inline int Matcher::valuesMatch(const BSONElement& l, const BSONElement& r, int op, const ElementMatcher& bm) const {
- assert( op != BSONObj::NE && op != BSONObj::NIN );
+ verify( op != BSONObj::NE && op != BSONObj::NIN );
if ( op == BSONObj::Equality ) {
return l.valuesEqual(r);
@@ -713,7 +713,7 @@ namespace mongo {
cout << "obj: " << obj << endl;
cout << "fieldName: " << fieldName << endl;
cout << "_constrainIndexKey: " << _constrainIndexKey << endl;
- assert( !e.eoo() );
+ verify( !e.eoo() );
}
}
else {
@@ -1236,7 +1236,7 @@ namespace mongo {
{
// a quick check that we are using our mongo assert macro
int x = 1;
- assert( ++x );
+ verify( ++x );
if( x != 2 ) {
log() << "bad build - wrong assert macro" << endl;
::abort();
@@ -1246,18 +1246,18 @@ namespace mongo {
BSONObj j1((const char *) &js1);
BSONObj j2((const char *) &js2);
Matcher m(j2);
- assert( m.matches(j1) );
+ verify( m.matches(j1) );
js2.sval[0] = 'z';
- assert( !m.matches(j1) );
+ verify( !m.matches(j1) );
Matcher n(j1);
- assert( n.matches(j1) );
- assert( !n.matches(j2) );
+ verify( n.matches(j1) );
+ verify( !n.matches(j2) );
BSONObj j0 = BSONObj();
// BSONObj j0((const char *) &js0);
Matcher p(j0);
- assert( p.matches(j1) );
- assert( p.matches(j2) );
+ verify( p.matches(j1) );
+ verify( p.matches(j2) );
}
} jsunittest;
@@ -1283,14 +1283,14 @@ namespace mongo {
pcrecpp::RE re1(")({a}h.*o");
pcrecpp::RE re("h.llo");
- assert( re.FullMatch("hello") );
- assert( !re1.FullMatch("hello") );
+ verify( re.FullMatch("hello") );
+ verify( !re1.FullMatch("hello") );
pcrecpp::RE_Options options;
options.set_utf8(true);
pcrecpp::RE part("dwi", options);
- assert( part.PartialMatch("dwight") );
+ verify( part.PartialMatch("dwight") );
pcre_config( PCRE_CONFIG_UNICODE_PROPERTIES , &ret );
if ( ! ret )
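
Editor's note: the self-check above (verify( ++x ), then abort if x != 2) only works because verify(), unlike the C library's assert, evaluates its argument in every build; assert compiles to nothing under NDEBUG, dropping both the check and its side effects. A minimal stand-in showing the distinction (my_verify is hypothetical, not MongoDB's real macro):

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in: like MongoDB's verify(), the condition is
    // evaluated whether or not NDEBUG is defined.
    #define my_verify(expr) \
        do { if (!(expr)) { std::fprintf(stderr, "verify failed: %s\n", #expr); std::abort(); } } while (0)

    int main() {
        int x = 1;
        my_verify(++x);      // the increment always runs
        if (x != 2) {        // with assert(++x) and -DNDEBUG, x would still be 1
            std::fprintf(stderr, "bad build - wrong assert macro\n");
            std::abort();
        }
        return 0;
    }
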
diff --git a/src/mongo/db/matcher.h b/src/mongo/db/matcher.h
index 7071267c9eb..c5386faa23a 100644
--- a/src/mongo/db/matcher.h
+++ b/src/mongo/db/matcher.h
@@ -102,7 +102,7 @@ namespace mongo {
bool hasLoadedRecord() const { return _loadedRecord; }
bool hasElemMatchKey() const { return _elemMatchKeyFound; }
string elemMatchKey() const {
- assert( hasElemMatchKey() );
+ verify( hasElemMatchKey() );
return _elemMatchKey;
}
diff --git a/src/mongo/db/minilex.h b/src/mongo/db/minilex.h
index 677514aa47c..f77bd9b464b 100644
--- a/src/mongo/db/minilex.h
+++ b/src/mongo/db/minilex.h
@@ -114,9 +114,9 @@ namespace mongo {
MiniLex() {
strhashmap atest;
atest["foo"] = 3;
- assert( atest.count("bar") == 0 );
- assert( atest.count("foo") == 1 );
- assert( atest["foo"] == 3 );
+ verify( atest.count("bar") == 0 );
+ verify( atest.count("foo") == 1 );
+ verify( atest["foo"] == 3 );
for ( int i = 0; i < 256; i++ ) {
ic[i] = starter[i] = false;
diff --git a/src/mongo/db/mongommf.cpp b/src/mongo/db/mongommf.cpp
index 5767d92ffb1..beff3cfa923 100644
--- a/src/mongo/db/mongommf.cpp
+++ b/src/mongo/db/mongommf.cpp
@@ -73,7 +73,7 @@ namespace mongo {
if( !ok ) {
DWORD e = GetLastError();
log() << "VirtualProtect failed (mcw) " << mmf->filename() << ' ' << chunkno << hex << protectStart << ' ' << protectSize << ' ' << errnoWithDescription(e) << endl;
- assert(false);
+ verify(false);
}
}
@@ -81,7 +81,7 @@ namespace mongo {
}
void* MemoryMappedFile::createPrivateMap() {
- assert( maphandle );
+ verify( maphandle );
scoped_lock lk(mapViewMutex);
void *p = MapViewOfFile(maphandle, FILE_MAP_READ, 0, 0, 0);
if ( p == 0 ) {
@@ -113,14 +113,14 @@ namespace mongo {
if( !ok ) {
DWORD e = GetLastError();
log() << "VirtualProtect failed in remapPrivateView " << filename() << hex << oldPrivateAddr << ' ' << len << ' ' << errnoWithDescription(e) << endl;
- assert(false);
+ verify(false);
}
return oldPrivateAddr;
#else
if( !UnmapViewOfFile(oldPrivateAddr) ) {
DWORD e = GetLastError();
log() << "UnMapViewOfFile failed " << filename() << ' ' << errnoWithDescription(e) << endl;
- assert(false);
+ verify(false);
}
// we want the new address to be the same as the old address in case things keep pointers around (as namespaceindex does).
@@ -131,16 +131,16 @@ namespace mongo {
if ( p == 0 ) {
DWORD e = GetLastError();
log() << "MapViewOfFileEx failed " << filename() << " " << errnoWithDescription(e) << endl;
- assert(p);
+ verify(p);
}
- assert(p == oldPrivateAddr);
+ verify(p == oldPrivateAddr);
return p;
#endif
}
#endif
void MongoMMF::remapThePrivateView() {
- assert( cmdLine.dur );
+ verify( cmdLine.dur );
// todo 1.9 : it turns out we require that we always remap to the same address.
// so the remove / add isn't necessary and can be removed?
@@ -153,8 +153,8 @@ namespace mongo {
/** register view. threadsafe */
void PointerToMMF::add(void *view, MongoMMF *f) {
- assert(view);
- assert(f);
+ verify(view);
+ verify(f);
mutex::scoped_lock lk(_m);
_views.insert( pair<void*,MongoMMF*>(view,f) );
}
@@ -173,7 +173,7 @@ namespace mongo {
#else
size_t max = ~((size_t)0);
#endif
- assert( max > (size_t) this ); // just checking that no one redef'd SIZE_MAX and that it is sane
+ verify( max > (size_t) this ); // just checking that no one redef'd SIZE_MAX and that it is sane
// this way we don't need any boundary checking in _find()
_views.insert( pair<void*,MongoMMF*>((void*)0,(MongoMMF*)0) );
@@ -217,8 +217,8 @@ namespace mongo {
PointerToMMF privateViews;
/* void* MongoMMF::switchToPrivateView(void *readonly_ptr) {
- assert( cmdLine.dur );
- assert( testIntent );
+ verify( cmdLine.dur );
+ verify( testIntent );
void *p = readonly_ptr;
@@ -253,7 +253,7 @@ namespace mongo {
void* MongoMMF::_switchToWritableView(void *p) {
size_t ofs;
MongoMMF *f = privateViews.find(p, ofs);
- assert( f );
+ verify( f );
return (((char *)f->_view_write)+ofs);
}
@@ -332,7 +332,7 @@ namespace mongo {
rather vague and not checking if the right database is locked
*/
if( !Lock::somethingWriteLocked() ) {
- assert( inShutdown() );
+ verify( inShutdown() );
DEV {
log() << "is it really ok to close a mongommf outside a write lock? file:" << filename() << endl;
}
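
Editor's note: PointerToMMF above seeds its view map with a (0,0) pair so that a lookup can always step back one element without first checking begin(); the sentinel guarantees every pointer has a predecessor. A sketch of that pattern over std::map (types simplified; the real _find() also checks the offset against the view's length):

    #include <cstddef>
    #include <map>

    struct View { std::size_t len; };

    struct PointerMap {
        std::map<void*, View*> views;
        PointerMap() { views.insert(std::make_pair((void*)0, (View*)0)); }  // sentinel

        // Return the view whose base is the greatest address <= p, or null.
        View* find(void* p, std::size_t& ofs) {
            std::map<void*, View*>::iterator it = views.upper_bound(p);
            --it;                            // safe: the sentinel guarantees a predecessor
            if (it->second == 0)
                return 0;                    // p lies below every registered view
            ofs = (char*)p - (char*)it->first;
            return it->second;
        }
    };
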
diff --git a/src/mongo/db/mongommf.h b/src/mongo/db/mongommf.h
index 62a6cdfd3fd..2cbd5f4d2e6 100644
--- a/src/mongo/db/mongommf.h
+++ b/src/mongo/db/mongommf.h
@@ -76,7 +76,7 @@ namespace mongo {
if the suffix is "ns", fileSuffixNo -1
*/
const RelativePath& relativePath() const {
- DEV assert( !_p._p.empty() );
+ DEV verify( !_p._p.empty() );
return _p;
}
diff --git a/src/mongo/db/mongomutex.h b/src/mongo/db/mongomutex.h
index 0e033124d27..51c746ec634 100644
--- a/src/mongo/db/mongomutex.h
+++ b/src/mongo/db/mongomutex.h
@@ -38,11 +38,11 @@ namespace mongo {
if ( locked == 0 )
enter = curTimeMicros64();
locked++;
- assert( locked >= 1 );
+ verify( locked >= 1 );
}
void leaving() {
locked--;
- assert( locked >= 0 );
+ verify( locked >= 0 );
if ( locked == 0 )
timeLocked += curTimeMicros64() - enter;
}
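
Editor's note: entering()/leaving() above keep a recursion count and accrue held time only across the outermost acquire and release, which is why verify( locked >= 1 ) and verify( locked >= 0 ) are the invariants. A condensed sketch of that accounting (the clock is passed in; the real code calls curTimeMicros64):

    struct LockTimer {
        int locked;                      // recursion depth
        unsigned long long enter;        // timestamp of the outermost acquire
        unsigned long long timeLocked;   // total micros held
        LockTimer() : locked(0), enter(0), timeLocked(0) {}

        void entering(unsigned long long now) {
            if (locked == 0)
                enter = now;             // start the clock once
            locked++;
        }
        void leaving(unsigned long long now) {
            locked--;
            if (locked == 0)
                timeLocked += now - enter;   // accrue on the final release
        }
    };
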
diff --git a/src/mongo/db/namespace-inl.h b/src/mongo/db/namespace-inl.h
index c18f681e0b9..b482b3d5c36 100644
--- a/src/mongo/db/namespace-inl.h
+++ b/src/mongo/db/namespace-inl.h
@@ -63,7 +63,7 @@ namespace mongo {
/* future : this doesn't need to be an inline. */
inline string Namespace::getSisterNS( const char * local ) const {
- assert( local && local[0] != '.' );
+ verify( local && local[0] != '.' );
string old(buf);
if ( old.find( "." ) != string::npos )
old = old.substr( 0 , old.find( "." ) );
diff --git a/src/mongo/db/namespace_details.cpp b/src/mongo/db/namespace_details.cpp
index d5ffe5eaf97..ffb1853bbc3 100644
--- a/src/mongo/db/namespace_details.cpp
+++ b/src/mongo/db/namespace_details.cpp
@@ -59,7 +59,7 @@ namespace mongo {
// For capped case, signal that we are doing initial extent allocation.
if ( capped )
cappedLastDelRecLastExtent().setInvalid();
- assert( sizeof(dataFileVersion) == 2 );
+ verify( sizeof(dataFileVersion) == 2 );
dataFileVersion = 0;
indexFileVersion = 0;
multiKeyIndexBits = 0;
@@ -145,7 +145,7 @@ namespace mongo {
bool checkNsFilesOnLoad = true;
NOINLINE_DECL void NamespaceIndex::_init() {
- assert( !ht );
+ verify( !ht );
Lock::assertWriteLocked(database_);
@@ -182,7 +182,7 @@ namespace mongo {
if( f.create(pathString, l, true) ) {
getDur().createdFile(pathString, l); // always a new file
len = l;
- assert( len == lenForNewNsFiles );
+ verify( len == lenForNewNsFiles );
p = f.getView();
}
}
@@ -194,7 +194,7 @@ namespace mongo {
}
- assert( len <= 0x7fffffff );
+ verify( len <= 0x7fffffff );
ht = new HashTable<Namespace,NamespaceDetails>(p, (int) len, "namespace index");
if( checkNsFilesOnLoad )
ht->iterAll(namespaceOnLoadCallback);
@@ -206,7 +206,7 @@ namespace mongo {
l->push_back( (string)k );
}
void NamespaceIndex::getNamespaces( list<string>& tofill , bool onlyCollections ) const {
- assert( onlyCollections ); // TODO: need to implement this
+ verify( onlyCollections ); // TODO: need to implement this
// need boost::bind or something to make this less ugly
if ( ht )
@@ -288,7 +288,7 @@ namespace mongo {
to go in a forward direction which is important for performance. */
int regionlen = r->lengthWithHeaders;
extentLoc.set(loc.a(), r->extentOfs);
- assert( r->extentOfs < loc.getOfs() );
+ verify( r->extentOfs < loc.getOfs() );
DEBUGGING out() << "TEMP: alloc() returns " << loc.toString() << ' ' << ns << " lentoalloc:" << lenToAlloc << " ext:" << extentLoc.toString() << endl;
@@ -386,7 +386,7 @@ namespace mongo {
const DeletedRecord *bmr = bestmatch.drec();
*getDur().writing(bestprev) = bmr->nextDeleted;
bmr->nextDeleted.writing().setInvalid(); // defensive.
- assert(bmr->extentOfs < bestmatch.getOfs());
+ verify(bmr->extentOfs < bestmatch.getOfs());
}
return bestmatch;
@@ -442,7 +442,7 @@ namespace mongo {
out() << " fr: " << e.ext()->firstRecord.toString() <<
" lr: " << e.ext()->lastRecord.toString() << " extent->len: " << e.ext()->length << '\n';
}
- assert( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
+ verify( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
}
}
@@ -486,7 +486,7 @@ namespace mongo {
/* extra space for indexes when more than 10 */
NamespaceDetails::Extra* NamespaceIndex::newExtra(const char *ns, int i, NamespaceDetails *d) {
Lock::assertWriteLocked(ns);
- assert( i >= 0 && i <= 1 );
+ verify( i >= 0 && i <= 1 );
Namespace n(ns);
Namespace extra(n.extraName(i).c_str()); // throws userexception if ns name too long
@@ -505,13 +505,13 @@ namespace mongo {
Extra *e = ni->newExtra(ns, i, this);
long ofs = e->ofsFrom(this);
if( i == 0 ) {
- assert( extraOffset == 0 );
+ verify( extraOffset == 0 );
*getDur().writing(&extraOffset) = ofs;
- assert( extra() == e );
+ verify( extra() == e );
}
else {
Extra *hd = extra();
- assert( hd->next(this) == 0 );
+ verify( hd->next(this) == 0 );
hd->setNext(ofs);
}
return e;
@@ -550,7 +550,7 @@ namespace mongo {
e->setNext( nxt->ofsFrom(this) );
e = nxt;
}
- assert( extraOffset );
+ verify( extraOffset );
}
}
@@ -571,7 +571,7 @@ namespace mongo {
long long NamespaceDetails::storageSize( int * numExtents , BSONArrayBuilder * extentInfo ) const {
Extent * e = firstExtent.ext();
- assert( e );
+ verify( e );
long long total = 0;
int n = 0;
@@ -617,9 +617,9 @@ namespace mongo {
/*static*/ NOINLINE_DECL NamespaceDetailsTransient& NamespaceDetailsTransient::make_inlock(const char *ns) {
shared_ptr< NamespaceDetailsTransient > &t = _nsdMap[ ns ];
- assert( t.get() == 0 );
+ verify( t.get() == 0 );
Database *database = cc().database();
- assert( database );
+ verify( database );
if( _nsdMap.size() % 20000 == 10000 ) {
// so we notice if insanely large #s
log() << "opening namespace " << ns << endl;
@@ -707,9 +707,9 @@ namespace mongo {
void renameNamespace( const char *from, const char *to, bool stayTemp) {
NamespaceIndex *ni = nsindex( from );
- assert( ni );
- assert( ni->details( from ) );
- assert( ! ni->details( to ) );
+ verify( ni );
+ verify( ni->details( from ) );
+ verify( ! ni->details( to ) );
// Our namespace and index details will move to a different
// memory location. The only references to namespace and
@@ -737,7 +737,7 @@ namespace mongo {
nsToDatabase(from, database);
string s = database;
s += ".system.namespaces";
- assert( Helpers::findOne( s.c_str(), BSON( "name" << from ), oldSpec ) );
+ verify( Helpers::findOne( s.c_str(), BSON( "name" << from ), oldSpec ) );
BSONObjBuilder newSpecB;
BSONObjIterator i( oldSpec.getObjectField( "options" ) );
diff --git a/src/mongo/db/namespace_details.h b/src/mongo/db/namespace_details.h
index 595b1dabcc0..d2434fb4931 100644
--- a/src/mongo/db/namespace_details.h
+++ b/src/mongo/db/namespace_details.h
@@ -114,8 +114,8 @@ namespace mongo {
private:
unsigned reserved2;
unsigned reserved3;
- Extra(const Extra&) { assert(false); }
- Extra& operator=(const Extra& r) { assert(false); return *this; }
+ Extra(const Extra&) { verify(false); }
+ Extra& operator=(const Extra& r) { verify(false); return *this; }
public:
Extra() { }
long ofsFrom(NamespaceDetails *d) {
@@ -190,7 +190,7 @@ namespace mongo {
/** get the IndexDetails for the index currently being built in the background. (there is at most one) */
IndexDetails& inProgIdx() {
- DEV assert(indexBuildInProgress);
+ DEV verify(indexBuildInProgress);
return idx(nIndexes);
}
@@ -479,7 +479,7 @@ namespace mongo {
SimpleMutex::scoped_lock lk(_isMutex);
if ( ! spec._finishedInit ) {
spec.reset( details );
- assert( spec._finishedInit );
+ verify( spec._finishedInit );
}
}
return spec;
diff --git a/src/mongo/db/nonce.cpp b/src/mongo/db/nonce.cpp
index 9ea78f01f7a..027bc2219d2 100644
--- a/src/mongo/db/nonce.cpp
+++ b/src/mongo/db/nonce.cpp
@@ -62,8 +62,8 @@ namespace mongo {
massert(10355 , "devrandom failed", !_devrandom->fail());
#elif defined(_WIN32)
unsigned a=0, b=0;
- assert( rand_s(&a) == 0 );
- assert( rand_s(&b) == 0 );
+ verify( rand_s(&a) == 0 );
+ verify( rand_s(&b) == 0 );
n = (((unsigned long long)a)<<32) | b;
#else
n = (((unsigned long long)random())<<32) | random();
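
Editor's note: both fallback branches above widen two 32-bit random draws into one 64-bit nonce; the shift-and-or is the whole trick. A sketch with a generic source, since rand_s is Windows-only:

    // Combine two independent 32-bit values into one 64-bit nonce.
    unsigned long long makeNonce(unsigned a, unsigned b) {
        return (((unsigned long long)a) << 32) | b;
    }
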
diff --git a/src/mongo/db/oplog.cpp b/src/mongo/db/oplog.cpp
index 9f42ab86fc0..458dc2e177f 100644
--- a/src/mongo/db/oplog.cpp
+++ b/src/mongo/db/oplog.cpp
@@ -66,7 +66,7 @@ namespace mongo {
if ( rsOplogDetails == 0 ) {
Client::Context ctx( logns , dbpath, false);
localDB = ctx.db();
- assert( localDB );
+ verify( localDB );
rsOplogDetails = nsdetails(logns);
massert(13389, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
}
@@ -143,7 +143,7 @@ namespace mongo {
}
else {
// must be initiation
- assert( *ns == 0 );
+ verify( *ns == 0 );
hashNew = 0;
}
@@ -168,13 +168,13 @@ namespace mongo {
int len = posz + obj.objsize() + 1 + 2 /*o:*/;
Record *r;
- DEV assert( logNS == 0 );
+ DEV verify( logNS == 0 );
{
const char *logns = rsoplog;
if ( rsOplogDetails == 0 ) {
Client::Context ctx( logns , dbpath, false);
localDB = ctx.db();
- assert( localDB );
+ verify( localDB );
rsOplogDetails = nsdetails(logns);
massert(13347, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
}
@@ -264,16 +264,16 @@ namespace mongo {
if ( localOplogMainDetails == 0 ) {
Client::Context ctx( logNS , dbpath, false);
localDB = ctx.db();
- assert( localDB );
+ verify( localDB );
localOplogMainDetails = nsdetails(logNS);
- assert( localOplogMainDetails );
+ verify( localOplogMainDetails );
}
Client::Context ctx( logNS , localDB, false );
r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, logNS, len);
}
else {
Client::Context ctx( logNS, dbpath, false );
- assert( nsdetails( logNS ) );
+ verify( nsdetails( logNS ) );
// first we allocate the space, then we fill it below.
r = theDataFileMgr.fast_oplog_insert( nsdetails( logNS ), logNS, len);
}
@@ -570,19 +570,19 @@ namespace mongo {
OpTime t;
for ( int i = 0; i < 10; i++ ) {
OpTime s = OpTime::_now();
- assert( s != t );
+ verify( s != t );
t = s;
}
OpTime q = t;
- assert( q == t );
- assert( !(q != t) );
+ verify( q == t );
+ verify( !(q != t) );
}
} testoptime;
int _dummy_z;
void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
- DEV assert( !d.dbMutex.isWriteLocked() );
+ DEV verify( !d.dbMutex.isWriteLocked() );
Client *c = currentClient.get();
if( c == 0 ) {
@@ -823,7 +823,7 @@ namespace mongo {
if ( opType[1] == 0 )
deleteObjects(ns, o, /*justOne*/ fields[3].booleanSafe());
else
- assert( opType[1] == 'b' ); // "db" advertisement
+ verify( opType[1] == 'b' ); // "db" advertisement
}
else if ( *opType == 'c' ) {
opCounters->gotCommand();
diff --git a/src/mongo/db/oplog.h b/src/mongo/db/oplog.h
index 6d39265310a..6a7a64d530b 100644
--- a/src/mongo/db/oplog.h
+++ b/src/mongo/db/oplog.h
@@ -79,7 +79,7 @@ namespace mongo {
bool done() const { return !_findingStart; }
/** @return cursor pointing to the first matching op, if done(). */
- shared_ptr<Cursor> cursor() { assert( done() ); return _c; }
+ shared_ptr<Cursor> cursor() { verify( done() ); return _c; }
/** Iterate the cursor, to continue trying to find matching op. */
void next();
diff --git a/src/mongo/db/oplogreader.h b/src/mongo/db/oplogreader.h
index 72eef31a718..fb8d607b01c 100644
--- a/src/mongo/db/oplogreader.h
+++ b/src/mongo/db/oplogreader.h
@@ -50,7 +50,7 @@ namespace mongo {
is needed; if not fine, but if so, need to change.
*//*
void query(const char *ns, const BSONObj& query) {
- assert( !haveCursor() );
+ verify( !haveCursor() );
cursor.reset( _conn->query(ns, query, 0, 0, 0, QueryOption_SlaveOk).release() );
}*/
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 4f711caf5b8..02ce88f010e 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -104,7 +104,7 @@ namespace mongo {
if ( ! match )
continue;
- assert( !dup ); // can't be a dup, we deleted it!
+ verify( !dup ); // can't be a dup, we deleted it!
if ( !justOne ) {
/* NOTE: this is SLOW. this is not good, noteLocation() was designed to be called across getMore
diff --git a/src/mongo/db/ops/query.cpp b/src/mongo/db/ops/query.cpp
index ee2b29bfbbf..f04267395f9 100644
--- a/src/mongo/db/ops/query.cpp
+++ b/src/mongo/db/ops/query.cpp
@@ -45,7 +45,7 @@ namespace mongo {
throw;
}
catch ( AssertionException& e ) {
- assert( e.getCode() != SendStaleConfigCode && e.getCode() != RecvStaleConfigCode );
+ verify( e.getCode() != SendStaleConfigCode && e.getCode() != RecvStaleConfigCode );
e.getInfo().append( anObjBuilder , "assertion" , "assertionCode" );
curop.debug().exceptionInfo = e.getInfo();
@@ -138,7 +138,7 @@ namespace mongo {
}
p.release();
bool ok = ClientCursor::erase(cursorid);
- assert(ok);
+ verify(ok);
cursorid = 0;
cc = 0;
break;
@@ -188,7 +188,7 @@ namespace mongo {
if ( cc ) {
if ( c->supportYields() ) {
ClientCursor::YieldData data;
- assert( cc->prepareToYield( data ) );
+ verify( cc->prepareToYield( data ) );
}
else {
cc->c()->noteLocation();
@@ -227,7 +227,7 @@ namespace mongo {
}
shared_ptr<ExplainQueryInfo> NoExplainStrategy::_doneQueryInfo() {
- assert( false );
+ verify( false );
return shared_ptr<ExplainQueryInfo>();
}
@@ -317,7 +317,7 @@ namespace mongo {
}
}
BSONObj ret = _cursor->current();
- assert( ret.isValid() );
+ verify( ret.isValid() );
return ret;
}
@@ -391,17 +391,17 @@ namespace mongo {
ScanAndOrder *
ReorderBuildStrategy::newScanAndOrder( const QueryPlan::Summary &queryPlan ) const {
- assert( !_parsedQuery.getOrder().isEmpty() );
- assert( _cursor->ok() );
+ verify( !_parsedQuery.getOrder().isEmpty() );
+ verify( _cursor->ok() );
const FieldRangeSet *fieldRangeSet = 0;
if ( queryPlan.valid() ) {
fieldRangeSet = queryPlan._fieldRangeSetMulti.get();
}
else {
- assert( _queryOptimizerCursor );
+ verify( _queryOptimizerCursor );
fieldRangeSet = _queryOptimizerCursor->initialFieldRangeSet();
}
- assert( fieldRangeSet );
+ verify( fieldRangeSet );
return new ScanAndOrder( _parsedQuery.getSkip(),
_parsedQuery.getNumToReturn(),
_parsedQuery.getOrder(),
@@ -631,7 +631,7 @@ namespace mongo {
NamespaceDetailsTransient::getCursor( ns, query, order, QueryPlanSelectionPolicy::any(),
0, &pq, &queryPlan );
}
- assert( cursor );
+ verify( cursor );
QueryResponseBuilder queryResponseBuilder( pq, cursor, queryPlan, oldPlan );
bool saveClientCursor = false;
@@ -928,10 +928,10 @@ namespace mongo {
return queryWithQueryOptimizer( m, queryOptions, ns, jsobj, curop, query, order,
pq_shared, oldPlan, shardingVersionAtStart, result );
} catch ( const QueryRetryException & ) {
- assert( retry == 0 );
+ verify( retry == 0 );
}
}
- assert( false );
+ verify( false );
return 0;
}
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 3cbf5fcc085..fa5121cbb70 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -279,7 +279,7 @@ namespace mongo {
}
ms.pushStartSize = n;
- assert( ms.pushStartSize == in.embeddedObject().nFields() );
+ verify( ms.pushStartSize == in.embeddedObject().nFields() );
bb.done();
break;
}
@@ -300,14 +300,14 @@ namespace mongo {
switch( in.type() ) {
case NumberInt: x = x&e.numberInt(); break;
case NumberLong: y = y&e.numberLong(); break;
- default: assert( 0 );
+ default: verify( 0 );
}
}
else if ( str::equals(e.fieldName(), "or") ) {
switch( in.type() ) {
case NumberInt: x = x|e.numberInt(); break;
case NumberLong: y = y|e.numberLong(); break;
- default: assert( 0 );
+ default: verify( 0 );
}
}
else {
@@ -318,7 +318,7 @@ namespace mongo {
switch( in.type() ) {
case NumberInt: b.append( shortFieldName , x ); break;
case NumberLong: b.append( shortFieldName , y ); break;
- default: assert( 0 );
+ default: verify( 0 );
}
break;
@@ -552,7 +552,7 @@ namespace mongo {
newObjBuilder.appendAs( newVal , shortFieldName );
BSONObjBuilder b;
b.appendAs( newVal, shortFieldName );
- assert( _objData.isEmpty() );
+ verify( _objData.isEmpty() );
_objData = b.obj();
newVal = _objData.firstElement();
}
@@ -560,7 +560,7 @@ namespace mongo {
void ModSetState::applyModsInPlace( bool isOnDisk ) {
// TODO i think this assert means that we can get rid of the isOnDisk param
// and just use isOwned as the determination
- DEV assert( isOnDisk == ! _obj.isOwned() );
+ DEV verify( isOnDisk == ! _obj.isOwned() );
for ( ModStateHolder::iterator i = _mods.begin(); i != _mods.end(); ++i ) {
ModState& m = *i->second;
@@ -581,7 +581,7 @@ namespace mongo {
// this should have been handled by prepare
break;
case Mod::POP:
- assert( m.old.eoo() || ( m.old.isABSONObj() && m.old.Obj().isEmpty() ) );
+ verify( m.old.eoo() || ( m.old.isABSONObj() && m.old.Obj().isEmpty() ) );
break;
// [dm] the BSONElementManipulator statements below are for replication (correct?)
case Mod::INC:
@@ -973,12 +973,12 @@ namespace mongo {
else {
BSONObj newObj = mss->createNewFromMods();
checkTooLarge(newObj);
- assert(nsdt);
+ verify(nsdt);
theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
}
if ( logop ) {
- DEV assert( mods->size() );
+ DEV verify( mods->size() );
BSONObj pattern = patternOrig;
if ( mss->haveArrayDepMod() ) {
@@ -1003,7 +1003,7 @@ namespace mongo {
// regular update
BSONElementManipulator::lookForTimestamps( updateobj );
checkNoMods( updateobj );
- assert(nsdt);
+ verify(nsdt);
theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug );
if ( logop ) {
logOp("u", ns, updateobj, &patternOrig, 0, fromMigrate );
@@ -1120,7 +1120,7 @@ namespace mongo {
// The Cursor must have a Matcher to record an elemMatchKey. But currently
// a modifier on a dynamic array field may be applied even if there is no
// elemMatchKey, so a matcher cannot be required.
- //assert( c->matcher() );
+ //verify( c->matcher() );
details.requestElemMatchKey();
}
@@ -1236,7 +1236,7 @@ namespace mongo {
}
if ( logop ) {
- DEV assert( mods->size() );
+ DEV verify( mods->size() );
if ( mss->haveArrayDepMod() ) {
BSONObjBuilder patternBuilder;
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index a4934302e7b..6a37bba1baa 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -136,7 +136,7 @@ namespace mongo {
manip.setInt( elt.numberInt() + in.numberInt() );
break;
default:
- assert(0);
+ verify(0);
}
}
void IncrementMe( BSONElement& in ) const {
@@ -152,7 +152,7 @@ namespace mongo {
manip.SetInt( elt.numberInt() + in.numberInt() );
break;
default:
- assert(0);
+ verify(0);
}
}
@@ -298,7 +298,7 @@ namespace mongo {
bool _hasDynamicArray;
static Mod::Op opFromStr( const char *fn ) {
- assert( fn[0] == '$' );
+ verify( fn[0] == '$' );
switch( fn[1] ) {
case 'i': {
if ( fn[2] == 'n' && fn[3] == 'c' && fn[4] == 0 )
@@ -513,7 +513,7 @@ namespace mongo {
case NumberInt:
b.append( n , incint ); break;
default:
- assert(0);
+ verify(0);
}
}
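
Editor's note: opFromStr() above dispatches on the character after '$' before matching the rest of the modifier name, keeping the common case to a switch. A reduced sketch covering just two operators (hypothetical; the real table handles $inc, $set, $push and the rest):

    #include <cstring>

    enum Op { INC, SET, UNKNOWN };

    Op opFromStr(const char* fn) {
        // callers guarantee fn[0] == '$', as the verify() above enforces
        switch (fn[1]) {
        case 'i':
            if (std::strcmp(fn + 2, "nc") == 0) return INC;
            break;
        case 's':
            if (std::strcmp(fn + 2, "et") == 0) return SET;
            break;
        }
        return UNKNOWN;
    }
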
diff --git a/src/mongo/db/pagefault.cpp b/src/mongo/db/pagefault.cpp
index 713c60c7c50..a782921d4ba 100644
--- a/src/mongo/db/pagefault.cpp
+++ b/src/mongo/db/pagefault.cpp
@@ -11,7 +11,7 @@ namespace mongo {
PageFaultException::PageFaultException(Record *_r)
{
- assert( cc().allowedToThrowPageFaultException() );
+ verify( cc().allowedToThrowPageFaultException() );
cc().getPageFaultRetryableSection()->didLap();
r = _r;
era = LockMongoFilesShared::getEra();
@@ -19,7 +19,7 @@ namespace mongo {
}
void PageFaultException::touch() {
- assert( !d.dbMutex.atLeastReadLocked() );
+ verify( !d.dbMutex.atLeastReadLocked() );
LockMongoFilesShared lk;
if( LockMongoFilesShared::getEra() != era ) {
// files opened and closed. we don't try to handle but just bail out; this is much simpler
@@ -35,7 +35,7 @@ namespace mongo {
}
PageFaultRetryableSection::PageFaultRetryableSection() {
_laps = 0;
- assert( cc()._pageFaultRetryableSection == 0 );
+ verify( cc()._pageFaultRetryableSection == 0 );
if( d.dbMutex.atLeastReadLocked() ) {
cc()._pageFaultRetryableSection = 0;
if( debug || logLevel > 2 ) {
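
Editor's note: touch() above refuses to proceed if LockMongoFilesShared::getEra() moved between throwing the exception and the retry: any file open or close in the interim may have invalidated the faulting Record, so the code bails out rather than try to recover. A sketch of that era-stamp pattern, reduced to a plain counter (not the real LockMongoFilesShared interface):

    // A validity stamp: remember the global era at capture time and
    // treat the cached pointer as suspect if the era has moved since.
    static unsigned filesEra = 0;   // bumped whenever a data file opens or closes

    struct PageFaultRetry {
        unsigned era;
        PageFaultRetry() : era(filesEra) {}
        bool stillValid() const { return filesEra == era; }
    };
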
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 877a2e92981..e7eb94dfd0e 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -82,7 +82,7 @@ namespace mongo {
bool inDBRepair = false;
struct doingRepair {
doingRepair() {
- assert( ! inDBRepair );
+ verify( ! inDBRepair );
inDBRepair = true;
}
~doingRepair() {
@@ -117,7 +117,7 @@ namespace mongo {
BackgroundOperation::BackgroundOperation(const char *ns) : _ns(ns) {
SimpleMutex::scoped_lock lk(m);
dbsInProg[_ns.db]++;
- assert( nsInProg.count(_ns.ns()) == 0 );
+ verify( nsInProg.count(_ns.ns()) == 0 );
nsInProg.insert(_ns.ns());
}
@@ -221,7 +221,7 @@ namespace mongo {
if ( sz > 1000000000 )
sz = 1000000000;
int z = ((int)sz) & 0xffffff00;
- assert( z > len );
+ verify( z > len );
return z;
}
@@ -274,7 +274,7 @@ namespace mongo {
while( i.more() ) {
BSONElement e = i.next();
int size = int( e.number() );
- assert( size <= 0x7fffffff );
+ verify( size <= 0x7fffffff );
// $nExtents is just for testing - always allocate new extents
// rather than reuse existing extents so we have some predictability
// in the extent size used by our tests
@@ -284,9 +284,9 @@ namespace mongo {
else if ( int( e.number() ) > 0 ) {
// We create '$nExtents' extents, each of size 'size'.
int nExtents = int( e.number() );
- assert( size <= 0x7fffffff );
+ verify( size <= 0x7fffffff );
for ( int i = 0; i < nExtents; ++i ) {
- assert( size <= 0x7fffffff );
+ verify( size <= 0x7fffffff );
// $nExtents is just for testing - always allocate new extents
// rather than reuse existing extents so we have some predictability
// in the extent size used by our tests
@@ -308,7 +308,7 @@ namespace mongo {
}
NamespaceDetails *d = nsdetails(ns);
- assert(d);
+ verify(d);
bool ensure = false;
if ( options.getField( "autoIndexId" ).type() ) {
@@ -408,24 +408,24 @@ namespace mongo {
/** @return true if found and opened. if uninitialized (prealloc only) does not open. */
bool MongoDataFile::openExisting( const char *filename ) {
- assert( _mb == 0 );
+ verify( _mb == 0 );
if( !boost::filesystem::exists(filename) )
return false;
if( !mmf.open(filename,false) ) {
dlog(2) << "info couldn't open " << filename << " probably end of datafile list" << endl;
return false;
}
- _mb = mmf.getView(); assert(_mb);
+ _mb = mmf.getView(); verify(_mb);
unsigned long long sz = mmf.length();
- assert( sz <= 0x7fffffff );
- assert( sz % 4096 == 0 );
+ verify( sz <= 0x7fffffff );
+ verify( sz % 4096 == 0 );
if( sz < 64*1024*1024 && !cmdLine.smallfiles ) {
if( sz >= 16*1024*1024 && sz % (1024*1024) == 0 ) {
log() << "info openExisting file size " << sz << " but cmdLine.smallfiles=false" << endl;
}
else {
log() << "openExisting size " << sz << " less then minimum file size expectation " << filename << endl;
- assert(false);
+ verify(false);
}
}
check(_mb);
@@ -447,8 +447,8 @@ namespace mongo {
if ( size > maxSize() )
size = maxSize();
- assert( size >= 64*1024*1024 || cmdLine.smallfiles );
- assert( size % 4096 == 0 );
+ verify( size >= 64*1024*1024 || cmdLine.smallfiles );
+ verify( size % 4096 == 0 );
if ( preallocateOnly ) {
if ( cmdLine.prealloc ) {
@@ -458,11 +458,11 @@ namespace mongo {
}
{
- assert( _mb == 0 );
+ verify( _mb == 0 );
unsigned long long sz = size;
if( mmf.create(filename, sz, false) )
_mb = mmf.getView();
- assert( sz <= 0x7fffffff );
+ verify( sz <= 0x7fffffff );
size = (int) sz;
}
check(_mb);
@@ -477,11 +477,11 @@ namespace mongo {
NamespaceIndex *ni = nsindex(ns);
NamespaceDetails *details = ni->details(ns);
if ( details ) {
- assert( !details->lastExtent.isNull() );
- assert( !details->firstExtent.isNull() );
+ verify( !details->lastExtent.isNull() );
+ verify( !details->firstExtent.isNull() );
getDur().writingDiskLoc(e->xprev) = details->lastExtent;
getDur().writingDiskLoc(details->lastExtent.ext()->xnext) = eloc;
- assert( !eloc.isNull() );
+ verify( !eloc.isNull() );
getDur().writingDiskLoc(details->lastExtent) = eloc;
}
else {
@@ -500,7 +500,7 @@ namespace mongo {
{
// make sizes align with VM page size
int newSize = (approxSize + 0xfff) & 0xfffff000;
- assert( newSize >= 0 );
+ verify( newSize >= 0 );
if( newSize < Extent::maxSize() )
approxSize = newSize;
}
@@ -513,7 +513,7 @@ namespace mongo {
/* note there could be a lot of looping here if db just started and
no files are open yet. we might want to do something about that. */
if ( loops > 8 ) {
- assert( loops < 10000 );
+ verify( loops < 10000 );
out() << "warning: loops=" << loops << " fileno:" << fileNo << ' ' << ns << '\n';
}
log() << "newExtent: " << ns << " file " << fileNo << " full, adding a new file\n";
@@ -694,7 +694,7 @@ namespace mongo {
if( firstEmptyRegion.isNull() )
return 0;
- assert(len > 0);
+ verify(len > 0);
int newRecSize = len + Record::HeaderSize;
DiskLoc newRecordLoc = firstEmptyRegion;
Record *r = getRecord(newRecordLoc);
@@ -709,13 +709,13 @@ namespace mongo {
r->lengthWithHeaders = newRecSize;
r->next.markAsFirstOrLastInExtent(this); // we're now last in the extent
if( !lastRecord.isNull() ) {
- assert(getRecord(lastRecord)->next.lastInExtent()); // it was the last one
+ verify(getRecord(lastRecord)->next.lastInExtent()); // it was the last one
getRecord(lastRecord)->next.set(newRecordLoc); // until now
r->prev.set(lastRecord);
}
else {
r->prev.markAsFirstOrLastInExtent(this); // we are the first in the extent
- assert( firstRecord.isNull() );
+ verify( firstRecord.isNull() );
firstRecord = newRecordLoc;
}
lastRecord = newRecordLoc;
@@ -843,13 +843,13 @@ namespace mongo {
*/
void freeExtents(DiskLoc firstExt, DiskLoc lastExt) {
{
- assert( !firstExt.isNull() && !lastExt.isNull() );
+ verify( !firstExt.isNull() && !lastExt.isNull() );
Extent *f = firstExt.ext();
Extent *l = lastExt.ext();
- assert( f->xprev.isNull() );
- assert( l->xnext.isNull() );
- assert( f==l || !f->xnext.isNull() );
- assert( f==l || !l->xprev.isNull() );
+ verify( f->xprev.isNull() );
+ verify( l->xnext.isNull() );
+ verify( f==l || !f->xnext.isNull() );
+ verify( f==l || !l->xprev.isNull() );
}
string s = cc().database()->name + FREELIST_NS;
@@ -866,7 +866,7 @@ namespace mongo {
}
else {
DiskLoc a = freeExtents->firstExtent;
- assert( a.ext()->xprev.isNull() );
+ verify( a.ext()->xprev.isNull() );
getDur().writingDiskLoc( a.ext()->xprev ) = lastExt;
getDur().writingDiskLoc( lastExt.ext()->xnext ) = a;
getDur().writingDiskLoc( freeExtents->firstExtent ) = firstExt;
@@ -883,7 +883,7 @@ namespace mongo {
BackgroundOperation::assertNoBgOpInProgForNs(nsToDrop.c_str());
NamespaceString s(nsToDrop);
- assert( s.db == cc().database()->name );
+ verify( s.db == cc().database()->name );
if( s.isSystem() ) {
if( s.coll == "system.profile" )
uassert( 10087 , "turn off profiling before dropping system.profile collection", cc().database()->profile == 0 );
@@ -920,7 +920,7 @@ namespace mongo {
if ( d->nIndexes != 0 ) {
try {
- assert( dropIndexes(d, name.c_str(), "*", errmsg, result, true) );
+ verify( dropIndexes(d, name.c_str(), "*", errmsg, result, true) );
}
catch( DBException& e ) {
stringstream ss;
@@ -928,7 +928,7 @@ namespace mongo {
ss << " cause: " << e.what();
uasserted(12503,ss.str());
}
- assert( d->nIndexes == 0 );
+ verify( d->nIndexes == 0 );
}
log(1) << "\t dropIndexes done" << endl;
result.append("ns", name.c_str());
@@ -1078,8 +1078,8 @@ namespace mongo {
BSONObj objOld(toupdate);
BSONObj objNew(_buf);
- DEV assert( objNew.objsize() == _len );
- DEV assert( objNew.objdata() == _buf );
+ DEV verify( objNew.objsize() == _len );
+ DEV verify( objNew.objdata() == _buf );
if( !objNew.hasElement("_id") && objOld.hasElement("_id") ) {
/* add back the old _id value if the update removes it. Note this implementation is slow
@@ -1088,7 +1088,7 @@ namespace mongo {
*/
BSONObjBuilder b;
BSONElement e;
- assert( objOld.getObjectID(e) );
+ verify( objOld.getObjectID(e) );
b.append(e); // put _id first, for best performance
b.appendElements(objNew);
objNew = b.obj();
@@ -1138,7 +1138,7 @@ namespace mongo {
problem() << " caught assertion update unindex " << idx.indexNamespace() << endl;
}
}
- assert( !dl.isNull() );
+ verify( !dl.isNull() );
BSONObj idxKey = idx.info.obj().getObjectField("key");
Ordering ordering = Ordering::make(idxKey);
keyUpdates += changes[x].added.size();
@@ -1166,7 +1166,7 @@ namespace mongo {
}
int Extent::followupSize(int len, int lastExtentLen) {
- assert( len < Extent::maxSize() );
+ verify( len < Extent::maxSize() );
int x = initialSize(len);
// changed from 1.20 to 1.35 in v2.1.x to get to larger extent size faster
int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.35);
@@ -1182,7 +1182,7 @@ namespace mongo {
}
sz = ((int)sz) & 0xffffff00;
- assert( sz > len );
+ verify( sz > len );
return sz;
}
@@ -1199,7 +1199,7 @@ namespace mongo {
IndexInterface& ii = idx.idxInterface();
Ordering ordering = Ordering::make(order);
- assert( !recordLoc.isNull() );
+ verify( !recordLoc.isNull() );
try {
// we can't do the two step method with multi keys as insertion of one key changes the indexes
@@ -1321,7 +1321,7 @@ namespace mongo {
if( ++n == 2 ) {
d->setIndexIsMultikey(idxNo);
}
- assert( !recordLoc.isNull() );
+ verify( !recordLoc.isNull() );
try {
ii.bt_insert(idx.head, recordLoc, *i, ordering, dupsAllowed, idx);
}
@@ -1376,7 +1376,7 @@ namespace mongo {
BtreeBuilder<V> btBuilder(dupsAllowed, idx);
BSONObj keyLast;
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
- assert( pm == op->setMessage( "index: (2/3) btree bottom up" , phase1->nkeys , 10 ) );
+ verify( pm == op->setMessage( "index: (2/3) btree bottom up" , phase1->nkeys , 10 ) );
while( i->more() ) {
RARELY killCurrentOp.checkForInterrupt();
BSONObjExternalSorter::Data d = i->next();
@@ -1479,7 +1479,7 @@ namespace mongo {
else if( idx.version() == 1 )
buildBottomUpPhases2And3<V1>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
else
- assert(false);
+ verify(false);
if( dropDups )
log() << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;
@@ -1591,14 +1591,14 @@ namespace mongo {
unsigned long long n = 0;
prep(ns.c_str(), d);
- assert( idxNo == d->nIndexes );
+ verify( idxNo == d->nIndexes );
try {
idx.head.writing() = idx.idxInterface().addBucket(idx);
n = addExistingToIndex(ns.c_str(), d, idx, idxNo);
}
catch(...) {
if( cc().database() && nsdetails(ns.c_str()) == d ) {
- assert( idxNo == d->nIndexes );
+ verify( idxNo == d->nIndexes );
done(ns.c_str(), d);
}
else {
@@ -1606,7 +1606,7 @@ namespace mongo {
}
throw;
}
- assert( idxNo == d->nIndexes );
+ verify( idxNo == d->nIndexes );
done(ns.c_str(), d);
return n;
}
@@ -1641,9 +1641,9 @@ namespace mongo {
Timer t;
unsigned long long n;
- assert( !BackgroundOperation::inProgForNs(ns.c_str()) ); // should have been checked earlier, better not be...
- assert( d->indexBuildInProgress == 0 );
- assert( Lock::isWriteLocked(ns) );
+ verify( !BackgroundOperation::inProgForNs(ns.c_str()) ); // should have been checked earlier, better not be...
+ verify( d->indexBuildInProgress == 0 );
+ verify( Lock::isWriteLocked(ns) );
RecoverableIndexState recoverable( d );
// Build index spec here in case the collection is empty and the index details are invalid
@@ -1651,7 +1651,7 @@ namespace mongo {
if( inDBRepair || !background ) {
n = fastBuildIndex(ns.c_str(), d, idx, idxNo);
- assert( !idx.head.isNull() );
+ verify( !idx.head.isNull() );
}
else {
BackgroundIndexBuildJob j(ns.c_str());
@@ -1725,7 +1725,7 @@ namespace mongo {
IDToInsert_() {
type = (char) jstOID;
strcpy(_id, "_id");
- assert( sizeof(IDToInsert_) == 17 );
+ verify( sizeof(IDToInsert_) == 17 );
}
} idToInsert_;
struct IDToInsert : public BSONElement {
@@ -1906,7 +1906,7 @@ namespace mongo {
log() << "failed to drop index after a unique key error building it: " << errmsg << ' ' << tabletoidxns << ' ' << name << endl;
}
- assert( le && !saveerrmsg.empty() );
+ verify( le && !saveerrmsg.empty() );
raiseError(savecode,saveerrmsg.c_str());
throw;
}
@@ -1941,7 +1941,7 @@ namespace mongo {
string tabletoidxns;
BSONObj fixedIndexObject;
if ( addIndex ) {
- assert( obuf );
+ verify( obuf );
BSONObj io((const char *) obuf);
if( !prepareToBuildIndex(io, god, tabletoidxns, tableToIndex, fixedIndexObject ) ) {
// prepare creates _id itself, or this indicates to fail the build silently (such
@@ -1978,7 +1978,7 @@ namespace mongo {
lenWHdr = (int) (lenWHdr * d->paddingFactor);
if ( lenWHdr == 0 ) {
// old datafiles, backward compatible here.
- assert( d->paddingFactor == 0 );
+ verify( d->paddingFactor == 0 );
*getDur().writing(&d->paddingFactor) = 1.0;
lenWHdr = len + Record::HeaderSize;
}
@@ -2006,14 +2006,14 @@ namespace mongo {
}
if ( loc.isNull() ) {
log() << "insert: couldn't alloc space for object ns:" << ns << " capped:" << d->capped << endl;
- assert(d->capped);
+ verify(d->capped);
return DiskLoc();
}
if( earlyIndex ) {
// add record to indexes using two step method so we can do the reading outside a write lock
if ( d->nIndexes ) {
- assert( obuf );
+ verify( obuf );
BSONObj obj((const char *) obuf);
try {
indexRecordUsingTwoSteps(d, obj, loc, true);
@@ -2027,12 +2027,12 @@ namespace mongo {
}
// really allocate now
DiskLoc real = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
- assert( real == loc );
+ verify( real == loc );
}
Record *r = loc.rec();
{
- assert( r->lengthWithHeaders >= lenWHdr );
+ verify( r->lengthWithHeaders >= lenWHdr );
r = (Record*) getDur().writingPtr(r, lenWHdr);
if( addID ) {
/* a little effort was made here to avoid a double copy when we add an ID */
@@ -2099,17 +2099,17 @@ namespace mongo {
assumes ns is capped and no indexes
*/
Record* DataFileMgr::fast_oplog_insert(NamespaceDetails *d, const char *ns, int len) {
- assert( d );
- RARELY assert( d == nsdetails(ns) );
- DEV assert( d == nsdetails(ns) );
+ verify( d );
+ RARELY verify( d == nsdetails(ns) );
+ DEV verify( d == nsdetails(ns) );
DiskLoc extentLoc;
int lenWHdr = len + Record::HeaderSize;
DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
- assert( !loc.isNull() );
+ verify( !loc.isNull() );
Record *r = loc.rec();
- assert( r->lengthWithHeaders >= lenWHdr );
+ verify( r->lengthWithHeaders >= lenWHdr );
Extent *e = r->myExtent(loc);
if ( e->lastRecord.isNull() ) {
@@ -2163,8 +2163,8 @@ namespace mongo {
log(1) << "dropDatabase " << db << endl;
Lock::assertWriteLocked(db);
Database *d = cc().database();
- assert( d );
- assert( d->name == db );
+ verify( d );
+ verify( d->name == db );
BackgroundOperation::assertNoBgOpInProgForDb(d->name.c_str());
@@ -2293,8 +2293,8 @@ namespace mongo {
string localhost = ss.str();
problem() << "repairDatabase " << dbName << endl;
- assert( cc().database()->name == dbName );
- assert( cc().database()->path == dbpath );
+ verify( cc().database()->name == dbName );
+ verify( cc().database()->path == dbpath );
BackgroundOperation::assertNoBgOpInProgForDb(dbName);
@@ -2321,7 +2321,7 @@ namespace mongo {
{
// clone to temp location, which effectively does repair
Client::Context ctx( dbName, reservedPathString );
- assert( ctx.justCreated() );
+ verify( ctx.justCreated() );
res = cloneFrom(localhost.c_str(), errmsg, dbName,
/*logForReplication=*/false, /*slaveOk*/false, /*replauth*/false,
@@ -2381,7 +2381,7 @@ namespace mongo {
int i = 0;
int extra = 10; // should not be necessary, this is defensive in case there are missing files
while ( 1 ) {
- assert( i <= DiskLoc::MaxFiles );
+ verify( i <= DiskLoc::MaxFiles );
stringstream ss;
ss << c << i;
q = p / ss.str();
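
Editor's note: freeExtents() above first verify()s the chain invariants (null xprev at the head, null xnext at the tail) and then prepends the whole chain onto the database free list in constant time. A sketch of that splice with raw pointers (the real code routes every write through DiskLoc and the durability layer):

    struct Ext { Ext *xprev, *xnext; };

    // Prepend the chain [first..last] onto the current free-list head.
    Ext* spliceChainOnto(Ext* first, Ext* last, Ext* head) {
        // preconditions the hunk verifies: first->xprev == 0, last->xnext == 0
        if (head) {
            head->xprev = last;
            last->xnext = head;
        }
        return first;   // the chain's head becomes the new list head
    }
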
diff --git a/src/mongo/db/pdfile.h b/src/mongo/db/pdfile.h
index 908159ebd5b..d9ebb3f3a6e 100644
--- a/src/mongo/db/pdfile.h
+++ b/src/mongo/db/pdfile.h
@@ -215,7 +215,7 @@ namespace mongo {
DiskLoc nextInExtent(const DiskLoc& myLoc) {
if ( nextOfs == DiskLoc::NullOfs )
return DiskLoc();
- assert( nextOfs );
+ verify( nextOfs );
return DiskLoc(myLoc.a(), nextOfs);
}
@@ -302,15 +302,15 @@ namespace mongo {
DiskLoc reuse(const char *nsname, bool newUseIsAsCapped);
bool isOk() const { return magic == 0x41424344; }
- void assertOk() const { assert(isOk()); }
+ void assertOk() const { verify(isOk()); }
Record* newRecord(int len);
Record* getRecord(DiskLoc dl) {
- assert( !dl.isNull() );
- assert( dl.sameFile(myLoc) );
+ verify( !dl.isNull() );
+ verify( dl.sameFile(myLoc) );
int x = dl.getOfs() - myLoc.getOfs();
- assert( x > 0 );
+ verify( x > 0 );
return (Record *) (((char *) this) + x);
}
@@ -398,13 +398,13 @@ namespace mongo {
}
getDur().createdFile(filename, filelength);
- assert( HeaderSize == 8192 );
+ verify( HeaderSize == 8192 );
DataFileHeader *h = getDur().writing(this);
h->fileLength = filelength;
h->version = PDFILE_VERSION;
h->versionMinor = PDFILE_VERSION_MINOR;
h->unused.set( fileno, HeaderSize );
- assert( (data-(char*)this) == HeaderSize );
+ verify( (data-(char*)this) == HeaderSize );
h->unusedLength = fileLength - HeaderSize - 16;
}
}
@@ -481,7 +481,7 @@ namespace mongo {
return BSONObj(rec()->accessed());
}
inline DeletedRecord* DiskLoc::drec() const {
- assert( _a != -1 );
+ verify( _a != -1 );
DeletedRecord* dr = (DeletedRecord*) rec();
memconcept::is(dr, memconcept::concept::deletedrecord);
return dr;
@@ -493,7 +493,7 @@ namespace mongo {
template< class V >
inline
const BtreeBucket<V> * DiskLoc::btree() const {
- assert( _a != -1 );
+ verify( _a != -1 );
Record *r = rec();
memconcept::is(r, memconcept::concept::btreebucket, "", 8192);
return (const BtreeBucket<V> *) r->data;
@@ -510,7 +510,7 @@ namespace mongo {
inline NamespaceIndex* nsindex(const char *ns) {
Database *database = cc().database();
- assert( database );
+ verify( database );
memconcept::is(database, memconcept::concept::database, ns, sizeof(Database));
DEV {
char buf[256];
@@ -519,7 +519,7 @@ namespace mongo {
out() << "ERROR: attempt to write to wrong database\n";
out() << " ns:" << ns << '\n';
out() << " database->name:" << database->name << endl;
- assert( database->name == buf );
+ verify( database->name == buf );
}
}
return &database->namespaceIndex;
@@ -535,12 +535,12 @@ namespace mongo {
}
inline Extent* DataFileMgr::getExtent(const DiskLoc& dl) {
- assert( dl.a() != -1 );
+ verify( dl.a() != -1 );
return cc().database()->getFile(dl.a())->getExtent(dl);
}
inline Record* DataFileMgr::getRecord(const DiskLoc& dl) {
- assert( dl.a() != -1 );
+ verify( dl.a() != -1 );
Record* r = cc().database()->getFile(dl.a())->recordAt(dl);
return r;
}
@@ -548,7 +548,7 @@ namespace mongo {
BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
inline DeletedRecord* DataFileMgr::makeDeletedRecord(const DiskLoc& dl, int len) {
- assert( dl.a() != -1 );
+ verify( dl.a() != -1 );
return (DeletedRecord*) cc().database()->getFile(dl.a())->makeRecord(dl, sizeof(DeletedRecord));
}
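
Editor's note: getRecord() above resolves a record address with pure offset arithmetic: the extent and its records live in the same memory-mapped file, so a record sits at (char*)extent + (recordOfs - extentOfs), and the verify()s pin the preconditions (same file, positive offset). A sketch with an invented layout:

    struct Record { char data[1]; };

    struct Extent {
        int myOfs;   // this extent's offset within its data file
        Record* recordAt(int recOfs) {
            int x = recOfs - myOfs;   // distance from the start of this extent
            // callers guarantee x > 0 and that both offsets are in the same file
            return (Record*)(((char*)this) + x);
        }
    };
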
diff --git a/src/mongo/db/pipeline/accumulator.cpp b/src/mongo/db/pipeline/accumulator.cpp
index 84e61ca82a0..84b4bf25a7c 100755
--- a/src/mongo/db/pipeline/accumulator.cpp
+++ b/src/mongo/db/pipeline/accumulator.cpp
@@ -39,7 +39,7 @@ namespace mongo {
void Accumulator::opToBson(
BSONObjBuilder *pBuilder, string opName,
string fieldName, unsigned depth) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
BSONObjBuilder builder;
vpOperand[0]->addToBsonObj(&builder, opName, depth);
pBuilder->append(fieldName, builder.done());
@@ -52,7 +52,7 @@ namespace mongo {
void Accumulator::addToBsonArray(
BSONArrayBuilder *pBuilder, unsigned depth) const {
- assert(false); // these can't appear in arrays
+ verify(false); // these can't appear in arrays
}
void agg_framework_reservedErrors() {
diff --git a/src/mongo/db/pipeline/accumulator_add_to_set.cpp b/src/mongo/db/pipeline/accumulator_add_to_set.cpp
index 2e006caf55d..61a0ca5a39b 100755
--- a/src/mongo/db/pipeline/accumulator_add_to_set.cpp
+++ b/src/mongo/db/pipeline/accumulator_add_to_set.cpp
@@ -23,7 +23,7 @@
namespace mongo {
intrusive_ptr<const Value> AccumulatorAddToSet::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
if (prhs->getType() == Undefined)
@@ -37,7 +37,7 @@ namespace mongo {
If we didn't, then we'd get an array of arrays, with one array
from each shard that responds.
*/
- assert(prhs->getType() == Array);
+ verify(prhs->getType() == Array);
intrusive_ptr<ValueIterator> pvi(prhs->getArray());
while(pvi->more()) {
diff --git a/src/mongo/db/pipeline/accumulator_avg.cpp b/src/mongo/db/pipeline/accumulator_avg.cpp
index d9df112f52b..f166c185f9a 100755
--- a/src/mongo/db/pipeline/accumulator_avg.cpp
+++ b/src/mongo/db/pipeline/accumulator_avg.cpp
@@ -40,12 +40,12 @@ namespace mongo {
*/
intrusive_ptr<const Value> prhs(
vpOperand[0]->evaluate(pDocument));
- assert(prhs->getType() == Object);
+ verify(prhs->getType() == Object);
intrusive_ptr<Document> pShardDoc(prhs->getDocument());
intrusive_ptr<const Value> pSubTotal(
pShardDoc->getValue(subTotalName));
- assert(pSubTotal.get());
+ verify(pSubTotal.get());
BSONType subTotalType = pSubTotal->getType();
if ((totalType == NumberLong) || (subTotalType == NumberLong))
totalType = NumberLong;
diff --git a/src/mongo/db/pipeline/accumulator_first.cpp b/src/mongo/db/pipeline/accumulator_first.cpp
index 9c45e409237..937b260f136 100755
--- a/src/mongo/db/pipeline/accumulator_first.cpp
+++ b/src/mongo/db/pipeline/accumulator_first.cpp
@@ -23,7 +23,7 @@ namespace mongo {
intrusive_ptr<const Value> AccumulatorFirst::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
/* only remember the first value seen */
if (!pValue.get())
diff --git a/src/mongo/db/pipeline/accumulator_last.cpp b/src/mongo/db/pipeline/accumulator_last.cpp
index 3d929fc57c5..820907a1151 100755
--- a/src/mongo/db/pipeline/accumulator_last.cpp
+++ b/src/mongo/db/pipeline/accumulator_last.cpp
@@ -23,7 +23,7 @@ namespace mongo {
intrusive_ptr<const Value> AccumulatorLast::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
/* always remember the last value seen */
pValue = vpOperand[0]->evaluate(pDocument);
diff --git a/src/mongo/db/pipeline/accumulator_min_max.cpp b/src/mongo/db/pipeline/accumulator_min_max.cpp
index ce0151847d4..902f910dcb8 100755
--- a/src/mongo/db/pipeline/accumulator_min_max.cpp
+++ b/src/mongo/db/pipeline/accumulator_min_max.cpp
@@ -23,7 +23,7 @@ namespace mongo {
intrusive_ptr<const Value> AccumulatorMinMax::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
/* if this is the first value, just use it */
@@ -42,7 +42,7 @@ namespace mongo {
AccumulatorMinMax::AccumulatorMinMax(int theSense):
AccumulatorSingleValue(),
sense(theSense) {
- assert((sense == 1) || (sense == -1));
+ verify((sense == 1) || (sense == -1));
}
intrusive_ptr<Accumulator> AccumulatorMinMax::createMin(
diff --git a/src/mongo/db/pipeline/accumulator_push.cpp b/src/mongo/db/pipeline/accumulator_push.cpp
index b0c11dcf70f..932ca6361cd 100755
--- a/src/mongo/db/pipeline/accumulator_push.cpp
+++ b/src/mongo/db/pipeline/accumulator_push.cpp
@@ -23,7 +23,7 @@
namespace mongo {
intrusive_ptr<const Value> AccumulatorPush::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
if (prhs->getType() == Undefined)
@@ -37,7 +37,7 @@ namespace mongo {
If we didn't, then we'd get an array of arrays, with one array
from each shard that responds.
*/
- assert(prhs->getType() == Array);
+ verify(prhs->getType() == Array);
intrusive_ptr<ValueIterator> pvi(prhs->getArray());
while(pvi->more()) {
diff --git a/src/mongo/db/pipeline/accumulator_sum.cpp b/src/mongo/db/pipeline/accumulator_sum.cpp
index 7f268efcb32..26258c2f19a 100755
--- a/src/mongo/db/pipeline/accumulator_sum.cpp
+++ b/src/mongo/db/pipeline/accumulator_sum.cpp
@@ -23,7 +23,7 @@ namespace mongo {
intrusive_ptr<const Value> AccumulatorSum::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
/* upgrade to the widest type required to hold the result */
diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp
index f7a85459a99..a224f56eacb 100755
--- a/src/mongo/db/pipeline/document.cpp
+++ b/src/mongo/db/pipeline/document.cpp
@@ -16,8 +16,6 @@
#include "pch.h"
#include <boost/functional/hash.hpp>
-#undef assert
-#define assert MONGO_assert
#include "db/jsobj.h"
#include "db/pipeline/dependency_tracker.h"
#include "db/pipeline/document.h"
@@ -204,7 +202,7 @@ namespace mongo {
}
/* NOTREACHED */
- assert(false);
+ verify(false);
return 0;
}
@@ -220,7 +218,7 @@ namespace mongo {
}
pair<string, intrusive_ptr<const Value> > FieldIterator::next() {
- assert(more());
+ verify(more());
pair<string, intrusive_ptr<const Value> > result(
pDocument->vFieldName[index], pDocument->vpValue[index]);
++index;
diff --git a/src/mongo/db/pipeline/document.h b/src/mongo/db/pipeline/document.h
index 533f5b2fc30..0069c985ea9 100755
--- a/src/mongo/db/pipeline/document.h
+++ b/src/mongo/db/pipeline/document.h
@@ -245,7 +245,7 @@ namespace mongo {
}
inline Document::FieldPair Document::getField(size_t index) const {
- assert( index < vFieldName.size() );
+ verify( index < vFieldName.size() );
return FieldPair(vFieldName[index], vpValue[index]);
}
diff --git a/src/mongo/db/pipeline/document_source.cpp b/src/mongo/db/pipeline/document_source.cpp
index 25cc6c6dbd6..51224286402 100755
--- a/src/mongo/db/pipeline/document_source.cpp
+++ b/src/mongo/db/pipeline/document_source.cpp
@@ -37,7 +37,7 @@ namespace mongo {
}
void DocumentSource::setSource(DocumentSource *pTheSource) {
- assert(!pSource);
+ verify(!pSource);
pSource = pTheSource;
}
@@ -52,7 +52,7 @@ namespace mongo {
void DocumentSource::manageDependencies(
const intrusive_ptr<DependencyTracker> &pTracker) {
#ifdef MONGO_LATER_SERVER_4644
- assert(false); // identify any sources that need this but don't have it
+ verify(false); // identify any sources that need this but don't have it
#endif /* MONGO_LATER_SERVER_4644 */
}
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index e16843b1316..6d22426b89b 100755
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -188,8 +188,8 @@ namespace mongo {
from. This is a convenience for them.
The default implementation of setSource() sets this; if you don't
- need a source, override that to assert(). The default is to
- assert() if this has already been set.
+ need a source, override that to verify(). The default is to
+ verify() if this has already been set.
*/
DocumentSource *pSource;
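
Editor's note: the rewritten comment above pins down the setSource() contract: the base class stores its upstream exactly once and verify()s against a second assignment, while self-sourcing stages (the BSON-array and command-futures sources later in this diff) override it to verify(false) unconditionally. A schematic of the pattern, with verify() as a stand-in macro:

    #include <cstdlib>

    #define verify(expr) do { if (!(expr)) std::abort(); } while (0)

    struct DocumentSource {
        DocumentSource* pSource;
        DocumentSource() : pSource(0) {}
        virtual ~DocumentSource() {}
        virtual void setSource(DocumentSource* s) {
            verify(!pSource);   // an upstream may be attached exactly once
            pSource = s;
        }
    };

    struct DocumentSourceBsonArray : public DocumentSource {
        virtual void setSource(DocumentSource*) {
            verify(false);      // leaf stage: never takes an upstream
        }
    };
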
diff --git a/src/mongo/db/pipeline/document_source_bson_array.cpp b/src/mongo/db/pipeline/document_source_bson_array.cpp
index 46ac2eb82ac..fe90acc0dd4 100755
--- a/src/mongo/db/pipeline/document_source_bson_array.cpp
+++ b/src/mongo/db/pipeline/document_source_bson_array.cpp
@@ -45,7 +45,7 @@ namespace mongo {
}
intrusive_ptr<Document> DocumentSourceBsonArray::getCurrent() {
- assert(haveCurrent);
+ verify(haveCurrent);
BSONObj documentObj(currentElement.Obj());
intrusive_ptr<Document> pDocument(
Document::createFromBsonObj(&documentObj));
@@ -54,7 +54,7 @@ namespace mongo {
void DocumentSourceBsonArray::setSource(DocumentSource *pSource) {
/* this doesn't take a source */
- assert(false);
+ verify(false);
}
DocumentSourceBsonArray::DocumentSourceBsonArray(
@@ -74,7 +74,7 @@ namespace mongo {
BSONElement *pBsonElement,
const intrusive_ptr<ExpressionContext> &pExpCtx) {
- assert(pBsonElement->type() == Array);
+ verify(pBsonElement->type() == Array);
intrusive_ptr<DocumentSourceBsonArray> pSource(
new DocumentSourceBsonArray(pBsonElement, pExpCtx));
@@ -82,6 +82,6 @@ namespace mongo {
}
void DocumentSourceBsonArray::sourceToBson(BSONObjBuilder *pBuilder) const {
- assert(false); // this has no analog in the BSON world
+ verify(false); // this has no analog in the BSON world
}
}
diff --git a/src/mongo/db/pipeline/document_source_command_futures.cpp b/src/mongo/db/pipeline/document_source_command_futures.cpp
index bcb31d588b0..991dabaf781 100755
--- a/src/mongo/db/pipeline/document_source_command_futures.cpp
+++ b/src/mongo/db/pipeline/document_source_command_futures.cpp
@@ -44,19 +44,19 @@ namespace mongo {
}
intrusive_ptr<Document> DocumentSourceCommandFutures::getCurrent() {
- assert(!eof());
+ verify(!eof());
return pCurrent;
}
void DocumentSourceCommandFutures::setSource(DocumentSource *pSource) {
/* this doesn't take a source */
- assert(false);
+ verify(false);
}
void DocumentSourceCommandFutures::sourceToBson(
BSONObjBuilder *pBuilder) const {
/* this has no BSON equivalent */
- assert(false);
+ verify(false);
}
DocumentSourceCommandFutures::DocumentSourceCommandFutures(
diff --git a/src/mongo/db/pipeline/document_source_filter_base.cpp b/src/mongo/db/pipeline/document_source_filter_base.cpp
index 9b4cd64a54a..c04ff7a9f29 100755
--- a/src/mongo/db/pipeline/document_source_filter_base.cpp
+++ b/src/mongo/db/pipeline/document_source_filter_base.cpp
@@ -75,7 +75,7 @@ namespace mongo {
if (unstarted)
findNext();
- assert(pCurrent.get() != NULL);
+ verify(pCurrent.get() != NULL);
return pCurrent;
}
diff --git a/src/mongo/db/pipeline/document_source_group.cpp b/src/mongo/db/pipeline/document_source_group.cpp
index c47b3b373a4..54f77f69a9f 100755
--- a/src/mongo/db/pipeline/document_source_group.cpp
+++ b/src/mongo/db/pipeline/document_source_group.cpp
@@ -48,7 +48,7 @@ namespace mongo {
if (!populated)
populate();
- assert(groupsIterator != groups.end());
+ verify(groupsIterator != groups.end());
++groupsIterator;
if (groupsIterator == groups.end()) {
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 5a293ceebe6..bd3e32bef48 100755
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -87,7 +87,7 @@ namespace mongo {
void DocumentSourceMatch::manageDependencies(
const intrusive_ptr<DependencyTracker> &pTracker) {
#ifdef MONGO_LATER_SERVER_4644
- assert(false); // $$$ implement dependencies on Matcher
+ verify(false); // $$$ implement dependencies on Matcher
#endif /* MONGO_LATER_SERVER_4644 */
}
}
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 6ddc65fe2d2..45381de555d 100755
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -48,7 +48,7 @@ namespace mongo {
BSONElement *pBsonElement,
const intrusive_ptr<ExpressionContext> &pExpCtx):
DocumentSource(pExpCtx) {
- assert(false && "unimplemented");
+ verify(false && "unimplemented");
}
intrusive_ptr<DocumentSourceOut> DocumentSourceOut::createFromBson(
@@ -61,6 +61,6 @@ namespace mongo {
}
void DocumentSourceOut::sourceToBson(BSONObjBuilder *pBuilder) const {
- assert(false); // CW TODO
+ verify(false); // CW TODO
}
}
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
index 1ccb4377f66..63d231aa283 100755
--- a/src/mongo/db/pipeline/document_source_sort.cpp
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -50,7 +50,7 @@ namespace mongo {
if (!populated)
populate();
- assert(listIterator != documents.end());
+ verify(listIterator != documents.end());
++listIterator;
if (listIterator == documents.end()) {
@@ -149,7 +149,7 @@ namespace mongo {
void DocumentSourceSort::populate() {
/* make sure we've got a sort key */
- assert(vSortKey.size());
+ verify(vSortKey.size());
/* track and warn about how much physical memory has been used */
DocMemMonitor dmm(this);
@@ -215,7 +215,7 @@ namespace mongo {
bool DocumentSourceSort::Carrier::lessThan(
const Carrier &rL, const Carrier &rR) {
/* make sure these aren't from different lists */
- assert(rL.pSort == rR.pSort);
+ verify(rL.pSort == rR.pSort);
/* compare the documents according to the sort key */
return (rL.pSort->compare(rL.pDocument, rR.pDocument) < 0);
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index 496557c548f..b250b0ab253 100755
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -143,7 +143,7 @@ namespace mongo {
/* get the iterator we'll use to unwind the array */
pUnwinder = pUnwindArray->getArray();
- assert(pUnwinder->more()); // we just checked above...
+ verify(pUnwinder->more()); // we just checked above...
pUnwindValue = pUnwinder->next();
}
}
@@ -169,12 +169,12 @@ namespace mongo {
For this to be valid, we must already have pNoUnwindDocument set,
and have set up the vector of indices for that document in fieldIndex.
*/
- assert(pNoUnwindDocument.get());
+ verify(pNoUnwindDocument.get());
intrusive_ptr<Document> pClone(pNoUnwindDocument->clone());
intrusive_ptr<Document> pCurrent(pClone);
const size_t n = fieldIndex.size();
- assert(n);
+ verify(n);
for(size_t i = 0; i < n; ++i) {
const size_t fi = fieldIndex[i];
Document::FieldPair fp(pCurrent->getField(fi));
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index 76e39a8bd05..674090d89e7 100755
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -33,7 +33,7 @@ namespace mongo {
void Expression::toMatcherBson(
BSONObjBuilder *pBuilder, unsigned depth) const {
- assert(false && "Expression::toMatcherBson()");
+ verify(false && "Expression::toMatcherBson()");
}
Expression::ObjectCtx::ObjectCtx(int theOptions):
@@ -42,9 +42,9 @@ namespace mongo {
}
void Expression::ObjectCtx::unwind(string fieldName) {
- assert(unwindOk());
- assert(!unwindUsed());
- assert(fieldName.size());
+ verify(unwindOk());
+ verify(!unwindUsed());
+ verify(fieldName.size());
unwindField = fieldName;
}
@@ -105,7 +105,7 @@ namespace mongo {
/* if it's our first time, create the document expression */
if (!pExpression.get()) {
- assert(pCtx->documentOk());
+ verify(pCtx->documentOk());
// CW TODO error: document not allowed in this context
pExpressionObject = ExpressionObject::create();
@@ -343,7 +343,7 @@ namespace mongo {
} // switch(type)
/* NOTREACHED */
- assert(false);
+ verify(false);
return intrusive_ptr<Expression>();
}
@@ -589,7 +589,7 @@ namespace mongo {
expressions. Direct equality is a degenerate range expression;
range expressions can be open-ended.
*/
- assert(false && "unimplemented");
+ verify(false && "unimplemented");
}
intrusive_ptr<ExpressionNary> (*ExpressionAnd::getFactory() const)() {
@@ -648,12 +648,12 @@ namespace mongo {
void ExpressionCoerceToBool::addToBsonObj(
BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const {
- assert(false && "not possible"); // no equivalent of this
+ verify(false && "not possible"); // no equivalent of this
}
void ExpressionCoerceToBool::addToBsonArray(
BSONArrayBuilder *pBuilder, unsigned depth) const {
- assert(false && "not possible"); // no equivalent of this
+ verify(false && "not possible"); // no equivalent of this
}
/* ----------------------- ExpressionCompare --------------------------- */
@@ -855,7 +855,7 @@ namespace mongo {
return Value::getOne();
default:
- assert(false); // CW TODO internal error
+ verify(false); // CW TODO internal error
return Value::getNull();
}
}
@@ -982,7 +982,7 @@ namespace mongo {
}
const char *ExpressionConstant::getOpName() const {
- assert(false); // this has no name
+ verify(false); // this has no name
return NULL;
}
@@ -1222,7 +1222,7 @@ namespace mongo {
ExpressionObject *pChild =
dynamic_cast<ExpressionObject *>(pE);
- assert(pChild);
+ verify(pChild);
/*
Check on the type of the result object. If it's an
@@ -1322,7 +1322,7 @@ namespace mongo {
void ExpressionObject::addField(const string &fieldName,
const intrusive_ptr<Expression> &pExpression) {
/* must have an expression */
- assert(pExpression.get());
+ verify(pExpression.get());
/* parse the field path */
FieldPath fieldPath(fieldName);
@@ -1397,7 +1397,7 @@ namespace mongo {
if (i < n) {
/* the intermediate child already exists */
pChild = dynamic_cast<ExpressionObject *>(vpExpression[i].get());
- assert(pChild);
+ verify(pChild);
}
else {
/*
@@ -1496,7 +1496,7 @@ namespace mongo {
*/
Expression *pE = vpExpression[iField].get();
ExpressionObject *pEO = dynamic_cast<ExpressionObject *>(pE);
- assert(pEO);
+ verify(pEO);
/*
Add the current field name to the path being built up,
@@ -1783,7 +1783,7 @@ namespace mongo {
void ExpressionFieldRange::toMatcherBson(
BSONObjBuilder *pBuilder, unsigned depth) const {
- assert(pRange.get()); // otherwise, we can't do anything
+ verify(pRange.get()); // otherwise, we can't do anything
/* if there are no endpoints, then every value is accepted */
if (!pRange->pBottom.get() && !pRange->pTop.get())
@@ -1873,7 +1873,7 @@ namespace mongo {
break;
case CMP:
- assert(false); // not allowed
+ verify(false); // not allowed
break;
}
}
@@ -2566,7 +2566,7 @@ namespace mongo {
void ExpressionNary::toBson(
BSONObjBuilder *pBuilder, const char *pOpName, unsigned depth) const {
const size_t nOperand = vpOperand.size();
- assert(nOperand > 0);
+ verify(nOperand > 0);
if (nOperand == 1) {
vpOperand[0]->addToBsonObj(pBuilder, pOpName, depth + 1);
return;
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index c4a03a1d560..fb6ab9adaa8 100755
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -274,17 +274,17 @@ namespace mongo {
if (type == NumberLong)
return static_cast< double >( simple.longValue );
- assert(type == NumberDouble);
+ verify(type == NumberDouble);
return simple.doubleValue;
}
string Value::getString() const {
- assert(getType() == String);
+ verify(getType() == String);
return stringValue;
}
intrusive_ptr<Document> Value::getDocument() const {
- assert(getType() == Object);
+ verify(getType() == Object);
return pDocumentValue;
}
@@ -299,7 +299,7 @@ namespace mongo {
}
intrusive_ptr<const Value> Value::vi::next() {
- assert(more());
+ verify(more());
return (*pvpValue)[nextIndex++];
}
@@ -311,44 +311,44 @@ namespace mongo {
}
intrusive_ptr<ValueIterator> Value::getArray() const {
- assert(getType() == Array);
+ verify(getType() == Array);
intrusive_ptr<ValueIterator> pVI(
new vi(intrusive_ptr<const Value>(this), &vpValue));
return pVI;
}
OID Value::getOid() const {
- assert(getType() == jstOID);
+ verify(getType() == jstOID);
return oidValue;
}
bool Value::getBool() const {
- assert(getType() == Bool);
+ verify(getType() == Bool);
return simple.boolValue;
}
Date_t Value::getDate() const {
- assert(getType() == Date);
+ verify(getType() == Date);
return dateValue;
}
string Value::getRegex() const {
- assert(getType() == RegEx);
+ verify(getType() == RegEx);
return stringValue;
}
string Value::getSymbol() const {
- assert(getType() == Symbol);
+ verify(getType() == Symbol);
return stringValue;
}
int Value::getInt() const {
- assert(getType() == NumberInt);
+ verify(getType() == NumberInt);
return simple.intValue;
}
unsigned long long Value::getTimestamp() const {
- assert(getType() == Timestamp);
+ verify(getType() == Timestamp);
return dateValue;
}
@@ -357,7 +357,7 @@ namespace mongo {
if (type == NumberInt)
return simple.intValue;
- assert(type == NumberLong);
+ verify(type == NumberLong);
return simple.longValue;
}
@@ -393,7 +393,7 @@ namespace mongo {
case BinData:
// pBuilder->appendBinData(fieldName, ...);
- assert(false); // CW TODO unimplemented
+ verify(false); // CW TODO unimplemented
break;
case jstOID:
@@ -417,7 +417,7 @@ namespace mongo {
break;
case CodeWScope:
- assert(false); // CW TODO unimplemented
+ verify(false); // CW TODO unimplemented
break;
case NumberInt:
@@ -443,7 +443,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false); // CW TODO better message
+ verify(false); // CW TODO better message
break;
}
}
@@ -483,7 +483,7 @@ namespace mongo {
break;
case CodeWScope:
- assert(false); // CW TODO unimplemented
+ verify(false); // CW TODO unimplemented
break;
case NumberInt:
@@ -507,7 +507,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false); // CW TODO better message
+ verify(false); // CW TODO better message
break;
}
@@ -618,7 +618,7 @@ namespace mongo {
false);
} // switch(type)
- assert(false); // CW TODO no conversion available
+ verify(false); // CW TODO no conversion available
return jstNULL;
}
@@ -746,7 +746,7 @@ namespace mongo {
case NumberInt:
case NumberLong:
/* these types were handled above */
- assert(false);
+ verify(false);
case String:
return rL->stringValue.compare(rR->stringValue);
@@ -780,7 +780,7 @@ namespace mongo {
}
/* NOTREACHED */
- assert(false);
+ verify(false);
break;
}
@@ -834,7 +834,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false);
+ verify(false);
break;
} // switch(lType)
@@ -920,7 +920,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false); // CW TODO better message
+ verify(false); // CW TODO better message
break;
} // switch(type)
}
@@ -1029,7 +1029,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false); // CW TODO better message
+ verify(false); // CW TODO better message
return sizeof(Value);
}
@@ -1040,7 +1040,7 @@ namespace mongo {
default. However, not all the compilers seem to do that. Therefore,
this final catch-all is here.
*/
- assert(false);
+ verify(false);
return sizeof(Value);
}
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index a638bd012a3..ddfcade02d4 100755
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -411,7 +411,7 @@ namespace mongo {
}
inline size_t Value::getArrayLength() const {
- assert(getType() == Array);
+ verify(getType() == Array);
return vpValue.size();
}
diff --git a/src/mongo/db/projection.cpp b/src/mongo/db/projection.cpp
index d07e56527af..a2ec4af325d 100644
--- a/src/mongo/db/projection.cpp
+++ b/src/mongo/db/projection.cpp
@@ -281,14 +281,14 @@ namespace mongo {
}
BSONObj Projection::KeyOnly::hydrate( const BSONObj& key ) const {
- assert( _include.size() == _names.size() );
+ verify( _include.size() == _names.size() );
BSONObjBuilder b( key.objsize() + _stringSize + 16 );
BSONObjIterator i(key);
unsigned n=0;
while ( i.more() ) {
- assert( n < _include.size() );
+ verify( n < _include.size() );
BSONElement e = i.next();
if ( _include[n] ) {
b.appendAs( e , _names[n] );
diff --git a/src/mongo/db/queryoptimizer.cpp b/src/mongo/db/queryoptimizer.cpp
index 7a9429c8868..40ca5926d57 100644
--- a/src/mongo/db/queryoptimizer.cpp
+++ b/src/mongo/db/queryoptimizer.cpp
@@ -636,7 +636,7 @@ doneCheckOrder:
}
QueryPlanSet::QueryPlanPtr QueryPlanSet::getBestGuess() const {
- assert( _plans.size() );
+ verify( _plans.size() );
if ( _plans[ 0 ]->scanAndOrderRequired() ) {
for ( unsigned i=1; i<_plans.size(); i++ ) {
if ( ! _plans[i]->scanAndOrderRequired() )
@@ -729,7 +729,7 @@ doneCheckOrder:
_currentQps->prepareToRetryQuery() ) {
// Avoid an infinite loop here - this should never occur.
- assert( !retried );
+ verify( !retried );
_runner.reset();
return iterateRunner( originalOp, true );
}
@@ -804,7 +804,7 @@ doneCheckOrder:
}
shared_ptr<QueryOp> QueryPlanSet::Runner::next() {
- assert( !done() );
+ verify( !done() );
if ( _ops.empty() ) {
shared_ptr<QueryOp> initialRet = init();
@@ -827,7 +827,7 @@ doneCheckOrder:
}
shared_ptr<QueryOp> QueryPlanSet::Runner::_next() {
- assert( !_queue.empty() );
+ verify( !_queue.empty() );
OpHolder holder = _queue.pop();
QueryOp &op = *holder._op;
nextOp( op );
@@ -996,7 +996,7 @@ doneCheckOrder:
}
shared_ptr<QueryOp> MultiPlanScanner::nextOp() {
- assert( !doneOps() );
+ verify( !doneOps() );
shared_ptr<QueryOp> ret = _or ? nextOpOr() : nextOpSimple();
if ( ret->error() || ret->complete() ) {
_doneOps = true;
@@ -1038,7 +1038,7 @@ doneCheckOrder:
}
handleBeginningOfClause();
shared_ptr<QueryPlan> bestGuess = _currentQps->getBestGuess();
- assert( bestGuess );
+ verify( bestGuess );
return bestGuess.get();
}
@@ -1154,7 +1154,7 @@ doneCheckOrder:
_matcher = newMatcher;
_c = _queryPlan->newCursor();
// All sub cursors must support yields.
- assert( _c->supportYields() );
+ verify( _c->supportYields() );
if ( _explainPlanInfo ) {
_explainPlanInfo.reset( new ExplainPlanInfo() );
_explainPlanInfo->notePlan( *_c, _queryPlan->scanAndOrderRequired(),
@@ -1211,7 +1211,7 @@ doneCheckOrder:
b.appendMinKey( e.fieldName() );
break;
default:
- assert( false );
+ verify( false );
}
}
return b.obj();
diff --git a/src/mongo/db/queryoptimizer.h b/src/mongo/db/queryoptimizer.h
index f8af8971862..fa9e00bb810 100644
--- a/src/mongo/db/queryoptimizer.h
+++ b/src/mongo/db/queryoptimizer.h
@@ -201,7 +201,7 @@ namespace mongo {
/** To be called by QueryPlanSet::Runner only. */
QueryOp *createChild();
- void setQueryPlan( const QueryPlan *qp ) { _qp = qp; assert( _qp != NULL ); }
+ void setQueryPlan( const QueryPlan *qp ) { _qp = qp; verify( _qp != NULL ); }
void init();
void setException( const DBException &e ) {
_error = true;
@@ -443,7 +443,7 @@ namespace mongo {
/** Add explain information for a new clause. */
void addClauseInfo( const shared_ptr<ExplainClauseInfo> &clauseInfo ) {
- assert( _explainQueryInfo );
+ verify( _explainQueryInfo );
_explainQueryInfo->addClauseInfo( clauseInfo );
}
@@ -588,12 +588,12 @@ namespace mongo {
void noteYield();
const QueryPlan &queryPlan() const {
- assert( _c->ok() && _queryPlan );
+ verify( _c->ok() && _queryPlan );
return *_queryPlan;
}
const Projection::KeyOnly *keyFieldsOnly() const {
- assert( _c->ok() && _queryPlan );
+ verify( _c->ok() && _queryPlan );
return _queryPlan->keyFieldsOnly().get();
}
private:
diff --git a/src/mongo/db/queryoptimizercursorimpl.cpp b/src/mongo/db/queryoptimizercursorimpl.cpp
index 735245fa4fa..87746bf302c 100644
--- a/src/mongo/db/queryoptimizercursorimpl.cpp
+++ b/src/mongo/db/queryoptimizercursorimpl.cpp
@@ -89,7 +89,7 @@ namespace mongo {
// All candidate cursors must support yields for QueryOptimizerCursorImpl's
// prepareToYield() and prepareToTouchEarlierIterate() to work.
- assert( _c->supportYields() );
+ verify( _c->supportYields() );
_capped = _c->capped();
// TODO This violates the current Cursor interface abstraction, but for now it's simpler to keep our own set of
@@ -448,7 +448,7 @@ namespace mongo {
if ( _currOp->error() || !ok() ) {
// Advance to a non error op if one of the ops errored out.
// Advance to a following $or clause if the $or clause returned all results.
- assert( !_mps->doneOps() );
+ verify( !_mps->doneOps() );
_advance( true );
}
}
@@ -760,7 +760,7 @@ namespace mongo {
}
if ( _planPolicy.permitOptimalIdPlan() && isSimpleIdQuery( _query ) ) {
Database *database = cc().database();
- assert( database );
+ verify( database );
NamespaceDetails *d = database->namespaceIndex.details( _ns );
if ( d ) {
int idxNo = d->findIdIndex();
diff --git a/src/mongo/db/querypattern.cpp b/src/mongo/db/querypattern.cpp
index 57cc9a7737d..e431c9be3c9 100644
--- a/src/mongo/db/querypattern.cpp
+++ b/src/mongo/db/querypattern.cpp
@@ -51,7 +51,7 @@ namespace mongo {
bool QueryPattern::operator==( const QueryPattern &other ) const {
bool less = operator<( other );
bool more = other.operator<( *this );
- assert( !( less && more ) );
+ verify( !( less && more ) );
return !( less || more );
}
diff --git a/src/mongo/db/queryutil.cpp b/src/mongo/db/queryutil.cpp
index 61571ab880a..7fd3e28ae64 100644
--- a/src/mongo/db/queryutil.cpp
+++ b/src/mongo/db/queryutil.cpp
@@ -155,7 +155,7 @@ namespace mongo {
BSONObj o = e.embeddedObject();
return simpleRegex(o["$regex"].valuestrsafe(), o["$options"].valuestrsafe());
}
- default: assert(false); return ""; //return squashes compiler warning
+ default: verify(false); return ""; //return squashes compiler warning
}
}
@@ -675,7 +675,7 @@ namespace mongo {
}
void FieldRange::reverse( FieldRange &ret ) const {
- assert( _special.empty() );
+ verify( _special.empty() );
ret._intervals.clear();
ret._objData = _objData;
for( vector<FieldInterval>::const_reverse_iterator i = _intervals.rbegin(); i != _intervals.rend(); ++i ) {
@@ -939,14 +939,14 @@ namespace mongo {
FieldRangeVector::FieldRangeVector( const FieldRangeSet &frs, const IndexSpec &indexSpec,
int direction )
:_indexSpec( indexSpec ), _direction( direction >= 0 ? 1 : -1 ) {
- assert( frs.matchPossibleForIndex( _indexSpec.keyPattern ) );
+ verify( frs.matchPossibleForIndex( _indexSpec.keyPattern ) );
_queries = frs._queries;
BSONObjIterator i( _indexSpec.keyPattern );
set< string > baseObjectNonUniversalPrefixes;
while( i.more() ) {
BSONElement e = i.next();
const FieldRange *range = &frs.range( e.fieldName() );
- assert( !range->empty() );
+ verify( !range->empty() );
if ( !frs.singleKey() ) {
string prefix = str::before( e.fieldName(), '.' );
if ( baseObjectNonUniversalPrefixes.count( prefix ) > 0 ) {
@@ -969,7 +969,7 @@ namespace mongo {
true ) );
range->reverse( _ranges.back() );
}
- assert( !_ranges.back().empty() );
+ verify( !_ranges.back().empty() );
}
uassert( 13385, "combinatorial limit of $in partitioning of result set exceeded",
size() < maxCombinations );
@@ -1032,7 +1032,7 @@ namespace mongo {
BSONElement e = i.next();
const char *name = e.fieldName();
const FieldRange &eRange = range( name );
- assert( !eRange.empty() );
+ verify( !eRange.empty() );
if ( eRange.equality() )
b.appendAs( eRange.min(), name );
else if ( !eRange.universal() ) {
@@ -1236,7 +1236,7 @@ namespace mongo {
return ret;
}
}
- assert( l + 1 == h );
+ verify( l + 1 == h );
return l;
}
@@ -1278,7 +1278,7 @@ namespace mongo {
BSONObj FieldRangeVector::firstMatch( const BSONObj &obj ) const {
// NOTE Only works in forward direction.
- assert( _direction >= 0 );
+ verify( _direction >= 0 );
BSONObjSet keys( BSONObjCmp( _indexSpec.keyPattern ) );
_indexSpec.getKeys( obj, keys );
for( BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
@@ -1550,64 +1550,64 @@ namespace mongo {
BSONObjBuilder b;
b.appendRegex("r", "^foo");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "foo" );
+ verify( simpleRegex(o.firstElement()) == "foo" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "^f?oo");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "" );
+ verify( simpleRegex(o.firstElement()) == "" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "^fz?oo");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "f" );
+ verify( simpleRegex(o.firstElement()) == "f" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "^f", "");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "f" );
+ verify( simpleRegex(o.firstElement()) == "f" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "\\Af", "");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "f" );
+ verify( simpleRegex(o.firstElement()) == "f" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "^f", "m");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "" );
+ verify( simpleRegex(o.firstElement()) == "" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "\\Af", "m");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "f" );
+ verify( simpleRegex(o.firstElement()) == "f" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "\\Af", "mi");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "" );
+ verify( simpleRegex(o.firstElement()) == "" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "\\Af \t\vo\n\ro \\ \\# #comment", "mx");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "foo #" );
+ verify( simpleRegex(o.firstElement()) == "foo #" );
}
{
- assert( simpleRegex("^\\Qasdf\\E", "", NULL) == "asdf" );
- assert( simpleRegex("^\\Qasdf\\E.*", "", NULL) == "asdf" );
- assert( simpleRegex("^\\Qasdf", "", NULL) == "asdf" ); // PCRE supports this
- assert( simpleRegex("^\\Qasdf\\\\E", "", NULL) == "asdf\\" );
- assert( simpleRegex("^\\Qas.*df\\E", "", NULL) == "as.*df" );
- assert( simpleRegex("^\\Qas\\Q[df\\E", "", NULL) == "as\\Q[df" );
- assert( simpleRegex("^\\Qas\\E\\\\E\\Q$df\\E", "", NULL) == "as\\E$df" ); // quoted string containing \E
+ verify( simpleRegex("^\\Qasdf\\E", "", NULL) == "asdf" );
+ verify( simpleRegex("^\\Qasdf\\E.*", "", NULL) == "asdf" );
+ verify( simpleRegex("^\\Qasdf", "", NULL) == "asdf" ); // PCRE supports this
+ verify( simpleRegex("^\\Qasdf\\\\E", "", NULL) == "asdf\\" );
+ verify( simpleRegex("^\\Qas.*df\\E", "", NULL) == "as.*df" );
+ verify( simpleRegex("^\\Qas\\Q[df\\E", "", NULL) == "as\\Q[df" );
+ verify( simpleRegex("^\\Qas\\E\\\\E\\Q$df\\E", "", NULL) == "as\\E$df" ); // quoted string containing \E
}
}
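
The unit tests in the queryutil.cpp hunk above pin down simpleRegex()'s contract: a rooted (^ or \A), option-free pattern yields its longest fixed literal prefix, which the optimizer can use as an index bound, and any pattern whose prefix cannot be relied on yields "". Below is a minimal standalone sketch of just that prefix rule, under the simplifying assumption that only plain ^-anchored patterns are handled; the name simplePrefix and the whole harness are illustrative, not mongo code.

    // simple_regex_prefix.cpp -- illustrative sketch, not mongo's simpleRegex().
    #include <cassert>   // standard C assert, unrelated to the macro swapped out above
    #include <cctype>
    #include <cstdio>
    #include <string>

    // Return the fixed prefix every match of the regex must start with, or ""
    // when none can be relied on. Covers only the rooted, option-free cases
    // exercised by the tests above.
    std::string simplePrefix(const std::string& re) {
        if (re.empty() || re[0] != '^')
            return "";                        // unanchored: any prefix possible
        std::string out;
        for (size_t i = 1; i < re.size(); ++i) {
            unsigned char c = re[i];
            if (!std::isalnum(c) && c != ' ')
                break;                        // metacharacter ends the literal run
            // a literal is only fixed if the next char doesn't make it
            // optional/repeated, e.g. "^fz?oo" keeps "f" but drops "z"
            if (i + 1 < re.size() && (re[i + 1] == '?' || re[i + 1] == '*'))
                break;
            out += static_cast<char>(c);
        }
        return out;
    }

    int main() {
        assert(simplePrefix("^foo") == "foo");
        assert(simplePrefix("^f?oo") == "");    // 'f' itself is optional
        assert(simplePrefix("^fz?oo") == "f");  // 'z' is optional, 'f' is not
        assert(simplePrefix("foo") == "");      // not rooted
        std::puts("ok");
        return 0;
    }

The checks in main() mirror the first tests above; the real simpleRegex() additionally understands \A anchors, \Q...\E quoting, and the m/x/i options that the remaining tests cover.
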
diff --git a/src/mongo/db/queryutil.h b/src/mongo/db/queryutil.h
index 40553c91a94..14862cd33c4 100644
--- a/src/mongo/db/queryutil.h
+++ b/src/mongo/db/queryutil.h
@@ -305,10 +305,10 @@ namespace mongo {
* be extracted.
*/
- BSONElement min() const { assert( !empty() ); return _intervals[ 0 ]._lower._bound; }
- BSONElement max() const { assert( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._bound; }
- bool minInclusive() const { assert( !empty() ); return _intervals[ 0 ]._lower._inclusive; }
- bool maxInclusive() const { assert( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._inclusive; }
+ BSONElement min() const { verify( !empty() ); return _intervals[ 0 ]._lower._bound; }
+ BSONElement max() const { verify( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._bound; }
+ bool minInclusive() const { verify( !empty() ); return _intervals[ 0 ]._lower._inclusive; }
+ bool maxInclusive() const { verify( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._inclusive; }
/** @return true iff this range expresses a single equality interval. */
bool equality() const;
diff --git a/src/mongo/db/record.cpp b/src/mongo/db/record.cpp
index 891d2d71f1c..e98852f0d10 100644
--- a/src/mongo/db/record.cpp
+++ b/src/mongo/db/record.cpp
@@ -56,7 +56,7 @@ namespace mongo {
}
State get( int regionHash , size_t region , short offset ) {
- DEV assert( hash( region ) == regionHash );
+ DEV verify( hash( region ) == regionHash );
Entry * e = _get( regionHash , region , false );
if ( ! e )
@@ -69,7 +69,7 @@ namespace mongo {
* @return true if added, false if full
*/
bool in( int regionHash , size_t region , short offset ) {
- DEV assert( hash( region ) == regionHash );
+ DEV verify( hash( region ) == regionHash );
Entry * e = _get( regionHash , region , true );
if ( ! e )
diff --git a/src/mongo/db/repl.cpp b/src/mongo/db/repl.cpp
index 8f5a65338ca..c805ef8bff2 100644
--- a/src/mongo/db/repl.cpp
+++ b/src/mongo/db/repl.cpp
@@ -322,7 +322,7 @@ namespace mongo {
void ReplSource::save() {
BSONObjBuilder b;
- assert( !hostName.empty() );
+ verify( !hostName.empty() );
b.append("host", hostName);
// todo: finish allowing multiple source configs.
// this line doesn't work right when source is null, if that is allowed as it is now:
@@ -336,8 +336,8 @@ namespace mongo {
OpDebug debug;
Client::Context ctx("local.sources");
UpdateResult res = updateObjects("local.sources", o, pattern, true/*upsert for pair feature*/, false,false,debug);
- assert( ! res.mod );
- assert( res.num == 1 );
+ verify( ! res.mod );
+ verify( res.num == 1 );
}
}
@@ -672,7 +672,7 @@ namespace mongo {
if( cmdLine.pretouch > 1 ) {
/* note: this is bad - should be put in ReplSource. but this is first test... */
static int countdown;
- assert( countdown >= 0 );
+ verify( countdown >= 0 );
if( countdown > 0 ) {
countdown--; // was pretouched on a prev pass
}
@@ -910,7 +910,7 @@ namespace mongo {
log() << "repl ASSERTION failed : syncedTo < nextOpTime" << endl;
log() << "repl syncTo: " << syncedTo.toStringLong() << endl;
log() << "repl nextOpTime: " << nextOpTime.toStringLong() << endl;
- assert(false);
+ verify(false);
}
oplogReader.putBack( op ); // op will be processed in the loop below
nextOpTime = OpTime(); // will reread the op below
@@ -928,7 +928,7 @@ namespace mongo {
log() << "repl: tailing: " << tailing << '\n';
log() << "repl: data too stale, halting replication" << endl;
replInfo = replAllDead = "data too stale halted replication";
- assert( syncedTo < nextOpTime );
+ verify( syncedTo < nextOpTime );
throw SyncException();
}
else {
@@ -1006,7 +1006,7 @@ namespace mongo {
uassert( 10123 , "replication error last applied optime at slave >= nextOpTime from master", false);
}
if ( replSettings.slavedelay && ( unsigned( time( 0 ) ) < nextOpTime.getSecs() + replSettings.slavedelay ) ) {
- assert( justOne );
+ verify( justOne );
oplogReader.putBack( op );
_sleepAdviceTime = nextOpTime.getSecs() + replSettings.slavedelay + 1;
Lock::GlobalWrite lk;
@@ -1183,7 +1183,7 @@ namespace mongo {
}
void OplogReader::tailingQuery(const char *ns, const BSONObj& query, const BSONObj* fields ) {
- assert( !haveCursor() );
+ verify( !haveCursor() );
LOG(2) << "repl: " << ns << ".find(" << query.toString() << ')' << endl;
cursor.reset( _conn->query( ns, query, 0, 0, fields, _tailingQueryOptions ).release() );
}
@@ -1334,7 +1334,7 @@ namespace mongo {
break;
}
}
- assert( syncing == 0 ); // i.e., there is only one sync thread running. we will want to change/fix this.
+ verify( syncing == 0 ); // i.e., there is only one sync thread running. we will want to change/fix this.
syncing++;
}
try {
@@ -1355,7 +1355,7 @@ namespace mongo {
}
{
Lock::GlobalWrite lk;
- assert( syncing == 1 );
+ verify( syncing == 1 );
syncing--;
}
@@ -1484,7 +1484,7 @@ namespace mongo {
}
if ( replSettings.slave ) {
- assert( replSettings.slave == SimpleSlave );
+ verify( replSettings.slave == SimpleSlave );
log(1) << "slave=true" << endl;
boost::thread repl_thread(replSlaveThread);
}
diff --git a/src/mongo/db/repl/consensus.cpp b/src/mongo/db/repl/consensus.cpp
index 3995373f5ef..6c6107b34c7 100644
--- a/src/mongo/db/repl/consensus.cpp
+++ b/src/mongo/db/repl/consensus.cpp
@@ -168,7 +168,7 @@ namespace mongo {
void Consensus::electionFailed(unsigned meid) {
SimpleMutex::scoped_lock lk(lyMutex);
LastYea &L = ly.ref(lk);
-        DEV assert( L.who == meid ); // this may not always hold, so be aware, but adding for now as a quick sanity test
+        DEV verify( L.who == meid ); // this may not always hold, so be aware, but adding for now as a quick sanity test
if( L.who == meid )
L.when = 0;
}
@@ -261,7 +261,7 @@ namespace mongo {
bool Consensus::weAreFreshest(bool& allUp, int& nTies) {
const OpTime ord = theReplSet->lastOpTimeWritten;
nTies = 0;
- assert( !ord.isNull() );
+ verify( !ord.isNull() );
BSONObj cmd = BSON(
"replSetFresh" << 1 <<
"set" << rs.name() <<
@@ -291,7 +291,7 @@ namespace mongo {
OpTime remoteOrd( i->result["opTime"].Date() );
if( remoteOrd == ord )
nTies++;
- assert( remoteOrd <= ord );
+ verify( remoteOrd <= ord );
if( i->result["veto"].trueValue() ) {
BSONElement msg = i->result["errmsg"];
@@ -311,14 +311,14 @@ namespace mongo {
}
}
LOG(1) << "replSet dev we are freshest of up nodes, nok:" << nok << " nTies:" << nTies << rsLog;
- assert( ord <= theReplSet->lastOpTimeWritten ); // <= as this may change while we are working...
+ verify( ord <= theReplSet->lastOpTimeWritten ); // <= as this may change while we are working...
return true;
}
extern time_t started;
void Consensus::multiCommand(BSONObj cmd, list<Target>& L) {
- assert( !rs.lockedByMe() );
+ verify( !rs.lockedByMe() );
mongo::multiCommand(cmd, L);
}
@@ -361,7 +361,7 @@ namespace mongo {
// todo: biggest / highest priority nodes should be the ones that get to not sleep
}
else {
- assert( !rs.lockedByMe() ); // bad to go to sleep locked
+ verify( !rs.lockedByMe() ); // bad to go to sleep locked
unsigned ms = ((unsigned) rand()) % 1000 + 50;
DEV log() << "replSet tie " << nTies << " sleeping a little " << ms << "ms" << rsLog;
sleptLast = true;
@@ -426,9 +426,9 @@ namespace mongo {
}
void Consensus::electSelf() {
- assert( !rs.lockedByMe() );
- assert( !rs.myConfig().arbiterOnly );
- assert( rs.myConfig().slaveDelay == 0 );
+ verify( !rs.lockedByMe() );
+ verify( !rs.myConfig().arbiterOnly );
+ verify( rs.myConfig().slaveDelay == 0 );
try {
_electSelf();
}
diff --git a/src/mongo/db/repl/health.cpp b/src/mongo/db/repl/health.cpp
index 5ad360cf45b..49332005881 100644
--- a/src/mongo/db/repl/health.cpp
+++ b/src/mongo/db/repl/health.cpp
@@ -301,7 +301,7 @@ namespace mongo {
}
const Member *_self = this->_self;
- assert(_self);
+ verify(_self);
{
stringstream s;
/* self row */
@@ -368,7 +368,7 @@ namespace mongo {
vector<BSONObj> v;
const Member *_self = this->_self;
- assert( _self );
+ verify( _self );
MemberState myState = box.getState();
@@ -451,8 +451,8 @@ namespace mongo {
static struct Test : public UnitTest {
void run() {
HealthOptions a,b;
- assert( a == b );
- assert( a.isDefault() );
+ verify( a == b );
+ verify( a.isDefault() );
}
} test;
diff --git a/src/mongo/db/repl/manager.cpp b/src/mongo/db/repl/manager.cpp
index ec970f4b34c..542485d1e03 100644
--- a/src/mongo/db/repl/manager.cpp
+++ b/src/mongo/db/repl/manager.cpp
@@ -35,7 +35,7 @@ namespace mongo {
Member *m = rs->head();
Member *p = 0;
while( m ) {
- DEV assert( m != rs->_self );
+ DEV verify( m != rs->_self );
if( m->state().primary() && m->hbinfo().up() ) {
if( p ) {
two = true;
diff --git a/src/mongo/db/repl/rs.cpp b/src/mongo/db/repl/rs.cpp
index 6c30331431a..6fed4c7ce31 100644
--- a/src/mongo/db/repl/rs.cpp
+++ b/src/mongo/db/repl/rs.cpp
@@ -70,7 +70,7 @@ namespace mongo {
void ReplSetImpl::assumePrimary() {
LOG(2) << "replSet assuming primary" << endl;
- assert( iAmPotentiallyHot() );
+ verify( iAmPotentiallyHot() );
// so we are synchronized with _logOp(). perhaps locking local db only would suffice, but until proven
// will take this route, and this is very rare so it doesn't matter anyway
Lock::GlobalWrite lk;
@@ -215,7 +215,7 @@ namespace mongo {
}
void ReplSetImpl::_fillIsMasterHost(const Member *m, vector<string>& hosts, vector<string>& passives, vector<string>& arbiters) {
- assert( m );
+ verify( m );
if( m->config().hidden )
return;
@@ -248,7 +248,7 @@ namespace mongo {
_fillIsMasterHost(_self, hosts, passives, arbiters);
for( Member *m = _members.head(); m; m = m->next() ) {
- assert( m );
+ verify( m );
_fillIsMasterHost(m, hosts, passives, arbiters);
}
@@ -449,7 +449,7 @@ namespace mongo {
const Member *old = findById(m._id);
if( old ) {
nfound++;
- assert( (int) old->id() == m._id );
+ verify( (int) old->id() == m._id );
if( old->config() != m ) {
additive = false;
}
@@ -493,10 +493,10 @@ namespace mongo {
_cfg = new ReplSetConfig(c);
dassert( &config() == _cfg ); // config() is same thing but const, so we use that when we can for clarity below
- assert( config().ok() );
- assert( _name.empty() || _name == config()._id );
+ verify( config().ok() );
+ verify( _name.empty() || _name == config()._id );
_name = config()._id;
- assert( !_name.empty() );
+ verify( !_name.empty() );
// this is a shortcut for simple changes
if( additive ) {
@@ -546,7 +546,7 @@ namespace mongo {
Member *mi;
members += ( members == "" ? "" : ", " ) + m.h.toString();
if( m.h.isSelf() ) {
- assert( me++ == 0 );
+ verify( me++ == 0 );
mi = new Member(m.h, m._id, &m, true);
if (!reconf) {
log() << "replSet I am " << m.h.toString() << rsLog;
@@ -592,7 +592,7 @@ namespace mongo {
v = cfg.version;
}
}
- assert( highest );
+ verify( highest );
if( !initFromConfig(*highest) )
return false;
@@ -727,7 +727,7 @@ namespace mongo {
if( e.getCode() == 13497 /* removed from set */ ) {
cc().shutdown();
dbexit( EXIT_CLEAN , "removed from replica set" ); // never returns
- assert(0);
+ verify(0);
}
log() << "replSet error unexpected exception in haveNewConfig() : " << e.toString() << rsLog;
_fatal();
@@ -757,9 +757,9 @@ namespace mongo {
void startReplSets(ReplSetCmdline *replSetCmdline) {
Client::initThread("rsStart");
try {
- assert( theReplSet == 0 );
+ verify( theReplSet == 0 );
if( replSetCmdline == 0 ) {
- assert(!replSet);
+ verify(!replSet);
return;
}
replLocalAuth();
diff --git a/src/mongo/db/repl/rs.h b/src/mongo/db/repl/rs.h
index 21ae0a2e5e5..26e1b3dd24b 100644
--- a/src/mongo/db/repl/rs.h
+++ b/src/mongo/db/repl/rs.h
@@ -215,7 +215,7 @@ namespace mongo {
class RSBase : boost::noncopyable {
public:
const unsigned magic;
- void assertValid() { assert( magic == 0x12345677 ); }
+ void assertValid() { verify( magic == 0x12345677 ); }
private:
mongo::mutex m;
int _locked;
@@ -237,14 +237,14 @@ namespace mongo {
return; // recursive is ok...
sl.reset( new scoped_lock(rsbase.m) );
- DEV assert(rsbase._locked == 0);
+ DEV verify(rsbase._locked == 0);
rsbase._locked++;
rsbase._lockedByMe.set(true);
}
~lock() {
if( sl.get() ) {
- assert( rsbase._lockedByMe.get() );
- DEV assert(rsbase._locked == 1);
+ verify( rsbase._lockedByMe.get() );
+ DEV verify(rsbase._locked == 1);
rsbase._lockedByMe.set(false);
rsbase._locked--;
}
@@ -305,7 +305,7 @@ namespace mongo {
void setSelfPrimary(const Member *self) { change(MemberState::RS_PRIMARY, self); }
void setOtherPrimary(const Member *mem) {
rwlock lk(m, true);
- assert( !sp.state.primary() );
+ verify( !sp.state.primary() );
sp.primary = mem;
}
void noteRemoteIsPrimary(const Member *remote) {
@@ -555,7 +555,7 @@ namespace mongo {
bool freeze(int secs) { return _freeze(secs); }
string selfFullName() {
- assert( _self );
+ verify( _self );
return _self->fullName();
}
@@ -661,7 +661,7 @@ namespace mongo {
inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig::MemberCfg *c, bool self) :
_config(*c), _h(h), _hbinfo(ord) {
- assert(c);
+ verify(c);
if( self )
_hbinfo.health = 1.0;
}
diff --git a/src/mongo/db/repl/rs_config.cpp b/src/mongo/db/repl/rs_config.cpp
index 2e7b1048e2c..a7e483cec2c 100644
--- a/src/mongo/db/repl/rs_config.cpp
+++ b/src/mongo/db/repl/rs_config.cpp
@@ -234,7 +234,7 @@ namespace mongo {
*/
/*static*/
bool ReplSetConfig::legalChange(const ReplSetConfig& o, const ReplSetConfig& n, string& errmsg) {
- assert( theReplSet );
+ verify( theReplSet );
if( o._id != n._id ) {
errmsg = "set name may not change";
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 29b7ce9f23c..2a751b8f3cc 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -252,8 +252,8 @@ namespace mongo {
isyncassert( "getLastOp is empty ", !minValid.isEmpty() );
OpTime mvoptime = minValid["ts"]._opTime();
- assert( !mvoptime.isNull() );
- assert( mvoptime >= startingTS );
+ verify( !mvoptime.isNull() );
+ verify( mvoptime >= startingTS );
// apply startingTS..mvoptime portion of the oplog
{
@@ -283,7 +283,7 @@ namespace mongo {
sethbmsg("initial sync finishing up",0);
- assert( !box.getState().primary() ); // wouldn't make sense if we were.
+ verify( !box.getState().primary() ); // wouldn't make sense if we were.
{
writelock lk("local.");
diff --git a/src/mongo/db/repl/rs_initiate.cpp b/src/mongo/db/repl/rs_initiate.cpp
index 77bc6c03938..53dd94b347a 100644
--- a/src/mongo/db/repl/rs_initiate.cpp
+++ b/src/mongo/db/repl/rs_initiate.cpp
@@ -116,7 +116,7 @@ namespace mongo {
if( !initial && failures <= allowableFailures ) {
const Member* m = theReplSet->findById( i->_id );
if( m ) {
- assert( m->h().toString() == i->h.toString() );
+ verify( m->h().toString() == i->h.toString() );
}
// it's okay if the down member isn't part of the config,
// we might be adding a new member that isn't up yet
diff --git a/src/mongo/db/repl/rs_optime.h b/src/mongo/db/repl/rs_optime.h
index f0ca56927ad..792e4997372 100644
--- a/src/mongo/db/repl/rs_optime.h
+++ b/src/mongo/db/repl/rs_optime.h
@@ -38,7 +38,7 @@ namespace mongo {
bool initiated() const { return ord > 0; }
void initiate() {
- assert( !initiated() );
+ verify( !initiated() );
ord = 1000000;
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 203afe7901e..1e640e4cc2a 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -200,10 +200,10 @@ namespace mongo {
}
last = time(0);
- assert( d.dbMutex.atLeastReadLocked() );
+ verify( d.dbMutex.atLeastReadLocked() );
Client::Context c(rsoplog);
NamespaceDetails *nsd = nsdetails(rsoplog);
- assert(nsd);
+ verify(nsd);
ReverseCappedCursor u(nsd);
if( !u.ok() )
throw "our oplog empty or unreadable";
@@ -336,7 +336,7 @@ namespace mongo {
for( set<DocID>::iterator i = h.toRefetch.begin(); i != h.toRefetch.end(); i++ ) {
d = *i;
- assert( !d._id.eoo() );
+ verify( !d._id.eoo() );
{
/* TODO : slow. lots of round trips. */
@@ -375,7 +375,7 @@ namespace mongo {
bool warn = false;
- assert( !h.commonPointOurDiskloc.isNull() );
+ verify( !h.commonPointOurDiskloc.isNull() );
mongo::d.dbMutex.assertWriteLocked();
@@ -463,7 +463,7 @@ namespace mongo {
const DocID& d = i->first;
bo pattern = d._id.wrap(); // { _id : ... }
try {
- assert( d.ns && *d.ns );
+ verify( d.ns && *d.ns );
if( h.collectionsToResync.count(d.ns) ) {
/* we just synced this entire collection */
continue;
@@ -592,8 +592,8 @@ namespace mongo {
}
unsigned ReplSetImpl::_syncRollback(OplogReader&r) {
- assert( !lockedByMe() );
- assert( !d.dbMutex.atLeastReadLocked() );
+ verify( !lockedByMe() );
+ verify( !d.dbMutex.atLeastReadLocked() );
sethbmsg("rollback 0");
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index 122bb7539d9..40818feef6a 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -286,7 +286,7 @@ namespace mongo {
Member *target = 0, *stale = 0;
BSONObj oldest;
- assert(r.conn() == 0);
+ verify(r.conn() == 0);
while ((target = getMemberToSyncTo()) != 0) {
string current = target->fullName();
@@ -402,7 +402,7 @@ namespace mongo {
}
while( 1 ) {
- assert( !Lock::isLocked() );
+ verify( !Lock::isLocked() );
{
Timer timeInWriteLock;
scoped_ptr<writelock> lk;
@@ -488,14 +488,14 @@ namespace mongo {
if( str::contains(ns, ".$cmd") ) {
// a command may need a global write lock. so we will conservatively go ahead and grab one here. suboptimal. :-(
lk.reset();
- assert( !Lock::isLocked() );
+ verify( !Lock::isLocked() );
lk.reset( new writelock() );
}
else if( !Lock::isWriteLocked(ns) || Lock::isW() ) {
// we don't relock on every single op to try to be faster. however if switching collections, we have to.
// note here we must reset to 0 first to assure the old object is destructed before our new operator invocation.
lk.reset();
- assert( !Lock::isLocked() );
+ verify( !Lock::isLocked() );
lk.reset( new writelock(ns) );
}
}
@@ -666,7 +666,7 @@ namespace mongo {
static int n;
if( n != 0 ) {
log() << "replSet ERROR : more than one sync thread?" << rsLog;
- assert( n == 0 );
+ verify( n == 0 );
}
n++;
@@ -751,7 +751,7 @@ namespace mongo {
}
}
- assert(slave->slave);
+ verify(slave->slave);
const Member *target = rs->_currentSyncTarget;
if (!target || rs->box.getState().primary()
diff --git a/src/mongo/db/repl_block.cpp b/src/mongo/db/repl_block.cpp
index 4087e2e8bb4..3be01c7d3db 100644
--- a/src/mongo/db/repl_block.cpp
+++ b/src/mongo/db/repl_block.cpp
@@ -188,10 +188,10 @@ namespace mongo {
if ( lastOp.isNull() )
return;
- assert( str::startsWith(ns, "local.oplog.") );
+ verify( str::startsWith(ns, "local.oplog.") );
Client * c = curop.getClient();
- assert(c);
+ verify(c);
BSONObj rid = c->getRemoteID();
if ( rid.isEmpty() )
return;
diff --git a/src/mongo/db/replutil.h b/src/mongo/db/replutil.h
index 0ebcaea3dc5..aa8c25ad5f5 100644
--- a/src/mongo/db/replutil.h
+++ b/src/mongo/db/replutil.h
@@ -66,7 +66,7 @@ namespace mongo {
return true;
if ( ! dbname ) {
Database *database = cc().database();
- assert( database );
+ verify( database );
dbname = database->name.c_str();
}
return strcmp( dbname , "local" ) == 0;
@@ -74,7 +74,7 @@ namespace mongo {
inline bool isMasterNs( const char *ns ) {
if ( _isMaster() )
return true;
- assert( ns );
+ verify( ns );
if ( ! str::startsWith( ns , "local" ) )
return false;
return ns[5] == 0 || ns[5] == '.';
diff --git a/src/mongo/db/restapi.cpp b/src/mongo/db/restapi.cpp
index 578763632fd..560dbce97c5 100644
--- a/src/mongo/db/restapi.cpp
+++ b/src/mongo/db/restapi.cpp
@@ -243,7 +243,7 @@ namespace mongo {
}
writelocktry wl(10000);
- assert( wl.got() );
+ verify( wl.got() );
Client::Context cx( "admin.system.users", dbpath, false );
}
diff --git a/src/mongo/db/scanandorder.cpp b/src/mongo/db/scanandorder.cpp
index fa6734e5292..b93b632f035 100644
--- a/src/mongo/db/scanandorder.cpp
+++ b/src/mongo/db/scanandorder.cpp
@@ -26,7 +26,7 @@ namespace mongo {
const unsigned ScanAndOrder::MaxScanAndOrderBytes = 32 * 1024 * 1024;
void ScanAndOrder::add(const BSONObj& o, const DiskLoc* loc) {
- assert( o.isValid() );
+ verify( o.isValid() );
BSONObj k;
try {
k = _order.getKeyFromObject(o);
@@ -48,7 +48,7 @@ namespace mongo {
return;
}
BestMap::iterator i;
- assert( _best.end() != _best.begin() );
+ verify( _best.end() != _best.begin() );
i = _best.end();
i--;
_addIfBetter(k, o, i, loc);
@@ -98,7 +98,7 @@ namespace mongo {
void ScanAndOrder::_validateAndUpdateApproxSize( const int approxSizeDelta ) {
// note : adjust when bson return limit adjusts. note this limit should be a bit higher.
int newApproxSize = _approxSize + approxSizeDelta;
- assert( newApproxSize >= 0 );
+ verify( newApproxSize >= 0 );
uassert( ScanAndOrderMemoryLimitExceededAssertionCode,
"too much data for sort() with no index. add an index or specify a smaller limit",
(unsigned)newApproxSize < MaxScanAndOrderBytes );
diff --git a/src/mongo/db/scanandorder.h b/src/mongo/db/scanandorder.h
index 54775d770d5..acb19d21ad0 100644
--- a/src/mongo/db/scanandorder.h
+++ b/src/mongo/db/scanandorder.h
@@ -34,7 +34,7 @@ namespace mongo {
FieldRangeVector _keyCutter;
public:
KeyType(const BSONObj &pattern, const FieldRangeSet &frs):
- _spec((assert(!pattern.isEmpty()),pattern)),
+ _spec((verify(!pattern.isEmpty()),pattern)),
_keyCutter(frs, _spec, 1) {
}
diff --git a/src/mongo/db/security_common.cpp b/src/mongo/db/security_common.cpp
index a480919c27e..fa0e2931e58 100644
--- a/src/mongo/db/security_common.cpp
+++ b/src/mongo/db/security_common.cpp
@@ -109,7 +109,7 @@ namespace mongo {
void CmdAuthenticate::authenticate(const string& dbname, const string& user, const bool readOnly) {
ClientBasic* c = ClientBasic::getCurrent();
- assert(c);
+ verify(c);
AuthenticationInfo *ai = c->getAuthenticationInfo();
if ( readOnly ) {
diff --git a/src/mongo/db/stats/snapshots.cpp b/src/mongo/db/stats/snapshots.cpp
index 900cc4ff1ad..38bd348d5fb 100644
--- a/src/mongo/db/stats/snapshots.cpp
+++ b/src/mongo/db/stats/snapshots.cpp
@@ -37,7 +37,7 @@ namespace mongo {
SnapshotDelta::SnapshotDelta( const SnapshotData& older , const SnapshotData& newer )
: _older( older ) , _newer( newer ) {
- assert( _newer._created > _older._created );
+ verify( _newer._created > _older._created );
_elapsed = _newer._created - _older._created;
}
@@ -45,7 +45,7 @@ namespace mongo {
return Top::CollectionData( _older._globalUsage , _newer._globalUsage );
}
Top::UsageMap SnapshotDelta::collectionUsageDiff() {
- assert( _newer._created > _older._created );
+ verify( _newer._created > _older._created );
Top::UsageMap u;
for ( Top::UsageMap::const_iterator i=_newer._usage.begin(); i != _newer._usage.end(); i++ ) {
diff --git a/src/mongo/db/stats/top.h b/src/mongo/db/stats/top.h
index dc21927b7d3..22470cb5d90 100644
--- a/src/mongo/db/stats/top.h
+++ b/src/mongo/db/stats/top.h
@@ -18,8 +18,6 @@
#pragma once
#include <boost/date_time/posix_time/posix_time.hpp>
-#undef assert
-#define assert MONGO_assert
namespace mongo {
diff --git a/src/mongo/db/taskqueue.h b/src/mongo/db/taskqueue.h
index 1107d479448..20fad90db0b 100644
--- a/src/mongo/db/taskqueue.h
+++ b/src/mongo/db/taskqueue.h
@@ -84,7 +84,7 @@ namespace mongo {
}
_drain( _queues[toDrain] );
- assert( _queues[toDrain].empty() );
+ verify( _queues[toDrain].empty() );
}
private:
@@ -102,7 +102,7 @@ namespace mongo {
MT::go(v);
}
queue.clear();
- DEV assert( queue.capacity() == oldCap ); // just checking that clear() doesn't deallocate, we don't want that
+ DEV verify( queue.capacity() == oldCap ); // just checking that clear() doesn't deallocate, we don't want that
}
};
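
All of the hunks above make the same mechanical change: assert(...) becomes verify(...). The rationale is that the C assert macro expands to nothing when NDEBUG is defined, while mongo's verify() is evaluated in every build; this also lets stats/top.h drop its #undef assert / #define assert MONGO_assert shim. A minimal standalone sketch of an always-on check in that spirit follows; the real verify() reports the failure and throws rather than aborting, so this is a simplified stand-in, not the mongo macro.

    // always_on_verify.cpp -- illustrative stand-in for an always-evaluated check.
    #include <cstdio>
    #include <cstdlib>

    // Unlike assert(), this is not compiled out under NDEBUG: the condition
    // is evaluated and checked in release builds too.
    #define verify(expr)                                                   \
        do {                                                               \
            if (!(expr)) {                                                 \
                std::fprintf(stderr, "verify(%s) failed at %s:%d\n",       \
                             #expr, __FILE__, __LINE__);                   \
                std::abort(); /* the real macro throws an exception */     \
            }                                                              \
        } while (0)

    int main() {
        int nOperand = 1;
        verify(nOperand > 0);  // still checked when built with -DNDEBUG
        std::puts("ok");
        return 0;
    }
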