Diffstat (limited to 'src')
-rw-r--r--  src/mongo/bson/bson-inl.h | 12
-rw-r--r--  src/mongo/bson/bson.h | 4
-rw-r--r--  src/mongo/bson/bsonelement.h | 8
-rw-r--r--  src/mongo/bson/bsonobj.h | 6
-rw-r--r--  src/mongo/bson/bsonobjbuilder.h | 14
-rw-r--r--  src/mongo/bson/bsonobjiterator.h | 12
-rw-r--r--  src/mongo/bson/oid.cpp | 9
-rw-r--r--  src/mongo/bson/util/builder.h | 8
-rw-r--r--  src/mongo/bson/util/misc.h | 4
-rw-r--r--  src/mongo/client/clientAndShell.cpp | 6
-rw-r--r--  src/mongo/client/clientOnly.cpp | 2
-rw-r--r--  src/mongo/client/connpool.cpp | 8
-rw-r--r--  src/mongo/client/connpool.h | 6
-rw-r--r--  src/mongo/client/dbclient.cpp | 12
-rw-r--r--  src/mongo/client/dbclient_rs.cpp | 14
-rw-r--r--  src/mongo/client/dbclientcursor.cpp | 32
-rw-r--r--  src/mongo/client/dbclientinterface.h | 16
-rw-r--r--  src/mongo/client/distlock.cpp | 14
-rw-r--r--  src/mongo/client/distlock.h | 8
-rw-r--r--  src/mongo/client/distlock_test.cpp | 2
-rw-r--r--  src/mongo/client/examples/authTest.cpp | 4
-rw-r--r--  src/mongo/client/examples/clientTest.cpp | 64
-rw-r--r--  src/mongo/client/examples/httpClientTest.cpp | 4
-rw-r--r--  src/mongo/client/examples/mongoperf.cpp | 4
-rw-r--r--  src/mongo/client/examples/whereExample.cpp | 2
-rw-r--r--  src/mongo/client/gridfs.cpp | 2
-rw-r--r--  src/mongo/client/model.cpp | 2
-rw-r--r--  src/mongo/client/parallel.cpp | 88
-rw-r--r--  src/mongo/client/parallel.h | 4
-rw-r--r--  src/mongo/client/redef_macros.h | 2
-rw-r--r--  src/mongo/client/syncclusterconnection.cpp | 10
-rw-r--r--  src/mongo/client/syncclusterconnection.h | 2
-rw-r--r--  src/mongo/client/undef_macros.h | 2
-rw-r--r--  src/mongo/db/btree.cpp | 90
-rw-r--r--  src/mongo/db/btree.h | 16
-rw-r--r--  src/mongo/db/btreebuilder.cpp | 2
-rw-r--r--  src/mongo/db/btreecursor.cpp | 16
-rw-r--r--  src/mongo/db/cap.cpp | 30
-rw-r--r--  src/mongo/db/client.cpp | 18
-rw-r--r--  src/mongo/db/client.h | 4
-rw-r--r--  src/mongo/db/clientcursor.cpp | 26
-rw-r--r--  src/mongo/db/clientcursor.h | 2
-rw-r--r--  src/mongo/db/cloner.cpp | 8
-rw-r--r--  src/mongo/db/cmdline.cpp | 14
-rw-r--r--  src/mongo/db/commands/cloud.cpp | 14
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 4
-rwxr-xr-x  src/mongo/db/commands/document_source_cursor.cpp | 6
-rw-r--r--  src/mongo/db/commands/mr.cpp | 22
-rwxr-xr-x  src/mongo/db/commands/pipeline_command.cpp | 2
-rw-r--r--  src/mongo/db/compact.cpp | 12
-rw-r--r--  src/mongo/db/curop.cpp | 2
-rw-r--r--  src/mongo/db/cursor.cpp | 8
-rw-r--r--  src/mongo/db/cursor.h | 2
-rw-r--r--  src/mongo/db/d_concurrency.cpp | 44
-rw-r--r--  src/mongo/db/database.cpp | 18
-rw-r--r--  src/mongo/db/db.cpp | 42
-rw-r--r--  src/mongo/db/db.h | 10
-rw-r--r--  src/mongo/db/dbcommands.cpp | 22
-rw-r--r--  src/mongo/db/dbcommands_admin.cpp | 4
-rw-r--r--  src/mongo/db/dbcommands_generic.cpp | 10
-rw-r--r--  src/mongo/db/dbeval.cpp | 4
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 12
-rw-r--r--  src/mongo/db/dbmessage.h | 2
-rw-r--r--  src/mongo/db/dbwebserver.cpp | 10
-rw-r--r--  src/mongo/db/diskloc.h | 6
-rw-r--r--  src/mongo/db/dur.cpp | 26
-rw-r--r--  src/mongo/db/dur_commitjob.cpp | 2
-rw-r--r--  src/mongo/db/dur_commitjob.h | 2
-rw-r--r--  src/mongo/db/dur_journal.cpp | 36
-rw-r--r--  src/mongo/db/dur_preplogbuffer.cpp | 6
-rw-r--r--  src/mongo/db/dur_recover.cpp | 36
-rw-r--r--  src/mongo/db/explain.cpp | 6
-rw-r--r--  src/mongo/db/extsort.cpp | 4
-rw-r--r--  src/mongo/db/geo/2d.cpp | 176
-rw-r--r--  src/mongo/db/geo/core.h | 24
-rw-r--r--  src/mongo/db/geo/haystack.cpp | 6
-rw-r--r--  src/mongo/db/index.cpp | 6
-rw-r--r--  src/mongo/db/index.h | 2
-rw-r--r--  src/mongo/db/indexkey.cpp | 4
-rw-r--r--  src/mongo/db/instance.cpp | 42
-rw-r--r--  src/mongo/db/introspect.cpp | 2
-rw-r--r--  src/mongo/db/jsobj.cpp | 82
-rw-r--r--  src/mongo/db/jsobjmanipulator.h | 8
-rw-r--r--  src/mongo/db/json.cpp | 6
-rw-r--r--  src/mongo/db/key.cpp | 14
-rw-r--r--  src/mongo/db/lasterror.cpp | 4
-rw-r--r--  src/mongo/db/lasterror.h | 2
-rwxr-xr-x  src/mongo/db/matcher.cpp | 28
-rw-r--r--  src/mongo/db/matcher.h | 2
-rw-r--r--  src/mongo/db/minilex.h | 6
-rw-r--r--  src/mongo/db/mongommf.cpp | 28
-rw-r--r--  src/mongo/db/mongommf.h | 2
-rw-r--r--  src/mongo/db/mongomutex.h | 4
-rw-r--r--  src/mongo/db/namespace-inl.h | 2
-rw-r--r--  src/mongo/db/namespace_details.cpp | 40
-rw-r--r--  src/mongo/db/namespace_details.h | 8
-rw-r--r--  src/mongo/db/nonce.cpp | 4
-rw-r--r--  src/mongo/db/oplog.cpp | 24
-rw-r--r--  src/mongo/db/oplog.h | 2
-rw-r--r--  src/mongo/db/oplogreader.h | 2
-rw-r--r--  src/mongo/db/ops/delete.cpp | 2
-rw-r--r--  src/mongo/db/ops/query.cpp | 24
-rw-r--r--  src/mongo/db/ops/update.cpp | 24
-rw-r--r--  src/mongo/db/ops/update.h | 8
-rw-r--r--  src/mongo/db/pagefault.cpp | 6
-rw-r--r--  src/mongo/db/pdfile.cpp | 138
-rw-r--r--  src/mongo/db/pdfile.h | 28
-rwxr-xr-x  src/mongo/db/pipeline/accumulator.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_add_to_set.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_avg.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_first.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_last.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_min_max.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_push.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/accumulator_sum.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/document.cpp | 6
-rwxr-xr-x  src/mongo/db/pipeline/document.h | 2
-rwxr-xr-x  src/mongo/db/pipeline/document_source.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/document_source.h | 4
-rwxr-xr-x  src/mongo/db/pipeline/document_source_bson_array.cpp | 8
-rwxr-xr-x  src/mongo/db/pipeline/document_source_command_futures.cpp | 6
-rwxr-xr-x  src/mongo/db/pipeline/document_source_filter_base.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/document_source_group.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/document_source_match.cpp | 2
-rwxr-xr-x  src/mongo/db/pipeline/document_source_out.cpp | 4
-rwxr-xr-x  src/mongo/db/pipeline/document_source_sort.cpp | 6
-rwxr-xr-x  src/mongo/db/pipeline/document_source_unwind.cpp | 6
-rwxr-xr-x  src/mongo/db/pipeline/expression.cpp | 36
-rwxr-xr-x  src/mongo/db/pipeline/value.cpp | 50
-rwxr-xr-x  src/mongo/db/pipeline/value.h | 2
-rw-r--r--  src/mongo/db/projection.cpp | 4
-rw-r--r--  src/mongo/db/queryoptimizer.cpp | 16
-rw-r--r--  src/mongo/db/queryoptimizer.h | 8
-rw-r--r--  src/mongo/db/queryoptimizercursorimpl.cpp | 6
-rw-r--r--  src/mongo/db/querypattern.cpp | 2
-rw-r--r--  src/mongo/db/queryutil.cpp | 48
-rw-r--r--  src/mongo/db/queryutil.h | 8
-rw-r--r--  src/mongo/db/record.cpp | 4
-rw-r--r--  src/mongo/db/repl.cpp | 22
-rw-r--r--  src/mongo/db/repl/consensus.cpp | 18
-rw-r--r--  src/mongo/db/repl/health.cpp | 8
-rw-r--r--  src/mongo/db/repl/manager.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs.cpp | 24
-rw-r--r--  src/mongo/db/repl/rs.h | 14
-rw-r--r--  src/mongo/db/repl/rs_config.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_initialsync.cpp | 6
-rw-r--r--  src/mongo/db/repl/rs_initiate.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_optime.h | 2
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 14
-rw-r--r--  src/mongo/db/repl/rs_sync.cpp | 12
-rw-r--r--  src/mongo/db/repl_block.cpp | 4
-rw-r--r--  src/mongo/db/replutil.h | 4
-rw-r--r--  src/mongo/db/restapi.cpp | 2
-rw-r--r--  src/mongo/db/scanandorder.cpp | 6
-rw-r--r--  src/mongo/db/scanandorder.h | 2
-rw-r--r--  src/mongo/db/security_common.cpp | 2
-rw-r--r--  src/mongo/db/stats/snapshots.cpp | 4
-rw-r--r--  src/mongo/db/stats/top.h | 2
-rw-r--r--  src/mongo/db/taskqueue.h | 4
-rw-r--r--  src/mongo/dbtests/basictests.cpp | 8
-rw-r--r--  src/mongo/dbtests/btreetests.inl | 38
-rw-r--r--  src/mongo/dbtests/directclienttests.cpp | 4
-rw-r--r--  src/mongo/dbtests/framework.cpp | 8
-rw-r--r--  src/mongo/dbtests/jsobjtests.cpp | 36
-rw-r--r--  src/mongo/dbtests/jstests.cpp | 4
-rw-r--r--  src/mongo/dbtests/macrotests.cpp | 18
-rw-r--r--  src/mongo/dbtests/mmaptests.cpp | 20
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp | 2
-rw-r--r--  src/mongo/dbtests/perftests.cpp | 16
-rw-r--r--  src/mongo/dbtests/queryoptimizertests.cpp | 2
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 2
-rw-r--r--  src/mongo/dbtests/queryutiltests.cpp | 2
-rw-r--r--  src/mongo/dbtests/replsettests.cpp | 18
-rw-r--r--  src/mongo/dbtests/repltests.cpp | 22
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp | 34
-rw-r--r--  src/mongo/pch.h | 4
-rw-r--r--  src/mongo/s/balance.cpp | 10
-rw-r--r--  src/mongo/s/chunk.cpp | 36
-rw-r--r--  src/mongo/s/chunk.h | 12
-rw-r--r--  src/mongo/s/client.cpp | 2
-rw-r--r--  src/mongo/s/commands_admin.cpp | 6
-rw-r--r--  src/mongo/s/commands_public.cpp | 10
-rw-r--r--  src/mongo/s/config.cpp | 18
-rw-r--r--  src/mongo/s/config.h | 6
-rw-r--r--  src/mongo/s/config_migrate.cpp | 12
-rw-r--r--  src/mongo/s/cursors.cpp | 16
-rw-r--r--  src/mongo/s/d_chunk_manager.cpp | 10
-rw-r--r--  src/mongo/s/d_logic.cpp | 4
-rw-r--r--  src/mongo/s/d_migrate.cpp | 62
-rw-r--r--  src/mongo/s/d_split.cpp | 12
-rw-r--r--  src/mongo/s/d_state.cpp | 10
-rw-r--r--  src/mongo/s/grid.cpp | 20
-rw-r--r--  src/mongo/s/request.cpp | 10
-rw-r--r--  src/mongo/s/request.h | 6
-rw-r--r--  src/mongo/s/s_only.cpp | 4
-rw-r--r--  src/mongo/s/server.cpp | 6
-rw-r--r--  src/mongo/s/shard.cpp | 2
-rw-r--r--  src/mongo/s/shard.h | 12
-rw-r--r--  src/mongo/s/shard_version.cpp | 6
-rw-r--r--  src/mongo/s/shardconnection.cpp | 12
-rw-r--r--  src/mongo/s/shardkey.cpp | 64
-rw-r--r--  src/mongo/s/strategy.h | 2
-rw-r--r--  src/mongo/s/strategy_shard.cpp | 6
-rw-r--r--  src/mongo/s/strategy_single.cpp | 2
-rw-r--r--  src/mongo/s/util.h | 2
-rw-r--r--  src/mongo/scripting/bench.cpp | 16
-rw-r--r--  src/mongo/scripting/engine.cpp | 12
-rw-r--r--  src/mongo/scripting/engine_spidermonkey.cpp | 116
-rw-r--r--  src/mongo/scripting/engine_v8.cpp | 2
-rw-r--r--  src/mongo/scripting/sm_db.cpp | 102
-rw-r--r--  src/mongo/scripting/utils.cpp | 4
-rw-r--r--  src/mongo/scripting/v8_db.cpp | 18
-rw-r--r--  src/mongo/scripting/v8_utils.h | 2
-rw-r--r--  src/mongo/scripting/v8_wrapper.cpp | 4
-rw-r--r--  src/mongo/shell/dbshell.cpp | 38
-rw-r--r--  src/mongo/shell/shell_utils.cpp | 50
-rw-r--r--  src/mongo/shell/utils.js | 4
-rw-r--r--  src/mongo/tools/dump.cpp | 4
-rw-r--r--  src/mongo/tools/export.cpp | 2
-rw-r--r--  src/mongo/tools/restore.cpp | 2
-rw-r--r--  src/mongo/tools/sniffer.cpp | 14
-rw-r--r--  src/mongo/tools/tool.cpp | 6
-rw-r--r--  src/mongo/util/alignedbuilder.cpp | 14
-rw-r--r--  src/mongo/util/alignedbuilder.h | 2
-rw-r--r--  src/mongo/util/array.h | 4
-rw-r--r--  src/mongo/util/assert_util.cpp | 6
-rw-r--r--  src/mongo/util/assert_util.h | 14
-rw-r--r--  src/mongo/util/background.cpp | 2
-rw-r--r--  src/mongo/util/base64.h | 4
-rw-r--r--  src/mongo/util/bufreader.h | 2
-rw-r--r--  src/mongo/util/concurrency/list.h | 2
-rw-r--r--  src/mongo/util/concurrency/mutex.h | 15
-rw-r--r--  src/mongo/util/concurrency/mutexdebugger.h | 8
-rw-r--r--  src/mongo/util/concurrency/qlock.h | 14
-rw-r--r--  src/mongo/util/concurrency/rwlock.h | 8
-rw-r--r--  src/mongo/util/concurrency/rwlockimpl.h | 4
-rw-r--r--  src/mongo/util/concurrency/synchronization.cpp | 2
-rw-r--r--  src/mongo/util/concurrency/task.cpp | 2
-rw-r--r--  src/mongo/util/concurrency/thread_pool.cpp | 8
-rw-r--r--  src/mongo/util/concurrency/thread_pool.h | 2
-rw-r--r--  src/mongo/util/debug_util.cpp | 2
-rw-r--r--  src/mongo/util/file.h | 6
-rw-r--r--  src/mongo/util/file_allocator.cpp | 2
-rw-r--r--  src/mongo/util/goodies.h | 7
-rw-r--r--  src/mongo/util/hashtab.h | 4
-rw-r--r--  src/mongo/util/hex.h | 2
-rw-r--r--  src/mongo/util/log.cpp | 10
-rw-r--r--  src/mongo/util/logfile.cpp | 24
-rw-r--r--  src/mongo/util/lruishmap.h | 2
-rwxr-xr-x  src/mongo/util/mmap.cpp | 4
-rw-r--r--  src/mongo/util/mmap.h | 8
-rw-r--r--  src/mongo/util/mmap_mm.cpp | 2
-rw-r--r--  src/mongo/util/mmap_posix.cpp | 4
-rw-r--r--  src/mongo/util/mmap_win.cpp | 6
-rw-r--r--  src/mongo/util/mongoutils/test.cpp | 10
-rw-r--r--  src/mongo/util/net/hostandport.h | 10
-rw-r--r--  src/mongo/util/net/httpclient.cpp | 2
-rw-r--r--  src/mongo/util/net/listen.cpp | 4
-rw-r--r--  src/mongo/util/net/message.cpp | 2
-rw-r--r--  src/mongo/util/net/message.h | 22
-rw-r--r--  src/mongo/util/net/message_port.cpp | 10
-rw-r--r--  src/mongo/util/net/message_server_asio.cpp | 6
-rw-r--r--  src/mongo/util/net/message_server_port.cpp | 4
-rw-r--r--  src/mongo/util/net/miniwebserver.cpp | 2
-rw-r--r--  src/mongo/util/net/sock.cpp | 16
-rw-r--r--  src/mongo/util/processinfo_none.cpp | 2
-rwxr-xr-x  src/mongo/util/processinfo_win32.cpp | 6
-rw-r--r--  src/mongo/util/ramlog.cpp | 2
-rw-r--r--  src/mongo/util/text.cpp | 6
-rw-r--r--  src/mongo/util/time_support.h | 8
-rwxr-xr-x  src/mongo/util/trace.cpp | 2
-rw-r--r--  src/mongo/util/util.cpp | 30
-rw-r--r--  src/mongo/util/version.cpp | 46
273 files changed, 1765 insertions(+), 1800 deletions(-)
diff --git a/src/mongo/bson/bson-inl.h b/src/mongo/bson/bson-inl.h
index 3b3ab029046..1dd8ea3bdc8 100644
--- a/src/mongo/bson/bson-inl.h
+++ b/src/mongo/bson/bson-inl.h
@@ -162,7 +162,7 @@ dodouble:
return 0;
}
default:
- assert( false);
+ verify( false);
}
return -1;
}
@@ -200,12 +200,12 @@ dodouble:
}
inline BSONObj BSONElement::embeddedObject() const {
- assert( isABSONObj() );
+ verify( isABSONObj() );
return BSONObj(value());
}
inline BSONObj BSONElement::codeWScopeObject() const {
- assert( type() == CodeWScope );
+ verify( type() == CodeWScope );
int strSizeWNull = *(int *)( value() + 4 );
return BSONObj( value() + 4 + 4 + strSizeWNull );
}
@@ -599,7 +599,7 @@ dodouble:
len2 = strlen( p );
else {
size_t x = remain - len1 - 1;
- assert( x <= 0x7fffffff );
+ verify( x <= 0x7fffffff );
len2 = mongo::strnlen( p, (int) x );
}
//massert( 10319 , "Invalid regex options string", len2 != -1 ); // ERH - 4/28/10 - don't think this does anything
@@ -989,8 +989,8 @@ dodouble:
appendAs( j.next() , i.next().fieldName() );
}
- assert( ! i.more() );
- assert( ! j.more() );
+ verify( ! i.more() );
+ verify( ! j.more() );
}
inline BSONObj BSONObj::removeField(const StringData& name) const {
diff --git a/src/mongo/bson/bson.h b/src/mongo/bson/bson.h
index 2305b3975d7..be819b0726b 100644
--- a/src/mongo/bson/bson.h
+++ b/src/mongo/bson/bson.h
@@ -71,8 +71,8 @@ namespace bson {
}
namespace mongo {
-#if !defined(assert)
- inline void assert(bool expr) {
+#if !defined(verify)
+ inline void verify(bool expr) {
if(!expr) {
throw bson::assertion( 0 , "assertion failure in bson library" );
}
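
Note: this hunk is the standalone-driver side of the rename. When the bson headers are used without the server's assert_util.h, verify becomes a real inline function that throws bson::assertion instead of aborting the process. A minimal caller's-eye sketch, assuming the standalone bson.h from this commit:

    #include "mongo/bson/bson.h"

    int main() {
        mongo::BSONObj obj;                    // default-constructed, empty
        try {
            mongo::verify(obj.isEmpty());      // holds: no effect
            mongo::verify(!obj.isEmpty());     // fails: throws
        }
        catch (const bson::assertion&) {
            return 1;                          // "assertion failure in bson library"
        }
        return 0;
    }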
diff --git a/src/mongo/bson/bsonelement.h b/src/mongo/bson/bsonelement.h
index a9b722b44b4..4d018219177 100644
--- a/src/mongo/bson/bsonelement.h
+++ b/src/mongo/bson/bsonelement.h
@@ -262,7 +262,7 @@ namespace mongo {
/** Get raw binary data. Element must be of type BinData. Doesn't handle type 2 specially */
const char *binData(int& len) const {
// BinData: <int len> <byte subtype> <byte[len] data>
- assert( type() == BinData );
+ verify( type() == BinData );
len = valuestrsize();
return value() + 5;
}
@@ -281,14 +281,14 @@ namespace mongo {
BinDataType binDataType() const {
// BinData: <int len> <byte subtype> <byte[len] data>
- assert( type() == BinData );
+ verify( type() == BinData );
unsigned char c = (value() + 4)[0];
return (BinDataType)c;
}
/** Retrieve the regex string for a Regex element */
const char *regex() const {
- assert(type() == RegEx);
+ verify(type() == RegEx);
return value();
}
@@ -480,7 +480,7 @@ namespace mongo {
case CodeWScope:
return 65;
default:
- assert(0);
+ verify(0);
return -1;
}
}
diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h
index d9a776c14b2..37013ef583c 100644
--- a/src/mongo/bson/bsonobj.h
+++ b/src/mongo/bson/bsonobj.h
@@ -433,7 +433,7 @@ namespace mongo {
BSONObjIterator begin() const;
void appendSelfToBufBuilder(BufBuilder& b) const {
- assert( objsize() );
+ verify( objsize() );
b.appendBuf(reinterpret_cast<const void *>( objdata() ), objsize());
}
@@ -451,12 +451,12 @@ namespace mongo {
friend void intrusive_ptr_add_ref(Holder* h) { h->refCount++; }
friend void intrusive_ptr_release(Holder* h) {
#if defined(_DEBUG) // cant use dassert or DEV here
- assert((int)h->refCount > 0); // make sure we haven't already freed the buffer
+ verify((int)h->refCount > 0); // make sure we haven't already freed the buffer
#endif
if(--(h->refCount) == 0){
#if defined(_DEBUG)
unsigned sz = (unsigned&) *h->data;
- assert(sz < BSONObjMaxInternalSize * 3);
+ verify(sz < BSONObjMaxInternalSize * 3);
memset(h->data, 0xdd, sz);
#endif
free(h);
diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h
index 0b18a45abf0..a480e38a7ea 100644
--- a/src/mongo/bson/bsonobjbuilder.h
+++ b/src/mongo/bson/bsonobjbuilder.h
@@ -26,9 +26,7 @@
#include <cmath>
#include <boost/static_assert.hpp>
#if defined(MONGO_EXPOSE_MACROS)
-// boost changed it
-#undef assert
-#define assert MONGO_assert
+#define verify MONGO_verify
#endif
#include "bsonelement.h"
#include "bsonobj.h"
@@ -123,14 +121,14 @@ namespace mongo {
/** append element to the object we are building */
BSONObjBuilder& append( const BSONElement& e) {
- assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ verify( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
_b.appendBuf((void*) e.rawdata(), e.size());
return *this;
}
/** append an element but with a new name */
BSONObjBuilder& appendAs(const BSONElement& e, const StringData& fieldName) {
- assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ verify( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
_b.appendNum((char) e.type());
_b.appendStr(fieldName);
_b.appendBuf((void *) e.value(), e.valuesize());
@@ -147,12 +145,12 @@ namespace mongo {
/** add a subobject as a member */
BSONObjBuilder& appendObject(const StringData& fieldName, const char * objdata , int size = 0 ) {
- assert( objdata );
+ verify( objdata );
if ( size == 0 ) {
size = *((int*)objdata);
}
- assert( size > 4 && size < 100000000 );
+ verify( size > 4 && size < 100000000 );
_b.appendNum((char) Object);
_b.appendStr(fieldName);
@@ -582,7 +580,7 @@ namespace mongo {
/* assume ownership of the buffer - you must then free it (with free()) */
char* decouple(int& l) {
char *x = _done();
- assert( x );
+ verify( x );
l = _b.len();
_b.decouple();
return x;
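
Note: the deleted "// boost changed it / #undef assert / #define assert MONGO_assert" block above is the motivation for the whole rename. Boost headers pull in <cassert>, which redefines the standard assert macro behind MongoDB's back, so the project macro had to be re-pinned after every such include; no standard header touches a macro named verify. A self-contained sketch of the collision, with an assumed trivial MONGO_verify body (the real one lives in util/assert_util.h):

    #include <cstdlib>

    // Assumed shape, for illustration only.
    #define MONGO_verify(x) do { if (!(x)) std::abort(); } while (0)
    #define verify MONGO_verify

    #include <cassert>   // clobbers assert(), leaves verify() alone

    int main() {
        verify(1 + 1 == 2);   // still expands to MONGO_verify
        return 0;
    }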
diff --git a/src/mongo/bson/bsonobjiterator.h b/src/mongo/bson/bsonobjiterator.h
index dfa289372d6..8bcf62cbafa 100644
--- a/src/mongo/bson/bsonobjiterator.h
+++ b/src/mongo/bson/bsonobjiterator.h
@@ -58,13 +58,13 @@ namespace mongo {
/** @return the next element in the object. For the final element, element.eoo() will be true. */
BSONElement next( bool checkEnd ) {
- assert( _pos <= _theend );
+ verify( _pos <= _theend );
BSONElement e( _pos, checkEnd ? (int)(_theend + 1 - _pos) : -1 );
_pos += e.size( checkEnd ? (int)(_theend + 1 - _pos) : -1 );
return e;
}
BSONElement next() {
- assert( _pos <= _theend );
+ verify( _pos <= _theend );
BSONElement e(_pos);
_pos += e.size();
return e;
@@ -73,7 +73,7 @@ namespace mongo {
void operator++(int) { next(); }
BSONElement operator*() {
- assert( _pos <= _theend );
+ verify( _pos <= _theend );
return BSONElement(_pos);
}
@@ -86,7 +86,7 @@ namespace mongo {
class BSONIteratorSorted {
public:
~BSONIteratorSorted() {
- assert( _fields );
+ verify( _fields );
delete[] _fields;
_fields = 0;
}
@@ -96,7 +96,7 @@ namespace mongo {
}
BSONElement next() {
- assert( _fields );
+ verify( _fields );
if ( _cur < _nfields )
return BSONElement( _fields[_cur++] );
return BSONElement();
@@ -141,7 +141,7 @@ namespace mongo {
const char *f = e.fieldName();
try {
unsigned u = stringToNum(f);
- assert( u < 1000000 );
+ verify( u < 1000000 );
if( u >= v.size() )
v.resize(u+1);
v[u] = e;
diff --git a/src/mongo/bson/oid.cpp b/src/mongo/bson/oid.cpp
index 77c7cd0687f..b70b96352f8 100644
--- a/src/mongo/bson/oid.cpp
+++ b/src/mongo/bson/oid.cpp
@@ -21,8 +21,7 @@
#include "../db/nonce.h"
#include "bsonobjbuilder.h"
#include <boost/functional/hash.hpp>
-#undef assert
-#define assert MONGO_assert
+#define verify MONGO_verify
BOOST_STATIC_ASSERT( sizeof(mongo::OID) == 12 );
@@ -71,7 +70,7 @@ namespace mongo {
nonce64 a = Security::getNonceDuringInit();
nonce64 b = Security::getNonceDuringInit();
nonce64 c = Security::getNonceDuringInit();
- assert( !(a==b && b==c) );
+ verify( !(a==b && b==c) );
}
unsigned long long n = Security::getNonceDuringInit();
@@ -106,7 +105,7 @@ namespace mongo {
// xor in the pid into _pid. this reduces the probability of collisions.
foldInPid(x);
ourMachineAndPid = genMachineAndPid();
- assert( x != ourMachineAndPid );
+ verify( x != ourMachineAndPid );
ourMachineAndPid = x;
}
@@ -134,7 +133,7 @@ namespace mongo {
}
void OID::init( string s ) {
- assert( s.size() == 24 );
+ verify( s.size() == 24 );
const char *p = s.c_str();
for( int i = 0; i < 12; i++ ) {
data[i] = fromHex(p);
diff --git a/src/mongo/bson/util/builder.h b/src/mongo/bson/util/builder.h
index 38e606d4153..97aacd339da 100644
--- a/src/mongo/bson/util/builder.h
+++ b/src/mongo/bson/util/builder.h
@@ -290,8 +290,8 @@ namespace mongo {
const int maxSize = 32;
char * start = _buf.grow( maxSize );
int z = mongo_snprintf( start , maxSize , "%.16g" , x );
- assert( z >= 0 );
- assert( z < maxSize );
+ verify( z >= 0 );
+ verify( z < maxSize );
_buf.l = prev + z;
if( strchr(start, '.') == 0 && strchr(start, 'E') == 0 && strchr(start, 'N') == 0 ) {
write( ".0" , 2 );
@@ -324,8 +324,8 @@ namespace mongo {
StringBuilder& SBNUM(T val,int maxSize,const char *macro) {
int prev = _buf.l;
int z = mongo_snprintf( _buf.grow(maxSize) , maxSize , macro , (val) );
- assert( z >= 0 );
- assert( z < maxSize );
+ verify( z >= 0 );
+ verify( z < maxSize );
_buf.l = prev + z;
return *this;
}
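
Note: the two verify calls in each hunk above encode the snprintf contract that the numeric appenders rely on: a negative return signals an encoding error, and a return of maxSize or more signals truncation, either of which would corrupt the builder's length bookkeeping. The same idiom in isolation, using plain snprintf rather than mongo_snprintf:

    #include <cassert>
    #include <cstdio>

    // Format a double into a fixed reservation and check the contract.
    int appendDouble(char* start, int maxSize, double x) {
        int z = snprintf(start, maxSize, "%.16g", x);
        assert(z >= 0);        // negative means encoding error
        assert(z < maxSize);   // must fit, NUL terminator included
        return z;              // bytes written, excluding the NUL
    }

    int main() {
        char buf[32];
        return appendDouble(buf, sizeof(buf), 3.141592653589793) > 0 ? 0 : 1;
    }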
diff --git a/src/mongo/bson/util/misc.h b/src/mongo/bson/util/misc.h
index a50cddf208a..ba08b3974bc 100644
--- a/src/mongo/bson/util/misc.h
+++ b/src/mongo/bson/util/misc.h
@@ -90,8 +90,8 @@ namespace mongo {
}
time_t toTimeT() const {
// cant use uassert from bson/util
- assert((long long)millis >= 0); // TODO when millis is signed, delete
- assert(((long long)millis/1000) < (std::numeric_limits<time_t>::max)());
+ verify((long long)millis >= 0); // TODO when millis is signed, delete
+ verify(((long long)millis/1000) < (std::numeric_limits<time_t>::max)());
return millis / 1000;
}
};
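
Note: these checks guard the narrowing from an unsigned 64-bit millisecond count down to time_t: a value that would be negative as a signed quantity, or whose second count exceeds time_t's range on 32-bit platforms, must not slip through the division. A standalone sketch of the same guard:

    #include <cassert>
    #include <ctime>
    #include <limits>

    time_t millisToTimeT(unsigned long long millis) {
        assert((long long)millis >= 0);   // reject wrapped values
        long long maxSecs = (long long)(std::numeric_limits<time_t>::max)();
        assert((long long)(millis / 1000) < maxSecs);
        return (time_t)(millis / 1000);
    }

    int main() { return millisToTimeT(1000ULL) == 1 ? 0 : 1; }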
diff --git a/src/mongo/client/clientAndShell.cpp b/src/mongo/client/clientAndShell.cpp
index 401c8c9eb63..02a603c4e82 100644
--- a/src/mongo/client/clientAndShell.cpp
+++ b/src/mongo/client/clientAndShell.cpp
@@ -75,16 +75,16 @@ namespace mongo {
}
void Shard::getAllShards( vector<Shard>& all ) {
- assert(0);
+ verify(0);
}
bool Shard::isAShardNode( const string& ident ) {
- assert(0);
+ verify(0);
return false;
}
string prettyHostName() {
- assert(0);
+ verify(0);
return "";
}
diff --git a/src/mongo/client/clientOnly.cpp b/src/mongo/client/clientOnly.cpp
index 767e97ce878..e44de42a446 100644
--- a/src/mongo/client/clientOnly.cpp
+++ b/src/mongo/client/clientOnly.cpp
@@ -22,7 +22,7 @@ namespace mongo {
string dynHostMyName() { return ""; }
void dynHostResolve(string& name, int& port) {
- assert(false);
+ verify(false);
}
}
diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp
index 89f804fd177..41ff1c0aa2d 100644
--- a/src/mongo/client/connpool.cpp
+++ b/src/mongo/client/connpool.cpp
@@ -60,7 +60,7 @@ namespace mongo {
continue;
}
- assert( sc.conn->getSoTimeout() == socketTimeout );
+ verify( sc.conn->getSoTimeout() == socketTimeout );
return sc.conn;
@@ -133,7 +133,7 @@ namespace mongo {
}
DBClientBase* DBConnectionPool::_get(const string& ident , double socketTimeout ) {
- assert( ! inShutdown() );
+ verify( ! inShutdown() );
scoped_lock L(_mutex);
PoolForHost& p = _pools[PoolKey(ident,socketTimeout)];
return p.get( this , socketTimeout );
@@ -347,7 +347,7 @@ namespace mongo {
++ap;
++bp;
}
- assert(false);
+ verify(false);
}
bool DBConnectionPool::poolKeyCompare::operator()( const PoolKey& a , const PoolKey& b ) const {
@@ -387,7 +387,7 @@ namespace mongo {
// ------ ScopedDbConnection ------
ScopedDbConnection * ScopedDbConnection::steal() {
- assert( _conn );
+ verify( _conn );
ScopedDbConnection * n = new ScopedDbConnection( _host , _conn, _socketTimeout );
_conn = 0;
return n;
diff --git a/src/mongo/client/connpool.h b/src/mongo/client/connpool.h
index 8733abb1f90..2c7618bc630 100644
--- a/src/mongo/client/connpool.h
+++ b/src/mongo/client/connpool.h
@@ -38,9 +38,9 @@ namespace mongo {
: _created(0) {}
PoolForHost( const PoolForHost& other ) {
- assert(other._pool.size() == 0);
+ verify(other._pool.size() == 0);
_created = other._created;
- assert( _created == 0 );
+ verify( _created == 0 );
}
~PoolForHost();
@@ -50,7 +50,7 @@ namespace mongo {
void createdOne( DBClientBase * base );
long long numCreated() const { return _created; }
- ConnectionString::ConnectionType type() const { assert(_created); return _type; }
+ ConnectionString::ConnectionType type() const { verify(_created); return _type; }
/**
* gets a connection or return NULL
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index fc1ace395e3..3c5544f5658 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -111,7 +111,7 @@ namespace mongo {
break;
}
- assert( 0 );
+ verify( 0 );
return 0;
}
@@ -151,7 +151,7 @@ namespace mongo {
case SYNC:
return "sync";
}
- assert(0);
+ verify(0);
return "";
}
@@ -164,7 +164,7 @@ namespace mongo {
Query& Query::where(const string &jscode, BSONObj scope) {
/* use where() before sort() and hint() and explain(), else this will assert. */
- assert( ! isComplex() );
+ verify( ! isComplex() );
BSONObjBuilder b;
b.appendElements(obj);
b.appendWhere(jscode, scope);
@@ -395,7 +395,7 @@ namespace mongo {
}
{
BSONElement e = info.getField("nonce");
- assert( e.type() == String );
+ verify( e.type() == String );
nonce = e.valuestr();
}
@@ -443,7 +443,7 @@ namespace mongo {
}
bool DBClientWithCommands::createCollection(const string &ns, long long size, bool capped, int max, BSONObj *info) {
- assert(!capped||size);
+ verify(!capped||size);
BSONObj o;
if ( info == 0 ) info = &o;
BSONObjBuilder b;
@@ -1068,7 +1068,7 @@ namespace mongo {
*host = _serverString;
if ( clientSet && nReturned ) {
- assert(data);
+ verify(data);
BSONObj o(data);
if ( isNotMasterErrorString( getErrField(o) ) ) {
clientSet->isntMaster();
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index 50028c28243..c2de3f1614d 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -248,7 +248,7 @@ namespace mongo {
bool wasMaster = false;
// This is always true, since checked in port()
- assert( prev.port() >= 0 );
+ verify( prev.port() >= 0 );
if( prev.host().size() ){
scoped_lock lk( _lock );
for ( unsigned i=0; i<_nodes.size(); i++ ) {
@@ -292,7 +292,7 @@ namespace mongo {
uassert(15899, str::stream() << "No suitable member found for slaveOk query in replica set: " << _name, _master >= 0 && _nodes[_master].ok);
// Fall back to primary
- assert( static_cast<unsigned>(_master) < _nodes.size() );
+ verify( static_cast<unsigned>(_master) < _nodes.size() );
LOG(2) << "dbclient_rs getSlave no member in secondary state found, returning primary " << _nodes[ _master ] << endl;
return _nodes[_master].addr;
}
@@ -433,7 +433,7 @@ namespace mongo {
set<string> added = diff.first;
set<int> removed = diff.second;
- assert( added.size() > 0 || removed.size() > 0 );
+ verify( added.size() > 0 || removed.size() > 0 );
changed = true;
// Delete from the end so we don't invalidate as we delete, delete indices are ascending
@@ -472,7 +472,7 @@ namespace mongo {
bool ReplicaSetMonitor::_checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose , int nodesOffset ) {
- assert( c );
+ verify( c );
scoped_lock lk( _checkConnectionLock );
bool isMaster = false;
bool changed = false;
@@ -800,7 +800,7 @@ namespace mongo {
// since we don't know which server it belongs to
// can't assume master because of slave ok
// and can have a cursor survive a master change
- assert(0);
+ verify(0);
}
void DBClientReplicaSet::isntMaster() {
@@ -882,7 +882,7 @@ namespace mongo {
bool DBClientReplicaSet::recv( Message& m ) {
- assert( _lazyState._lastClient );
+ verify( _lazyState._lastClient );
// TODO: It would be nice if we could easily wrap a conn error as a result error
try {
@@ -939,7 +939,7 @@ namespace mongo {
}
else{
(void)wasMaster; // silence set-but-not-used warning
- // assert( wasMaster );
+ // verify( wasMaster );
// printStackTrace();
log() << "too many retries (" << _lazyState._retries << "), could not get data from replica set" << endl;
}
diff --git a/src/mongo/client/dbclientcursor.cpp b/src/mongo/client/dbclientcursor.cpp
index bfd4108f6ea..ab80830adf3 100644
--- a/src/mongo/client/dbclientcursor.cpp
+++ b/src/mongo/client/dbclientcursor.cpp
@@ -62,7 +62,7 @@ namespace mongo {
bool DBClientCursor::init() {
Message toSend;
_assembleInit( toSend );
- assert( _client );
+ verify( _client );
if ( !_client->call( toSend, *batch.m, false, &_originalHost ) ) {
// log msg temp?
log() << "DBClientCursor::init call() failed" << endl;
@@ -107,11 +107,11 @@ namespace mongo {
}
void DBClientCursor::requestMore() {
- assert( cursorId && batch.pos == batch.nReturned );
+ verify( cursorId && batch.pos == batch.nReturned );
if (haveLimit) {
nToReturn -= batch.nReturned;
- assert(nToReturn > 0);
+ verify(nToReturn > 0);
}
BufBuilder b;
b.appendNum(opts);
@@ -129,7 +129,7 @@ namespace mongo {
dataReceived();
}
else {
- assert( _scopedHost.size() );
+ verify( _scopedHost.size() );
ScopedDbConnection conn( _scopedHost );
conn->call( toSend , *response );
_client = conn.get();
@@ -142,10 +142,10 @@ namespace mongo {
/** with QueryOption_Exhaust, the server just blasts data at us (marked at end with cursorid==0). */
void DBClientCursor::exhaustReceiveMore() {
- assert( cursorId && batch.pos == batch.nReturned );
- assert( !haveLimit );
+ verify( cursorId && batch.pos == batch.nReturned );
+ verify( !haveLimit );
auto_ptr<Message> response(new Message());
- assert( _client );
+ verify( _client );
if ( _client->recv(*response) ) {
batch.m = response;
dataReceived();
@@ -163,7 +163,7 @@ namespace mongo {
if ( qr->resultFlags() & ResultFlag_CursorNotFound ) {
// cursor id no longer valid at the server.
- assert( qr->cursorId == 0 );
+ verify( qr->cursorId == 0 );
cursorId = 0; // 0 indicates no longer valid (dead)
if ( ! ( opts & QueryOption_CursorTailable ) )
throw UserException( 13127 , "getMore: cursor didn't exist on server, possible restart or timeout?" );
@@ -183,12 +183,12 @@ namespace mongo {
if( qr->resultFlags() & ResultFlag_ShardConfigStale ) {
BSONObj error;
- assert( peekError( &error ) );
+ verify( peekError( &error ) );
throw RecvStaleConfigException( (string)"stale config on lazy receive" + causedBy( getErrField( error ) ), error );
}
/* this assert would fire the way we currently work:
- assert( nReturned || cursorId == 0 );
+ verify( nReturned || cursorId == 0 );
*/
}
@@ -267,17 +267,17 @@ namespace mongo {
vector<BSONObj> v;
peek(v, 1);
- assert( v.size() == 1 );
- assert( hasErrField( v[0] ) );
+ verify( v.size() == 1 );
+ verify( hasErrField( v[0] ) );
if( error ) *error = v[0].getOwned();
return true;
}
void DBClientCursor::attach( AScopedConnection * conn ) {
- assert( _scopedHost.size() == 0 );
- assert( conn );
- assert( conn->get() );
+ verify( _scopedHost.size() == 0 );
+ verify( conn );
+ verify( conn->get() );
if ( conn->get()->type() == ConnectionString::SET ||
conn->get()->type() == ConnectionString::SYNC ) {
@@ -322,7 +322,7 @@ namespace mongo {
}
else {
- assert( _scopedHost.size() );
+ verify( _scopedHost.size() );
ScopedDbConnection conn( _scopedHost );
if( DBClientConnection::getLazyKillCursor() )
diff --git a/src/mongo/client/dbclientinterface.h b/src/mongo/client/dbclientinterface.h
index a979581b4a9..6f05424b348 100644
--- a/src/mongo/client/dbclientinterface.h
+++ b/src/mongo/client/dbclientinterface.h
@@ -150,17 +150,17 @@ namespace mongo {
switch ( _type ) {
case MASTER:
- assert( _servers.size() == 1 );
+ verify( _servers.size() == 1 );
break;
case SET:
- assert( _setName.size() );
- assert( _servers.size() >= 1 ); // 1 is ok since we can derive
+ verify( _setName.size() );
+ verify( _servers.size() >= 1 ); // 1 is ok since we can derive
break;
case PAIR:
- assert( _servers.size() == 2 );
+ verify( _servers.size() == 2 );
break;
default:
- assert( _servers.size() > 0 );
+ verify( _servers.size() > 0 );
}
_finishInit();
@@ -178,7 +178,7 @@ namespace mongo {
}
else {
_type = favoredMultipleType;
- assert( _type == SET || _type == SYNC );
+ verify( _type == SET || _type == SYNC );
}
_finishInit();
}
@@ -408,7 +408,7 @@ namespace mongo {
virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 ) = 0;
virtual void sayPiggyBack( Message &toSend ) = 0;
/* used by QueryOption_Exhaust. To use that your subclass must implement this. */
- virtual bool recv( Message& m ) { assert(false); return false; }
+ virtual bool recv( Message& m ) { verify(false); return false; }
// In general, for lazy queries, we'll need to say, recv, then checkResponse
virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL ) {
if( retry ) *retry = false; if( targetHost ) *targetHost = "";
@@ -977,7 +977,7 @@ namespace mongo {
*/
bool isFailed() const { return _failed; }
- MessagingPort& port() { assert(p); return *p; }
+ MessagingPort& port() { verify(p); return *p; }
string toStringLong() const {
stringstream ss;
diff --git a/src/mongo/client/distlock.cpp b/src/mongo/client/distlock.cpp
index aa8936d5ce8..9310e8ad3b0 100644
--- a/src/mongo/client/distlock.cpp
+++ b/src/mongo/client/distlock.cpp
@@ -55,7 +55,7 @@ namespace mongo {
string getDistLockProcess() {
if (!_cachedProcessString)
initModule();
- assert( _cachedProcessString );
+ verify( _cachedProcessString );
return *_cachedProcessString;
}
@@ -275,7 +275,7 @@ namespace mongo {
string pingId = pingThreadId( conn, processId );
- assert( _seen.count( pingId ) > 0 );
+ verify( _seen.count( pingId ) > 0 );
_kill.insert( pingId );
}
@@ -483,7 +483,7 @@ namespace mongo {
}
// This should always be true, if not, we are using the lock incorrectly.
- assert( _name != "" );
+ verify( _name != "" );
// write to dummy if 'other' is null
BSONObj dummyOther;
@@ -638,7 +638,7 @@ namespace mongo {
}
else {
- assert( canReenter );
+ verify( canReenter );
// Lock may be re-entered, reset our timer if succeeds or fails
// Not strictly necessary, but helpful for small timeouts where thread scheduling is significant.
@@ -776,7 +776,7 @@ namespace mongo {
// Our lock should now be set until forcing.
// It's possible another lock has won entirely by now, so state could be 1 or 2 here
- assert( indUpdate["state"].numberInt() > 0 );
+ verify( indUpdate["state"].numberInt() > 0 );
}
// else our lock is the same, in which case we're safe, or it's a bigger lock,
@@ -790,7 +790,7 @@ namespace mongo {
<< up[1].first << causedBy( e ), 13661 );
}
- assert( !indUpdate.isEmpty() );
+ verify( !indUpdate.isEmpty() );
// Find max TS value
if ( currLock.isEmpty() || currLock["ts"] < indUpdate["ts"] ) {
@@ -891,7 +891,7 @@ namespace mongo {
// and so cannot tell you what lock ts you should try later.
void DistributedLock::unlock( BSONObj* oldLockPtr ) {
- assert( _name != "" );
+ verify( _name != "" );
string lockName = _name + string("/") + _processId;
diff --git a/src/mongo/client/distlock.h b/src/mongo/client/distlock.h
index 44b54560fb4..f8b55cc66f8 100644
--- a/src/mongo/client/distlock.h
+++ b/src/mongo/client/distlock.h
@@ -227,7 +227,7 @@ namespace mongo {
~dist_lock_try() {
if ( _got ) {
- assert( ! _other.isEmpty() );
+ verify( ! _other.isEmpty() );
_lock->unlock( &_other );
}
}
@@ -237,9 +237,9 @@ namespace mongo {
}
bool retry() {
- assert( _lock );
- assert( _got );
- assert( ! _other.isEmpty() );
+ verify( _lock );
+ verify( _got );
+ verify( ! _other.isEmpty() );
return _got = _lock->lock_try( _why , true, &_other );
}
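
Note: the invariants made explicit above describe dist_lock_try's RAII contract: the destructor unlocks only when the constructor actually acquired the lock, and retry() requires a live lock plus the _other document captured from the losing attempt. A hypothetical usage sketch; the constructor signature and got() accessor are assumed from context, not quoted from this commit:

    // Hypothetical sketch of the RAII pattern around DistributedLock.
    void runBalancingRound(DistributedLock& balancerLock) {
        dist_lock_try dlk(&balancerLock, "doing balance round");
        if (!dlk.got()) {
            return;   // another process holds the lock; _other names it
        }
        // ... work done while holding the distributed lock ...
    }   // ~dist_lock_try: verify(!_other.isEmpty()); unlock(&_other)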
diff --git a/src/mongo/client/distlock_test.cpp b/src/mongo/client/distlock_test.cpp
index a46caa44c11..9f78bbbb2c3 100644
--- a/src/mongo/client/distlock_test.cpp
+++ b/src/mongo/client/distlock_test.cpp
@@ -264,7 +264,7 @@ namespace mongo {
}
else {
log() << "**** Not unlocking for thread " << threadId << endl;
- assert( DistributedLock::killPinger( *myLock ) );
+ verify( DistributedLock::killPinger( *myLock ) );
// We're simulating a crashed process...
break;
}
diff --git a/src/mongo/client/examples/authTest.cpp b/src/mongo/client/examples/authTest.cpp
index 71cdd390cff..05a6de45b16 100644
--- a/src/mongo/client/examples/authTest.cpp
+++ b/src/mongo/client/examples/authTest.cpp
@@ -48,7 +48,7 @@ int main( int argc, const char **argv ) {
bool ok = conn.auth( "test" , "eliot" , "bar" , errmsg );
if ( ! ok )
cout << errmsg << endl;
- MONGO_assert( ok );
+ MONGO_verify( ok );
- MONGO_assert( ! conn.auth( "test" , "eliot" , "bars" , errmsg ) );
+ MONGO_verify( ! conn.auth( "test" , "eliot" , "bars" , errmsg ) );
}
diff --git a/src/mongo/client/examples/clientTest.cpp b/src/mongo/client/examples/clientTest.cpp
index aaea6bd1bdf..11acac88ae8 100644
--- a/src/mongo/client/examples/clientTest.cpp
+++ b/src/mongo/client/examples/clientTest.cpp
@@ -24,8 +24,8 @@
#include <iostream>
-#ifndef assert
-# define assert(x) MONGO_assert(x)
+#ifndef verify
+# define verify(x) MONGO_verify(x)
#endif
using namespace std;
@@ -53,24 +53,24 @@ int main( int argc, const char **argv ) {
// clean up old data from any previous tests
conn.remove( ns, BSONObj() );
- assert( conn.findOne( ns , BSONObj() ).isEmpty() );
+ verify( conn.findOne( ns , BSONObj() ).isEmpty() );
// test insert
conn.insert( ns ,BSON( "name" << "eliot" << "num" << 1 ) );
- assert( ! conn.findOne( ns , BSONObj() ).isEmpty() );
+ verify( ! conn.findOne( ns , BSONObj() ).isEmpty() );
// test remove
conn.remove( ns, BSONObj() );
- assert( conn.findOne( ns , BSONObj() ).isEmpty() );
+ verify( conn.findOne( ns , BSONObj() ).isEmpty() );
// insert, findOne testing
conn.insert( ns , BSON( "name" << "eliot" << "num" << 1 ) );
{
BSONObj res = conn.findOne( ns , BSONObj() );
- assert( strstr( res.getStringField( "name" ) , "eliot" ) );
- assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
- assert( 1 == res.getIntField( "num" ) );
+ verify( strstr( res.getStringField( "name" ) , "eliot" ) );
+ verify( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
+ verify( 1 == res.getIntField( "num" ) );
}
@@ -83,7 +83,7 @@ int main( int argc, const char **argv ) {
count++;
BSONObj obj = cursor->next();
}
- assert( count == 2 );
+ verify( count == 2 );
}
{
@@ -93,7 +93,7 @@ int main( int argc, const char **argv ) {
count++;
BSONObj obj = cursor->next();
}
- assert( count == 1 );
+ verify( count == 1 );
}
{
@@ -103,45 +103,45 @@ int main( int argc, const char **argv ) {
count++;
BSONObj obj = cursor->next();
}
- assert( count == 0 );
+ verify( count == 0 );
}
// update
{
BSONObj res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
- assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
+ verify( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
BSONObj after = BSONObjBuilder().appendElements( res ).append( "name2" , "h" ).obj();
conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after );
res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
- assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
- assert( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );
+ verify( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
+ verify( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );
conn.update( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() , after );
res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
- assert( strstr( res.getStringField( "name" ) , "eliot" ) );
- assert( strstr( res.getStringField( "name2" ) , "h" ) );
- assert( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );
+ verify( strstr( res.getStringField( "name" ) , "eliot" ) );
+ verify( strstr( res.getStringField( "name2" ) , "h" ) );
+ verify( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );
// upsert
conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after , 1 );
- assert( ! conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() ).isEmpty() );
+ verify( ! conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() ).isEmpty() );
}
{
// ensure index
- assert( conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
- assert( ! conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
+ verify( conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
+ verify( ! conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
}
{
// hint related tests
- assert( conn.findOne(ns, "{}")["name"].str() == "sara" );
+ verify( conn.findOne(ns, "{}")["name"].str() == "sara" );
- assert( conn.findOne(ns, "{ name : 'eliot' }")["name"].str() == "eliot" );
- assert( conn.getLastError() == "" );
+ verify( conn.findOne(ns, "{ name : 'eliot' }")["name"].str() == "eliot" );
+ verify( conn.getLastError() == "" );
// nonexistent index test
bool asserted = false;
@@ -151,13 +151,13 @@ int main( int argc, const char **argv ) {
catch ( ... ) {
asserted = true;
}
- assert( asserted );
+ verify( asserted );
//existing index
- assert( conn.findOne(ns, Query("{name:'eliot'}").hint("{name:1}")).hasElement("name") );
+ verify( conn.findOne(ns, Query("{name:'eliot'}").hint("{name:1}")).hasElement("name") );
// run validate
- assert( conn.validate( ns ) );
+ verify( conn.validate( ns ) );
}
{
@@ -189,14 +189,14 @@ int main( int argc, const char **argv ) {
BSONObj found = conn.findOne( tsns , mongo::BSONObj() );
cout << "old: " << out << "\nnew: " << found << endl;
- assert( ( oldTime < found["ts"].timestampTime() ) ||
+ verify( ( oldTime < found["ts"].timestampTime() ) ||
( oldTime == found["ts"].timestampTime() && oldInc < found["ts"].timestampInc() ) );
}
{
// check that killcursors doesn't affect last error
- assert( conn.getLastError().empty() );
+ verify( conn.getLastError().empty() );
BufBuilder b;
b.appendNum( (int)0 ); // reserved
@@ -209,7 +209,7 @@ int main( int argc, const char **argv ) {
// say() is protected in DBClientConnection, so get superclass
static_cast< DBConnector* >( &conn )->say( m );
- assert( conn.getLastError().empty() );
+ verify( conn.getLastError().empty() );
}
{
@@ -258,7 +258,7 @@ int main( int argc, const char **argv ) {
BSONObj res;
bool gotError = false;
- assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
+ verify( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
try {
conn.eval( "test" , "sleep(5000); return db.totest.findOne().x" , res );
}
@@ -266,11 +266,11 @@ int main( int argc, const char **argv ) {
gotError = true;
log() << e.what() << endl;
}
- assert( gotError );
+ verify( gotError );
// sleep so the server isn't locked anymore
sleepsecs( 4 );
- assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
+ verify( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
}
diff --git a/src/mongo/client/examples/httpClientTest.cpp b/src/mongo/client/examples/httpClientTest.cpp
index 4055d4492d5..f5f89ac413f 100644
--- a/src/mongo/client/examples/httpClientTest.cpp
+++ b/src/mongo/client/examples/httpClientTest.cpp
@@ -27,10 +27,10 @@ void play( string url ) {
HttpClient c;
HttpClient::Result r;
- MONGO_assert( c.get( url , &r ) == 200 );
+ MONGO_verify( c.get( url , &r ) == 200 );
HttpClient::Headers h = r.getHeaders();
- MONGO_assert( h["Content-Type"].find( "text/html" ) == 0 );
+ MONGO_verify( h["Content-Type"].find( "text/html" ) == 0 );
cout << "\tHeaders" << endl;
for ( HttpClient::Headers::iterator i = h.begin() ; i != h.end(); ++i ) {
diff --git a/src/mongo/client/examples/mongoperf.cpp b/src/mongo/client/examples/mongoperf.cpp
index f89f769d0cf..9658f9406c0 100644
--- a/src/mongo/client/examples/mongoperf.cpp
+++ b/src/mongo/client/examples/mongoperf.cpp
@@ -100,7 +100,7 @@ void workerThread() {
}
void go() {
- assert( options["r"].trueValue() || options["w"].trueValue() );
+ verify( options["r"].trueValue() || options["w"].trueValue() );
MemoryMappedFile f;
cout << "creating test file size:";
len = options["fileSizeMB"].numberLong();
@@ -138,7 +138,7 @@ void go() {
lf = 0;
mmfFile = new MemoryMappedFile();
mmf = (char *) mmfFile->map(fname);
- assert( mmf );
+ verify( mmf );
syncDelaySecs = options["syncDelay"].numberInt();
if( syncDelaySecs ) {
diff --git a/src/mongo/client/examples/whereExample.cpp b/src/mongo/client/examples/whereExample.cpp
index 12b68d7add3..413ea26137a 100644
--- a/src/mongo/client/examples/whereExample.cpp
+++ b/src/mongo/client/examples/whereExample.cpp
@@ -65,5 +65,5 @@ int main( int argc, const char **argv ) {
cout << "\t" << obj.jsonString() << endl;
num++;
}
- MONGO_assert( num == 1 );
+ MONGO_verify( num == 1 );
}
diff --git a/src/mongo/client/gridfs.cpp b/src/mongo/client/gridfs.cpp
index 7024516e9a6..056502d8888 100644
--- a/src/mongo/client/gridfs.cpp
+++ b/src/mongo/client/gridfs.cpp
@@ -117,7 +117,7 @@ namespace mongo {
chunkLen += readLen;
bufPos += readLen;
- assert(chunkLen <= _chunkSize);
+ verify(chunkLen <= _chunkSize);
}
GridFSChunk c(idObj, chunkNumber, buf, chunkLen);
diff --git a/src/mongo/client/model.cpp b/src/mongo/client/model.cpp
index bd10a3c5528..3cb77822efc 100644
--- a/src/mongo/client/model.cpp
+++ b/src/mongo/client/model.cpp
@@ -99,7 +99,7 @@ namespace mongo {
b.append( myId );
}
- assert( ! myId.eoo() );
+ verify( ! myId.eoo() );
BSONObjBuilder qb;
qb.append( myId );
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 91ee6f6d624..eab7d6c3be9 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -83,7 +83,7 @@ namespace mongo {
}
void ClusteredCursor::_checkCursor( DBClientCursor * cursor ) {
- assert( cursor );
+ verify( cursor );
if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ) {
BSONObj error;
@@ -99,7 +99,7 @@ namespace mongo {
auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra , int skipLeft , bool lazy ) {
uassert( 10017 , "cursor already done" , ! _done );
- assert( _didInit );
+ verify( _didInit );
BSONObj q = _query;
if ( ! extra.isEmpty() ) {
@@ -131,7 +131,7 @@ namespace mongo {
massert( 13633 , str::stream() << "error querying server: " << server , cursor.get() );
cursor->attach( &conn ); // this calls done on conn
- assert( ! conn.ok() );
+ verify( ! conn.ok() );
_checkCursor( cursor.get() );
return cursor;
}
@@ -205,9 +205,9 @@ namespace mongo {
if( ( isVersioned() && ! isSharded() ) || _qShards.size() == 1 ){
map<string,list<BSONObj> > out;
_explain( out );
- assert( out.size() == 1 );
+ verify( out.size() == 1 );
list<BSONObj>& l = out.begin()->second;
- assert( l.size() == 1 );
+ verify( l.size() == 1 );
b.appendElements( *(l.begin()) );
return;
}
@@ -338,8 +338,8 @@ namespace mongo {
}
BSONObj FilteringClientCursor::next() {
- assert( ! _next.isEmpty() );
- assert( ! _done );
+ verify( ! _next.isEmpty() );
+ verify( ! _done );
BSONObj ret = _next;
_next = BSONObj();
@@ -354,7 +354,7 @@ namespace mongo {
}
void FilteringClientCursor::_advance() {
- assert( _next.isEmpty() );
+ verify( _next.isEmpty() );
if ( ! _cursor.get() || _done )
return;
@@ -469,7 +469,7 @@ namespace mongo {
_sortKey = _qSpec.sort();
_fields = _qSpec.fields();
- if( ! isVersioned() ) assert( _cInfo.isEmpty() );
+ if( ! isVersioned() ) verify( _cInfo.isEmpty() );
}
if ( ! _sortKey.isEmpty() && ! _fields.isEmpty() ) {
@@ -532,8 +532,8 @@ namespace mongo {
}
else if( initialized ){
- assert( pcState->cursor );
- assert( pcState->conn );
+ verify( pcState->cursor );
+ verify( pcState->conn );
if( ! finished && pcState->conn->ok() ){
try{
@@ -557,7 +557,7 @@ namespace mongo {
pcState.reset();
}
- else assert( finished || ! initialized );
+ else verify( finished || ! initialized );
initialized = false;
finished = false;
@@ -729,7 +729,7 @@ namespace mongo {
}
- assert( todo.size() );
+ verify( todo.size() );
log( pc ) << "initializing over " << todo.size() << " shards required by " << vinfo << endl;
@@ -749,7 +749,7 @@ namespace mongo {
if( mdata.initialized ){
- assert( mdata.pcState );
+ verify( mdata.pcState );
PCStatePtr state = mdata.pcState;
@@ -794,7 +794,7 @@ namespace mongo {
if( manager ) state->manager = manager;
else if( primary ) state->primary = primary;
- assert( ! primary || shard == *primary || ! isVersioned() );
+ verify( ! primary || shard == *primary || ! isVersioned() );
// Setup conn
if( ! state->conn ) state->conn.reset( new ShardConnection( shard, ns, manager ) );
@@ -927,17 +927,17 @@ namespace mongo {
if( ! mdata.pcState ) continue;
// Make sure all state is in shards
- assert( todo.find( shard ) != todo.end() );
- assert( mdata.initialized = true );
- if( ! mdata.completed ) assert( mdata.pcState->conn->ok() );
- assert( mdata.pcState->cursor );
- if( isVersioned() ) assert( mdata.pcState->primary || mdata.pcState->manager );
- else assert( ! mdata.pcState->primary || ! mdata.pcState->manager );
- assert( ! mdata.retryNext );
-
- if( mdata.completed ) assert( mdata.finished );
- if( mdata.finished ) assert( mdata.initialized );
- if( ! returnPartial ) assert( mdata.initialized );
+ verify( todo.find( shard ) != todo.end() );
+ verify( mdata.initialized = true );
+ if( ! mdata.completed ) verify( mdata.pcState->conn->ok() );
+ verify( mdata.pcState->cursor );
+ if( isVersioned() ) verify( mdata.pcState->primary || mdata.pcState->manager );
+ else verify( ! mdata.pcState->primary || ! mdata.pcState->manager );
+ verify( ! mdata.retryNext );
+
+ if( mdata.completed ) verify( mdata.finished );
+ if( mdata.finished ) verify( mdata.initialized );
+ if( ! returnPartial ) verify( mdata.initialized );
}
}
@@ -968,13 +968,13 @@ namespace mongo {
try {
// Sanity checks
- if( ! mdata.completed ) assert( state->conn && state->conn->ok() );
- assert( state->cursor );
+ if( ! mdata.completed ) verify( state->conn && state->conn->ok() );
+ verify( state->cursor );
if( isVersioned() ){
- assert( state->manager || state->primary );
- assert( ! state->manager || ! state->primary );
+ verify( state->manager || state->primary );
+ verify( ! state->manager || ! state->primary );
}
- else assert( ! state->manager && ! state->primary );
+ else verify( ! state->manager && ! state->primary );
// If we weren't init'ing lazily, ignore this
@@ -1095,13 +1095,13 @@ namespace mongo {
else ++i;
// Make sure all state is in shards
- assert( mdata.initialized = true );
- assert( mdata.finished = true );
- assert( mdata.completed = true );
- assert( ! mdata.pcState->conn->ok() );
- assert( mdata.pcState->cursor );
- if( isVersioned() ) assert( mdata.pcState->primary || mdata.pcState->manager );
- else assert( ! mdata.pcState->primary && ! mdata.pcState->manager );
+ verify( mdata.initialized = true );
+ verify( mdata.finished = true );
+ verify( mdata.completed = true );
+ verify( ! mdata.pcState->conn->ok() );
+ verify( mdata.pcState->cursor );
+ if( isVersioned() ) verify( mdata.pcState->primary || mdata.pcState->manager );
+ else verify( ! mdata.pcState->primary && ! mdata.pcState->manager );
}
// TODO : More cleanup of metadata?
@@ -1179,7 +1179,7 @@ namespace mongo {
// log() << "Starting parallel search..." << endl;
// make sure we're not already initialized
- assert( ! _cursors );
+ verify( ! _cursors );
_cursors = new FilteringClientCursor[_numServers];
bool returnPartial = ( _options & QueryOption_PartialResults );
@@ -1302,7 +1302,7 @@ namespace mongo {
continue;
}
- assert( conns[i] );
+ verify( conns[i] );
retryQueries.erase( i );
bool retry = false;
@@ -1374,7 +1374,7 @@ namespace mongo {
}
// Don't exceed our max retries, should not happen
- assert( retries < 5 );
+ verify( retries < 5 );
}
while( retryQueries.size() > 0 /* something to retry */ &&
( socketExs.size() == 0 || returnPartial ) /* no conn issues */ &&
@@ -1383,7 +1383,7 @@ namespace mongo {
// Assert that our conns are all closed!
for( vector< shared_ptr<ShardConnection> >::iterator i = conns.begin(); i < conns.end(); ++i ){
- assert( ! (*i) || ! (*i)->ok() );
+ verify( ! (*i) || ! (*i)->ok() );
}
// Handle errors we got during initialization.
@@ -1560,7 +1560,7 @@ namespace mongo {
}
catch ( RecvStaleConfigException& e ){
- assert( versionManager.isVersionableCB( _conn ) );
+ verify( versionManager.isVersionableCB( _conn ) );
if( i >= maxRetries ){
error() << "Future::spawnComand (part 2) stale config exception" << causedBy( e ) << endl;
@@ -1578,7 +1578,7 @@ namespace mongo {
LOG( i > 1 ? 0 : 1 ) << "retrying lazy command" << causedBy( e ) << endl;
- assert( _conn->lazySupported() );
+ verify( _conn->lazySupported() );
_done = false;
init();
continue;
diff --git a/src/mongo/client/parallel.h b/src/mongo/client/parallel.h
index b48330b6ac6..c566d92c046 100644
--- a/src/mongo/client/parallel.h
+++ b/src/mongo/client/parallel.h
@@ -417,12 +417,12 @@ namespace mongo {
bool isDone() const { return _done; }
bool ok() const {
- assert( _done );
+ verify( _done );
return _ok;
}
BSONObj result() const {
- assert( _done );
+ verify( _done );
return _res;
}
diff --git a/src/mongo/client/redef_macros.h b/src/mongo/client/redef_macros.h
index 1a492dc03b0..78bb98a37d2 100644
--- a/src/mongo/client/redef_macros.h
+++ b/src/mongo/client/redef_macros.h
@@ -29,7 +29,7 @@
#define realloc MONGO_realloc
// util/assert_util.h
-#define assert MONGO_assert
+#define verify MONGO_verify
#define dassert MONGO_dassert
#define wassert MONGO_wassert
#define massert MONGO_massert
diff --git a/src/mongo/client/syncclusterconnection.cpp b/src/mongo/client/syncclusterconnection.cpp
index 13505495e9f..6f500f4728e 100644
--- a/src/mongo/client/syncclusterconnection.cpp
+++ b/src/mongo/client/syncclusterconnection.cpp
@@ -62,7 +62,7 @@ namespace mongo {
}
SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
- assert(0);
+ verify(0);
}
SyncClusterConnection::~SyncClusterConnection() {
@@ -120,7 +120,7 @@ namespace mongo {
errors.push_back( err );
}
- assert( _lastErrors.size() == errors.size() && _lastErrors.size() == _conns.size() );
+ verify( _lastErrors.size() == errors.size() && _lastErrors.size() == _conns.size() );
stringstream err;
bool ok = true;
@@ -313,7 +313,7 @@ namespace mongo {
if ( _writeConcern ) {
_checkLast();
- assert( _lastErrors.size() > 1 );
+ verify( _lastErrors.size() > 1 );
int a = _lastErrors[0]["n"].numberInt();
for ( unsigned i=1; i<_lastErrors.size(); i++ ) {
@@ -379,7 +379,7 @@ namespace mongo {
}
void SyncClusterConnection::sayPiggyBack( Message &toSend ) {
- assert(0);
+ verify(0);
}
int SyncClusterConnection::_lockType( const string& name ) {
@@ -402,7 +402,7 @@ namespace mongo {
void SyncClusterConnection::killCursor( long long cursorID ) {
// should never need to do this
- assert(0);
+ verify(0);
}
void SyncClusterConnection::setAllSoTimeouts( double socketTimeout ){
diff --git a/src/mongo/client/syncclusterconnection.h b/src/mongo/client/syncclusterconnection.h
index 5ef2b0a547b..b3ed36fe83b 100644
--- a/src/mongo/client/syncclusterconnection.h
+++ b/src/mongo/client/syncclusterconnection.h
@@ -125,7 +125,7 @@ namespace mongo {
public:
UpdateNotTheSame( int code , const string& msg , const vector<string>& addrs , const vector<BSONObj>& lastErrors )
: UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ) {
- assert( _addrs.size() == _lastErrors.size() );
+ verify( _addrs.size() == _lastErrors.size() );
}
virtual ~UpdateNotTheSame() throw() {
diff --git a/src/mongo/client/undef_macros.h b/src/mongo/client/undef_macros.h
index 71d0959b8c4..90a818f78f2 100644
--- a/src/mongo/client/undef_macros.h
+++ b/src/mongo/client/undef_macros.h
@@ -30,11 +30,11 @@
#undef realloc
// util/assert_util.h
-#undef assert
#undef dassert
#undef wassert
#undef massert
#undef uassert
+#undef verify
#undef DESTRUCTOR_GUARD
// util/goodies.h
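
Note: undef_macros.h and redef_macros.h act as a pop/push pair so driver users can include third-party headers without MongoDB's short macro names leaking into them; this commit simply swaps assert for verify in both lists. A self-contained sketch of the sandwich, with an assumed trivial macro body:

    #include <cstdlib>

    #define MONGO_verify(x) do { if (!(x)) std::abort(); } while (0)
    #define verify MONGO_verify           // what redef_macros.h does

    // --- undef_macros.h region: foreign code sees no 'verify' macro
    #undef verify
    // ... a third-party header could define its own verify() here ...
    // --- redef_macros.h restores the project spelling
    #define verify MONGO_verify

    int main() { verify(true); return 0; }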
diff --git a/src/mongo/db/btree.cpp b/src/mongo/db/btree.cpp
index d7ff311ffae..f652f5e13a9 100644
--- a/src/mongo/db/btree.cpp
+++ b/src/mongo/db/btree.cpp
@@ -139,8 +139,8 @@ namespace mongo {
long long BtreeBucket<V>::fullValidate(const DiskLoc& thisLoc, const BSONObj &order, long long *unusedCount, bool strict, unsigned depth) const {
{
bool f = false;
- assert( f = true );
- massert( 10281 , "assert is misdefined", f);
+ verify( f = true );
+ massert( 10281 , "verify is misdefined", f);
}
killCurrentOp.checkForInterrupt();
@@ -169,7 +169,7 @@ namespace mongo {
DiskLoc left = kn.prevChildBucket;
const BtreeBucket *b = left.btree<V>();
if ( strict ) {
- assert( b->parent == thisLoc );
+ verify( b->parent == thisLoc );
}
else {
wassert( b->parent == thisLoc );
@@ -181,7 +181,7 @@ namespace mongo {
DiskLoc ll = this->nextChild;
const BtreeBucket *b = ll.btree<V>();
if ( strict ) {
- assert( b->parent == thisLoc );
+ verify( b->parent == thisLoc );
}
else {
wassert( b->parent == thisLoc );
@@ -252,7 +252,7 @@ namespace mongo {
ONCE {
((BtreeBucket<V> *) this)->dump();
}
- assert(false);
+ verify(false);
}
}
}
@@ -260,7 +260,7 @@ namespace mongo {
template< class V >
inline void BucketBasics<V>::markUnused(int keypos) {
- assert( keypos >= 0 && keypos < this->n );
+ verify( keypos >= 0 && keypos < this->n );
k(keypos).setUnused();
}
@@ -293,21 +293,21 @@ namespace mongo {
*/
template< class V >
inline int BucketBasics<V>::_alloc(int bytes) {
- assert( this->emptySize >= bytes );
+ verify( this->emptySize >= bytes );
this->topSize += bytes;
this->emptySize -= bytes;
int ofs = totalDataSize() - this->topSize;
- assert( ofs > 0 );
+ verify( ofs > 0 );
return ofs;
}
template< class V >
void BucketBasics<V>::_delKeyAtPos(int keypos, bool mayEmpty) {
// TODO This should be keypos < n
- assert( keypos >= 0 && keypos <= this->n );
- assert( childForPos(keypos).isNull() );
+ verify( keypos >= 0 && keypos <= this->n );
+ verify( childForPos(keypos).isNull() );
// TODO audit cases where nextChild is null
- assert( ( mayEmpty && this->n > 0 ) || this->n > 1 || this->nextChild.isNull() );
+ verify( ( mayEmpty && this->n > 0 ) || this->n > 1 || this->nextChild.isNull() );
this->emptySize += sizeof(_KeyNode);
this->n--;
for ( int j = keypos; j < this->n; j++ )
@@ -322,7 +322,7 @@ namespace mongo {
template< class V >
void BucketBasics<V>::popBack(DiskLoc& recLoc, Key &key) {
massert( 10282 , "n==0 in btree popBack()", this->n > 0 );
- assert( k(this->n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
+ verify( k(this->n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
KeyNode kn = keyNode(this->n-1);
recLoc = kn.recordLoc;
key.assign(kn.key);
@@ -347,7 +347,7 @@ namespace mongo {
int bytesNeeded = key.dataSize() + sizeof(_KeyNode);
if ( bytesNeeded > this->emptySize )
return false;
- assert( bytesNeeded <= this->emptySize );
+ verify( bytesNeeded <= this->emptySize );
if( this->n ) {
const KeyNode klast = keyNode(this->n-1);
if( klast.key.woCompare(key, order) > 0 ) {
@@ -355,7 +355,7 @@ namespace mongo {
log() << " klast: " << keyNode(this->n-1).key.toString() << endl;
log() << " key: " << key.toString() << endl;
DEV klast.key.woCompare(key, order);
- assert(false);
+ verify(false);
}
}
this->emptySize -= sizeof(_KeyNode);
@@ -508,7 +508,7 @@ namespace mongo {
this->emptySize = tdz - dataUsed - this->n * sizeof(_KeyNode);
{
int foo = this->emptySize;
- assert( foo >= 0 );
+ verify( foo >= 0 );
}
setPacked();
@@ -518,7 +518,7 @@ namespace mongo {
template< class V >
inline void BucketBasics<V>::truncateTo(int N, const Ordering &order, int &refPos) {
- assert( Lock::somethingWriteLocked() );
+ verify( Lock::somethingWriteLocked() );
assertWritable();
this->n = N;
setNotPacked();
@@ -544,7 +544,7 @@ namespace mongo {
*/
template< class V >
int BucketBasics<V>::splitPos( int keypos ) const {
- assert( this->n > 2 );
+ verify( this->n > 2 );
int split = 0;
int rightSize = 0;
// when splitting a btree node, if the new key is greater than all the other keys, we should not do an even split, but a 90/10 split.
@@ -571,7 +571,7 @@ namespace mongo {
template< class V >
void BucketBasics<V>::reserveKeysFront( int nAdd ) {
- assert( this->emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
+ verify( this->emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
this->emptySize -= sizeof( _KeyNode ) * nAdd;
for( int i = this->n - 1; i > -1; --i ) {
k( i + nAdd ) = k( i );
@@ -613,7 +613,7 @@ namespace mongo {
continue;
}
- assert(b->n>0);
+ verify(b->n>0);
largestLoc = loc;
largestKey = b->n-1;
@@ -821,7 +821,7 @@ namespace mongo {
template< class V >
void BtreeBucket<V>::delBucket(const DiskLoc thisLoc, const IndexDetails& id) {
ClientCursor::informAboutToDeleteBucket(thisLoc); // slow...
- assert( !isHead() );
+ verify( !isHead() );
DiskLoc ll = this->parent;
const BtreeBucket *p = ll.btree<V>();
@@ -849,7 +849,7 @@ namespace mongo {
/** note: may delete the entire bucket! this invalid upon return sometimes. */
template< class V >
void BtreeBucket<V>::delKeyAtPos( const DiskLoc thisLoc, IndexDetails& id, int p, const Ordering &order) {
- assert(this->n>0);
+ verify(this->n>0);
DiskLoc left = this->childForPos(p);
if ( this->n == 1 ) {
@@ -907,7 +907,7 @@ namespace mongo {
void BtreeBucket<V>::deleteInternalKey( const DiskLoc thisLoc, int keypos, IndexDetails &id, const Ordering &order ) {
DiskLoc lchild = this->childForPos( keypos );
DiskLoc rchild = this->childForPos( keypos + 1 );
- assert( !lchild.isNull() || !rchild.isNull() );
+ verify( !lchild.isNull() || !rchild.isNull() );
int advanceDirection = lchild.isNull() ? 1 : -1;
int advanceKeyOfs = keypos;
DiskLoc advanceLoc = advance( thisLoc, advanceKeyOfs, advanceDirection, __FUNCTION__ );
@@ -937,9 +937,9 @@ namespace mongo {
template< class V >
void BtreeBucket<V>::replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id ) {
- assert( this->n == 0 && !this->nextChild.isNull() );
+ verify( this->n == 0 && !this->nextChild.isNull() );
if ( this->parent.isNull() ) {
- assert( id.head == thisLoc );
+ verify( id.head == thisLoc );
id.head.writing() = this->nextChild;
}
else {
@@ -953,7 +953,7 @@ namespace mongo {
template< class V >
bool BtreeBucket<V>::canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const {
- assert( leftIndex >= 0 && leftIndex < this->n );
+ verify( leftIndex >= 0 && leftIndex < this->n );
DiskLoc leftNodeLoc = this->childForPos( leftIndex );
DiskLoc rightNodeLoc = this->childForPos( leftIndex + 1 );
if ( leftNodeLoc.isNull() || rightNodeLoc.isNull() ) {
@@ -986,7 +986,7 @@ namespace mongo {
int rightSizeLimit = ( l->topSize + l->n * KNS + keyNode( leftIndex ).key.dataSize() + KNS + r->topSize + r->n * KNS ) / 2;
// This constraint should be ensured by only calling this function
// if we go below the low water mark.
- assert( rightSizeLimit < BtreeBucket<V>::bodySize() );
+ verify( rightSizeLimit < BtreeBucket<V>::bodySize() );
for( int i = r->n - 1; i > -1; --i ) {
rightSize += r->keyNode( i ).key.dataSize() + KNS;
if ( rightSize > rightSizeLimit ) {
@@ -1061,7 +1061,7 @@ namespace mongo {
template< class V >
int BtreeBucket<V>::indexInParent( const DiskLoc &thisLoc ) const {
- assert( !this->parent.isNull() );
+ verify( !this->parent.isNull() );
const BtreeBucket *p = BTREE(this->parent);
if ( p->nextChild == thisLoc ) {
return p->n;
@@ -1078,7 +1078,7 @@ namespace mongo {
dump();
out() << "Parent: " << this->parent << "\n";
p->dump();
- assert(false);
+ verify(false);
return -1; // just to compile
}
@@ -1175,7 +1175,7 @@ namespace mongo {
// By definition, if we are below the low water mark and cannot merge
// then we must actively balance.
- assert( split != l->n );
+ verify( split != l->n );
if ( split < l->n ) {
doBalanceLeftToRight( thisLoc, leftIndex, split, l, lchild, r, rchild, id, order );
}
@@ -1280,7 +1280,7 @@ namespace mongo {
this->_delKeyAtPos( keypos, true );
// Ensure we do not orphan neighbor's old child.
- assert( this->childForPos( keypos ) == rchild );
+ verify( this->childForPos( keypos ) == rchild );
// Just set temporarily - required to pass validation in insertHere()
this->childForPos( keypos ) = lchild;
@@ -1323,10 +1323,10 @@ namespace mongo {
out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
out() << " key: " << key.toString() << endl;
dump();
- assert(false);
+ verify(false);
}
kn->prevChildBucket = this->nextChild;
- assert( kn->prevChildBucket == lchild );
+ verify( kn->prevChildBucket == lchild );
this->nextChild.writing() = rchild;
if ( !rchild.isNull() )
BTREE(rchild)->parent.writing() = thisLoc;
@@ -1341,7 +1341,7 @@ namespace mongo {
out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
out() << " key: " << key.toString() << endl;
dump();
- assert(false);
+ verify(false);
}
const Loc *pc = &k(keypos+1).prevChildBucket;
*getDur().alreadyDeclared( const_cast<Loc*>(pc) ) = rchild; // declared in basicInsert()
@@ -1422,7 +1422,7 @@ namespace mongo {
}
else {
int kp = keypos-split-1;
- assert(kp>=0);
+ verify(kp>=0);
BTREE(rLoc)->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
}
}
@@ -1460,7 +1460,7 @@ namespace mongo {
out() << " thisLoc: " << thisLoc.toString() << endl;
out() << " keyOfs: " << keyOfs << " n:" << this->n << " direction: " << direction << endl;
out() << bucketSummary() << endl;
- assert(false);
+ verify(false);
}
int adj = direction < 0 ? 1 : 0;
int ko = keyOfs + direction;
@@ -1494,7 +1494,7 @@ namespace mongo {
return ancestor;
}
}
- assert( direction<0 || an->nextChild == childLoc );
+ verify( direction<0 || an->nextChild == childLoc );
// parent exhausted also, keep going up
childLoc = ancestor;
ancestor = an->parent;
@@ -1704,7 +1704,7 @@ namespace mongo {
Continuation<V>& c,
bool dupsAllowed) const {
dassert( c.key.dataSize() <= this->KeyMax );
- assert( c.key.dataSize() > 0 );
+ verify( c.key.dataSize() > 0 );
int pos;
bool found = find(c.idx, c.key, c.recordLoc, c.order, pos, !dupsAllowed);
@@ -1753,7 +1753,7 @@ namespace mongo {
problem() << "ERROR: key too large len:" << key.dataSize() << " max:" << this->KeyMax << ' ' << key.dataSize() << ' ' << idx.indexNamespace() << endl;
return 2;
}
- assert( key.dataSize() > 0 );
+ verify( key.dataSize() > 0 );
int pos;
bool found = find(idx, key, recordLoc, order, pos, !dupsAllowed);
@@ -1926,7 +1926,7 @@ namespace mongo {
A.GETOFS() += 2;
b->bt_insert(id.head, A, key, order, true, id);
A.GETOFS() += 2;
- assert( b->k(0).isUsed() );
+ verify( b->k(0).isUsed() );
// b->k(0).setUnused();
b->k(1).setUnused();
b->k(2).setUnused();
@@ -1960,19 +1960,19 @@ namespace mongo {
DiskLoc56Bit bigl;
{
bigl = big;
- assert( big == bigl );
+ verify( big == bigl );
DiskLoc e = bigl;
- assert( big == e );
+ verify( big == e );
}
{
DiskLoc d;
- assert( d.isNull() );
+ verify( d.isNull() );
DiskLoc56Bit l;
l = d;
- assert( l.isNull() );
+ verify( l.isNull() );
d = l;
- assert( d.isNull() );
- assert( l < bigl );
+ verify( d.isNull() );
+ verify( l < bigl );
}
}
} btunittest;
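The fullValidate() hunk near the top of this file keeps the old "f = true" self-test, now phrased in terms of verify(). A standalone sketch of why it works (VERIFY is a stand-in macro, not MongoDB's): a check that evaluates its argument flips f, while one compiled away, as assert() is under NDEBUG, leaves f false for the follow-up test to catch.

    #include <cstdio>
    #include <cstdlib>

    // Swap the body for ((void)0) to simulate a compiled-out check and
    // watch the guard below fire.
    #define VERIFY(expr) ((expr) ? (void)0 : \
        (std::fprintf(stderr, "VERIFY(%s) failed\n", #expr), std::abort()))

    int main() {
        bool f = false;
        VERIFY(f = true);              // the side effect must actually run
        if (!f) {                      // plays the role of massert( 10281, ... )
            std::fprintf(stderr, "VERIFY is misdefined\n");
            return 1;
        }
        std::puts("VERIFY evaluates its argument: ok");
        return 0;
    }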
diff --git a/src/mongo/db/btree.h b/src/mongo/db/btree.h
index 38f395b2d78..056eb486b85 100644
--- a/src/mongo/db/btree.h
+++ b/src/mongo/db/btree.h
@@ -90,12 +90,12 @@ namespace mongo {
unsigned short _kdo;
void setKeyDataOfs(short s) {
_kdo = s;
- assert(s>=0);
+ verify(s>=0);
}
/** Seems to be redundant. */
void setKeyDataOfsSavingUse(short s) {
_kdo = s;
- assert(s>=0);
+ verify(s>=0);
}
/**
* Unused keys are not returned by read operations. Keys may be marked
@@ -240,9 +240,9 @@ namespace mongo {
void operator=(const DiskLoc& loc) {
ofs = loc.getOfs();
int la = loc.a();
- assert( la <= 0xffffff ); // must fit in 3 bytes
+ verify( la <= 0xffffff ); // must fit in 3 bytes
if( la < 0 ) {
- assert( la == -1 );
+ verify( la == -1 );
la = 0;
ofs = OurNullOfs;
}
@@ -412,7 +412,7 @@ namespace mongo {
bool _pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild);
void pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild) {
bool ok = _pushBack( recordLoc , key , order , prevChild );
- assert(ok);
+ verify(ok);
}
/**
@@ -1045,9 +1045,9 @@ namespace mongo {
virtual bool isMultiKey() const { return _multikey; }
/*const _KeyNode& _currKeyNode() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
const _KeyNode& kn = keyNode(keyOfs);
- assert( kn.isUsed() );
+ verify( kn.isUsed() );
return kn;
}*/
@@ -1176,7 +1176,7 @@ namespace mongo {
*/
template< class V >
BtreeBucket<V> * DiskLoc::btreemod() const {
- assert( _a != -1 );
+ verify( _a != -1 );
BtreeBucket<V> *b = const_cast< BtreeBucket<V> * >( btree<V>() );
return static_cast< BtreeBucket<V>* >( getDur().writingPtr( b, V::BucketSize ) );
}
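The DiskLoc56Bit operator= hunk above packs a data-file number into three bytes and allows -1 only as the null marker. A hedged sketch of that packing (Packed56 and set() are illustrative names; the real code also remaps the offset to a reserved null value):

    #include <cstdint>
    #include <cstdlib>

    struct Packed56 {
        std::uint8_t fileNo[3];              // 3-byte data-file number
        std::uint32_t ofs;                   // offset within the file

        void set(int a, std::uint32_t offset) {
            if (a < 0) {
                if (a != -1) std::abort();   // verify( la == -1 ): only null may be negative
                a = 0;                       // null gets a reserved encoding
            }
            if (a > 0xffffff) std::abort();  // verify( la <= 0xffffff ): must fit in 3 bytes
            fileNo[0] = static_cast<std::uint8_t>(a);
            fileNo[1] = static_cast<std::uint8_t>(a >> 8);
            fileNo[2] = static_cast<std::uint8_t>(a >> 16);
            ofs = offset;
        }
    };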
diff --git a/src/mongo/db/btreebuilder.cpp b/src/mongo/db/btreebuilder.cpp
index 0ec587a1958..5619474ee07 100644
--- a/src/mongo/db/btreebuilder.cpp
+++ b/src/mongo/db/btreebuilder.cpp
@@ -172,7 +172,7 @@ namespace mongo {
x = next;
getDur().commitIfNeeded();
}
- assert( idx.head.isNull() );
+ verify( idx.head.isNull() );
log(2) << "done rollback" << endl;
}
)
diff --git a/src/mongo/db/btreecursor.cpp b/src/mongo/db/btreecursor.cpp
index e2158762bdf..8a19aa3136f 100644
--- a/src/mongo/db/btreecursor.cpp
+++ b/src/mongo/db/btreecursor.cpp
@@ -49,7 +49,7 @@ namespace mongo {
}
virtual BSONObj keyAt(int ofs) const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
const BtreeBucket<V> *b = bucket.btree<V>();
int n = b->getN();
if( n == b->INVALID_N_SENTINEL ) {
@@ -60,7 +60,7 @@ namespace mongo {
}
virtual BSONObj currKey() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
return bucket.btree<V>()->keyNode(keyOfs).key.toBson();
}
@@ -102,7 +102,7 @@ namespace mongo {
_multikey = d->isMultikey(idxNo);
if ( keyOfs >= 0 ) {
- assert( !keyAtKeyOfs.isEmpty() );
+ verify( !keyAtKeyOfs.isEmpty() );
try {
// Note keyAt() returns an empty BSONObj if keyOfs is now out of range,
@@ -173,7 +173,7 @@ namespace mongo {
private:
const KeyNode currKeyNode() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
const BtreeBucket<V> *b = bucket.btree<V>();
return b->keyNode(keyOfs);
}
@@ -205,7 +205,7 @@ namespace mongo {
}
virtual BSONObj currKey() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
return bucket.btree<V1>()->keyNode(keyOfs).key.toBson();
}
@@ -227,7 +227,7 @@ namespace mongo {
private:
const KeyNode currKeyNode() const {
- assert( !bucket.isNull() );
+ verify( !bucket.isNull() );
const BtreeBucket<V1> *b = bucket.btree<V1>();
return b->keyNode(keyOfs);
}
@@ -310,7 +310,7 @@ namespace mongo {
_order( _id.keyPattern() ),
_ordering( Ordering::make( _order ) ),
_direction( _direction ),
- _bounds( ( assert( _bounds.get() ), _bounds ) ),
+ _bounds( ( verify( _bounds.get() ), _bounds ) ),
_boundsIterator( new FieldRangeVectorIterator( *_bounds ) ),
_independentFieldRanges( true ),
_nscanned( 0 ) {
@@ -450,7 +450,7 @@ namespace mongo {
struct BtreeCursorUnitTest {
BtreeCursorUnitTest() {
- assert( minDiskLoc.compare(maxDiskLoc) < 0 );
+ verify( minDiskLoc.compare(maxDiskLoc) < 0 );
}
} btut;
diff --git a/src/mongo/db/cap.cpp b/src/mongo/db/cap.cpp
index a8be2383115..72da3f9cc2e 100644
--- a/src/mongo/db/cap.cpp
+++ b/src/mongo/db/cap.cpp
@@ -54,7 +54,7 @@ namespace mongo {
(or 3...there will be a little unused sliver at the end of the extent.)
*/
void NamespaceDetails::compact() {
- assert(capped);
+ verify(capped);
list<DiskLoc> drecs;
@@ -69,7 +69,7 @@ namespace mongo {
drecs.sort();
list<DiskLoc>::iterator j = drecs.begin();
- assert( j != drecs.end() );
+ verify( j != drecs.end() );
DiskLoc a = *j;
while ( 1 ) {
j++;
@@ -105,7 +105,7 @@ namespace mongo {
void NamespaceDetails::cappedCheckMigrate() {
// migrate old NamespaceDetails format
- assert( capped );
+ verify( capped );
if ( capExtent.a() == 0 && capExtent.getOfs() == 0 ) {
//capFirstNewRecord = DiskLoc();
capFirstNewRecord.writing().setInvalid();
@@ -128,18 +128,18 @@ namespace mongo {
}
bool NamespaceDetails::inCapExtent( const DiskLoc &dl ) const {
- assert( !dl.isNull() );
+ verify( !dl.isNull() );
// We could have a rec or drec, doesn't matter.
bool res = dl.drec()->myExtentLoc(dl) == capExtent;
DEV {
// old implementation. this check is temp to test works the same. new impl should be a little faster.
- assert( res == (dl.drec()->myExtent( dl ) == capExtent.ext()) );
+ verify( res == (dl.drec()->myExtent( dl ) == capExtent.ext()) );
}
return res;
}
bool NamespaceDetails::nextIsInCapExtent( const DiskLoc &dl ) const {
- assert( !dl.isNull() );
+ verify( !dl.isNull() );
DiskLoc next = dl.drec()->nextDeleted;
if ( next.isNull() )
return false;
@@ -186,7 +186,7 @@ namespace mongo {
else
prev.drec()->nextDeleted.writing() = ret.drec()->nextDeleted;
ret.drec()->nextDeleted.writing().setInvalid(); // defensive.
- assert( ret.drec()->extentOfs < ret.getOfs() );
+ verify( ret.drec()->extentOfs < ret.getOfs() );
}
return ret;
@@ -197,7 +197,7 @@ namespace mongo {
if ( !cappedLastDelRecLastExtent().isValid() )
getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
- assert( len < 400000000 );
+ verify( len < 400000000 );
int passes = 0;
int maxPasses = ( len / 30 ) + 2; // 30 is about the smallest entry that could go in the oplog
if ( maxPasses < 5000 ) {
@@ -209,7 +209,7 @@ namespace mongo {
// delete records until we have room and the max # objects limit achieved.
/* this fails on a rename -- that is ok but must keep commented out */
- //assert( theCapExtent()->ns == ns );
+ //verify( theCapExtent()->ns == ns );
theCapExtent()->assertOk();
DiskLoc firstEmptyExtent;
@@ -306,14 +306,14 @@ namespace mongo {
// deleted record. Here we check that 'i' is not the last deleted
// record. (We expect that there will be deleted records in the new
// capExtent as well.)
- assert( !i.drec()->nextDeleted.isNull() );
+ verify( !i.drec()->nextDeleted.isNull() );
cappedLastDelRecLastExtent().writing() = i;
}
}
void NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusive) {
- DEV assert( this == nsdetails(ns) );
- assert( cappedLastDelRecLastExtent().isValid() );
+ DEV verify( this == nsdetails(ns) );
+ verify( cappedLastDelRecLastExtent().isValid() );
// We iteratively remove the newest document until the newest document
// is 'end', then we remove 'end' if requested.
@@ -326,7 +326,7 @@ namespace mongo {
getDur().commitIfNeeded();
// 'curr' will point to the newest document in the collection.
DiskLoc curr = theCapExtent()->lastRecord;
- assert( !curr.isNull() );
+ verify( !curr.isNull() );
if ( curr == end ) {
if ( inclusive ) {
// 'end' has been found, so break next iteration.
@@ -358,7 +358,7 @@ namespace mongo {
// the 'capExtent' can't be empty, so we set 'capExtent' to
// capExtent's prev extent.
if ( theCapExtent()->lastRecord.isNull() ) {
- assert( !theCapExtent()->xprev.isNull() );
+ verify( !theCapExtent()->xprev.isNull() );
// NOTE Because we didn't delete the last document, and
// capLooped() is false, capExtent is not the first extent
// so xprev will be nonnull.
@@ -407,7 +407,7 @@ namespace mongo {
}
void NamespaceDetails::emptyCappedCollection( const char *ns ) {
- DEV assert( this == nsdetails(ns) );
+ DEV verify( this == nsdetails(ns) );
massert( 13424, "collection must be capped", capped );
massert( 13425, "background index build in progress", !indexBuildInProgress );
massert( 13426, "indexes present", nIndexes == 0 );
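cappedTruncateAfter() above deletes from the newest record backwards until it reaches 'end', then removes 'end' itself only when inclusive is set. A generic sketch of that loop, with std::deque standing in for the capped extent chain:

    #include <deque>

    template <typename T>
    void truncateAfter(std::deque<T>& d, const T& end, bool inclusive) {
        bool foundEnd = false;
        while (!d.empty() && !foundEnd) {
            if (d.back() == end) {           // back() is the newest document
                if (!inclusive) break;       // keep 'end', stop before deleting it
                foundEnd = true;             // delete 'end', then stop
            }
            d.pop_back();
        }
    }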
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index aabca3c05a2..de6410c2fe6 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -83,7 +83,7 @@ namespace mongo {
#if defined _DEBUG
static unsigned long long nThreads = 0;
void assertStartingUp() {
- assert( nThreads <= 1 );
+ verify( nThreads <= 1 );
}
#else
void assertStartingUp() { }
@@ -99,7 +99,7 @@ namespace mongo {
}
}
#endif
- assert( currentClient.get() == 0 );
+ verify( currentClient.get() == 0 );
Client *c = new Client(desc, mp);
currentClient.reset(c);
mongo::lastError.initThread();
@@ -179,7 +179,7 @@ namespace mongo {
_ns( ns ),
_db(db)
{
- assert( db == 0 || db->isOk() );
+ verify( db == 0 || db->isOk() );
_client->_context = this;
checkNsAccess( doauth );
}
@@ -266,7 +266,7 @@ namespace mongo {
_ns( ns ),
_db(db)
{
- assert(_db);
+ verify(_db);
checkNotStale();
_client->_context = this;
_client->_curOp->enter( this );
@@ -281,7 +281,7 @@ namespace mongo {
}
_db = dbHolderUnchecked().getOrCreate( _ns , _path , _justCreated );
- assert(_db);
+ verify(_db);
if( _doVersion ) checkNotStale();
massert( 16107 , str::stream() << "Don't have a lock on: " << _ns , Lock::atLeastReadLocked( _ns ) );
_client->_context = this;
@@ -305,7 +305,7 @@ namespace mongo {
}
Client::Context::~Context() {
- DEV assert( _client == currentClient.get() );
+ DEV verify( _client == currentClient.get() );
_client->_curOp->leave( this );
_client->_context = _oldContext; // note: _oldContext may be null
}
@@ -365,7 +365,7 @@ namespace mongo {
Client* curopWaitingForLock( char type ) {
Client * c = currentClient.get();
- assert( c );
+ verify( c );
CurOp * co = c->curop();
if ( co ) {
co->waitingForLock( type );
@@ -374,7 +374,7 @@ namespace mongo {
}
void curopGotLock(Client *c) {
- assert(c);
+ verify(c);
CurOp * co = c->curop();
if ( co )
co->gotLock();
@@ -422,7 +422,7 @@ namespace mongo {
{
BSONElement id = i.next();
- assert( id.type() );
+ verify( id.type() );
_remoteId = id.wrap( "_id" );
}
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index a277a97ec89..7dc4376868d 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -110,7 +110,7 @@ namespace mongo {
string toString() const;
void gotHandshake( const BSONObj& o );
bool hasRemote() const { return _mp; }
- HostAndPort getRemote() const { assert( _mp ); return _mp->remote(); }
+ HostAndPort getRemote() const { verify( _mp ); return _mp->remote(); }
BSONObj getRemoteID() const { return _remoteId; }
BSONObj getHandshake() const { return _handshake; }
AbstractMessagingPort * port() const { return _mp; }
@@ -240,7 +240,7 @@ namespace mongo {
/** get the Client object for this thread. */
inline Client& cc() {
Client * c = currentClient.get();
- assert( c );
+ verify( c );
return *c;
}
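cc() above fails fast if the calling thread never registered a Client, instead of handing back a null reference. A sketch of the same accessor pattern, with C++11 thread_local standing in for the boost thread-specific storage of the era:

    #include <cstdlib>

    struct Client { /* per-thread state */ };

    static thread_local Client* currentClient = nullptr;   // set once per thread

    inline Client& cc() {
        Client* c = currentClient;
        if (!c) std::abort();   // verify( c ): never dereference a missing client
        return *c;
    }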
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 4a80a725280..4e6674b2dd3 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -47,13 +47,13 @@ namespace mongo {
ClientCursor *cc = clientCursorsById.begin()->second;
log() << "first one: " << cc->_cursorid << ' ' << cc->_ns << endl;
clientCursorsById.clear();
- assert(false);
+ verify(false);
}
}
void ClientCursor::setLastLoc_inlock(DiskLoc L) {
- assert( _pos != -2 ); // defensive - see ~ClientCursor
+ verify( _pos != -2 ); // defensive - see ~ClientCursor
if ( L == _lastLoc )
return;
@@ -80,15 +80,15 @@ namespace mongo {
Lock::assertWriteLocked(ns);
int len = strlen(ns);
const char* dot = strchr(ns, '.');
- assert( len > 0 && dot);
+ verify( len > 0 && dot);
bool isDB = (dot == &ns[len-1]); // first (and only) dot is the last char
{
//cout << "\nTEMP invalidate " << ns << endl;
Database *db = cc().database();
- assert(db);
- assert( str::startsWith(ns, db->name) );
+ verify(db);
+ verify( str::startsWith(ns, db->name) );
for( LockedIterator i; i.ok(); ) {
ClientCursor *cc = i.current();
@@ -123,7 +123,7 @@ namespace mongo {
for ( CCByLoc::iterator i = bl.begin(); i != bl.end(); ++i ) {
ClientCursor *cc = i->second;
if ( strncmp(ns, cc->ns.c_str(), len) == 0 ) {
- assert( cc->_db == db );
+ verify( cc->_db == db );
toDelete.push_back(i->second);
}
}*/
@@ -214,7 +214,7 @@ namespace mongo {
recursive_scoped_lock lock(ccmutex);
Database *db = cc().database();
- assert(db);
+ verify(db);
aboutToDeleteForSharding( db , dl );
@@ -228,7 +228,7 @@ namespace mongo {
while ( 1 ) {
toAdvance.push_back(j->second);
- DEV assert( j->first.loc == dl );
+ DEV verify( j->first.loc == dl );
++j;
if ( j == stop )
break;
@@ -306,8 +306,8 @@ namespace mongo {
Lock::assertAtLeastReadLocked(ns);
- assert( _db );
- assert( str::startsWith(_ns, _db->name) );
+ verify( _db );
+ verify( str::startsWith(_ns, _db->name) );
if( queryOptions & QueryOption_NoCursorTimeout )
noTimeout();
recursive_scoped_lock lock(ccmutex);
@@ -367,7 +367,7 @@ namespace mongo {
it.next();
x--;
}
- assert( x == 0 );
+ verify( x == 0 );
ret.insert( it.next() );
return true;
}
@@ -390,7 +390,7 @@ namespace mongo {
it.next();
x--;
}
- assert( x == 0 );
+ verify( x == 0 );
if ( fromKey )
*fromKey = true;
@@ -426,7 +426,7 @@ namespace mongo {
need to call when you are ready to "unlock".
*/
void ClientCursor::updateLocation() {
- assert( _cursorid );
+ verify( _cursorid );
_idleAgeMillis = 0;
_c->prepareToYield();
DiskLoc cl = _c->refLoc();
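The invalidate() hunk above distinguishes "drop everything in a database" from "drop one collection" by where the dot sits in the namespace string: "test." invalidates the whole db, "test.foo" one collection. A sketch of that test (the helper name is illustrative):

    #include <cstring>
    #include <cstdlib>

    inline bool invalidatesWholeDB(const char* ns) {
        const std::size_t len = std::strlen(ns);
        const char* dot = std::strchr(ns, '.');
        if (len == 0 || dot == 0) std::abort();  // verify( len > 0 && dot )
        return dot == &ns[len - 1];              // first (and only) dot is the last char
    }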
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index 2a30b419f6a..4b6fe9fad67 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -89,7 +89,7 @@ namespace mongo {
ClientCursor * c() { return _c; }
void release() {
if( _c ) {
- assert( _c->_pinValue >= 100 );
+ verify( _c->_pinValue >= 100 );
_c->_pinValue -= 100;
_c = 0;
}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 4f890a6f723..978896b2928 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -155,7 +155,7 @@ namespace mongo {
BSONObj js = tmp;
if ( isindex ) {
- assert( strstr(from_collection, "system.indexes") );
+ verify( strstr(from_collection, "system.indexes") );
js = fixindex(tmp);
storedForLater->push_back( js.getOwned() );
continue;
@@ -362,8 +362,8 @@ namespace mongo {
string s = "bad system.namespaces object " + collection.toString();
massert( 10290 , s.c_str(), false);
}
- assert( !e.eoo() );
- assert( e.type() == String );
+ verify( !e.eoo() );
+ verify( e.type() == String );
const char *from_name = e.valuestr();
if( strstr(from_name, ".system.") ) {
@@ -394,7 +394,7 @@ namespace mongo {
/* change name "<fromdb>.collection" -> <todb>.collection */
const char *p = strchr(from_name, '.');
- assert(p);
+ verify(p);
string to_name = todb + p;
bool wantIdIndex = false;
diff --git a/src/mongo/db/cmdline.cpp b/src/mongo/db/cmdline.cpp
index c08258e9a4d..28b214aa418 100644
--- a/src/mongo/db/cmdline.cpp
+++ b/src/mongo/db/cmdline.cpp
@@ -142,14 +142,14 @@ namespace mongo {
}
void setupLaunchSignals() {
- assert( signal(SIGUSR2 , launchSignal ) != SIG_ERR );
+ verify( signal(SIGUSR2 , launchSignal ) != SIG_ERR );
}
void CmdLine::launchOk() {
if ( cmdLine.doFork ) {
// killing leader will propagate to parent
- assert( kill( cmdLine.leaderProc, SIGUSR2 ) == 0 );
+ verify( kill( cmdLine.leaderProc, SIGUSR2 ) == 0 );
}
}
#endif
@@ -171,9 +171,9 @@ namespace mongo {
// setup cwd
char buffer[1024];
#ifdef _WIN32
- assert( _getcwd( buffer , 1000 ) );
+ verify( _getcwd( buffer , 1000 ) );
#else
- assert( getcwd( buffer , 1000 ) );
+ verify( getcwd( buffer , 1000 ) );
#endif
cmdLine.cwd = buffer;
}
@@ -288,7 +288,7 @@ namespace mongo {
if ( params.count( "logpath" ) ) {
// test logpath
logpath = params["logpath"].as<string>();
- assert( logpath.size() );
+ verify( logpath.size() );
if ( logpath[0] != '/' ) {
logpath = cmdLine.cwd + "/" + logpath;
}
@@ -487,8 +487,8 @@ namespace mongo {
void setupCoreSignals() {
#if !defined(_WIN32)
- assert( signal(SIGUSR1 , rotateLogs ) != SIG_ERR );
- assert( signal(SIGHUP , ignoreSignal ) != SIG_ERR );
+ verify( signal(SIGUSR1 , rotateLogs ) != SIG_ERR );
+ verify( signal(SIGHUP , ignoreSignal ) != SIG_ERR );
#endif
}
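The setupLaunchSignals()/launchOk() hunks above check, rather than assume, both the signal() registration and the kill() that tells the waiting parent process the forked server is ready. A POSIX-only sketch of that handshake (function names are illustrative):

    #include <signal.h>
    #include <cstdlib>

    static void launchSignal(int) { std::_Exit(0); }     // waiting parent exits clean

    void setupLaunchSignalsSketch() {
        if (signal(SIGUSR2, launchSignal) == SIG_ERR)    // verify( ... != SIG_ERR )
            std::abort();
    }

    void launchOkSketch(pid_t leaderProc) {
        if (kill(leaderProc, SIGUSR2) != 0)              // verify( ... == 0 )
            std::abort();
    }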
diff --git a/src/mongo/db/commands/cloud.cpp b/src/mongo/db/commands/cloud.cpp
index c68b9f7564a..e4b5ce5722c 100644
--- a/src/mongo/db/commands/cloud.cpp
+++ b/src/mongo/db/commands/cloud.cpp
@@ -19,15 +19,15 @@ namespace mongo {
}
void dynHostResolve(string& name, int& port) {
- assert( !name.empty() );
- assert( !str::contains(name, ':') );
- assert( str::startsWith(name, '#') );
+ verify( !name.empty() );
+ verify( !str::contains(name, ':') );
+ verify( str::startsWith(name, '#') );
string s = dynHostNames.get(name);
if( s.empty() ) {
name.clear();
return;
}
- assert( !str::startsWith(s, '#') );
+ verify( !str::startsWith(s, '#') );
HostAndPort hp(s);
if( hp.hasPort() ) {
port = hp.port();
@@ -56,17 +56,17 @@ namespace mongo {
}
CmdCloud() : Command("cloud") {}
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- assert(!fromRepl);
+ verify(!fromRepl);
BSONObj nodes = cmdObj["nodes"].Obj();
map<string,string> ipmap;
for( BSONObj::iterator i(nodes); i.more(); ) {
BSONElement e = i.next();
- assert( *e.fieldName() == '#' );
+ verify( *e.fieldName() == '#' );
ipmap[e.fieldName()] = e.String();
}
string me = cmdObj["me"].String();
- assert( !me.empty() && me[0] == '#' );
+ verify( !me.empty() && me[0] == '#' );
log(/*1*/) << "CmdCloud" << endl;
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 2a40506a02b..08c78308b35 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -91,7 +91,7 @@ namespace mongo {
}
- assert( cursor );
+ verify( cursor );
string cursorName = cursor->toString();
auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));
@@ -136,7 +136,7 @@ namespace mongo {
RARELY killCurrentOp.checkForInterrupt();
}
- assert( start == bb.buf() );
+ verify( start == bb.buf() );
result.appendArray( "values" , arr.done() );
diff --git a/src/mongo/db/commands/document_source_cursor.cpp b/src/mongo/db/commands/document_source_cursor.cpp
index 9e71eae77f4..d8bb170e6a4 100755
--- a/src/mongo/db/commands/document_source_cursor.cpp
+++ b/src/mongo/db/commands/document_source_cursor.cpp
@@ -95,12 +95,12 @@ namespace mongo {
void DocumentSourceCursor::setSource(DocumentSource *pSource) {
/* this doesn't take a source */
- assert(false);
+ verify(false);
}
void DocumentSourceCursor::sourceToBson(BSONObjBuilder *pBuilder) const {
/* this has no analog in the BSON world */
- assert(false);
+ verify(false);
}
DocumentSourceCursor::DocumentSourceCursor(
@@ -121,7 +121,7 @@ namespace mongo {
const shared_ptr<Cursor> &pCursor,
const string &ns,
const intrusive_ptr<ExpressionContext> &pExpCtx) {
- assert(pCursor.get());
+ verify(pCursor.get());
intrusive_ptr<DocumentSourceCursor> pSource(
new DocumentSourceCursor(pCursor, ns, pExpCtx));
return pSource;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index f4d43fe2125..420249a007c 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -48,7 +48,7 @@ namespace mongo {
void JSFunction::init( State * state ) {
_scope = state->scope();
- assert( _scope );
+ verify( _scope );
_scope->init( &_wantedScope );
_func = _scope->createFunction( _code.c_str() );
@@ -68,7 +68,7 @@ namespace mongo {
*/
void JSMapper::map( const BSONObj& o ) {
Scope * s = _func.scope();
- assert( s );
+ verify( s );
if ( s->invoke( _func.func() , &_params, &o , 0 , true, false, true ) )
throw UserException( 9014, str::stream() << "map invoke failed: " + s->getError() );
}
@@ -176,14 +176,14 @@ namespace mongo {
uassert( 13070 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );
if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ) {
- assert( n > 1 ); // if not, inf. loop
+ verify( n > 1 ); // if not, inf. loop
break;
}
valueBuilder->append( ee );
sizeSoFar += ee.size();
}
- assert(valueBuilder);
+ verify(valueBuilder);
valueBuilder->done();
BSONObj args = reduceArgs.obj();
@@ -438,7 +438,7 @@ namespace mongo {
BSONObj key = i->first;
BSONList& all = i->second;
- assert( all.size() == 1 );
+ verify( all.size() == 1 );
BSONObjIterator vi( all[0] );
vi.next();
@@ -543,7 +543,7 @@ namespace mongo {
* Insert doc in collection
*/
void State::insert( const string& ns , const BSONObj& o ) {
- assert( _onDisk );
+ verify( _onDisk );
writelock l( ns );
Client::Context ctx( ns );
@@ -564,7 +564,7 @@ namespace mongo {
* Insert doc into the inc collection
*/
void State::_insertToInc( BSONObj& o ) {
- assert( _onDisk );
+ verify( _onDisk );
theDataFileMgr.insertWithObjMod( _config.incLong.c_str() , o , true );
getDur().commitIfNeeded();
}
@@ -717,7 +717,7 @@ namespace mongo {
BSONObj key = i->first;
BSONList& all = i->second;
- assert( all.size() == 1 );
+ verify( all.size() == 1 );
BSONObj res = _config.finalizer->finalize( all[0] );
@@ -731,7 +731,7 @@ namespace mongo {
}
// use index on "0" to pull sorted data
- assert( _temp->size() == 0 );
+ verify( _temp->size() == 0 );
BSONObj sortKey = BSON( "0" << 1 );
{
bool foundIndex = false;
@@ -745,7 +745,7 @@ namespace mongo {
}
}
- assert( foundIndex );
+ verify( foundIndex );
}
Client::ReadContext ctx( _config.incLong );
@@ -753,7 +753,7 @@ namespace mongo {
BSONObj prev;
BSONList all;
- assert( pm == op->setMessage( "m/r: (3/3) final reduce to collection" , _db.count( _config.incLong, BSONObj(), QueryOption_SlaveOk ) ) );
+ verify( pm == op->setMessage( "m/r: (3/3) final reduce to collection" , _db.count( _config.incLong, BSONObj(), QueryOption_SlaveOk ) ) );
shared_ptr<Cursor> temp =
NamespaceDetailsTransient::bestGuessCursor( _config.incLong.c_str() , BSONObj() ,
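The reduce path above packs values into a batch until the next one would exceed BSONObjMaxUserSize; the verify( n > 1 ) is what rules out an infinite loop, since reducing a single oversized value would yield the same value again and make no progress. A size-budget sketch of that batching:

    #include <vector>
    #include <cstddef>
    #include <cstdlib>

    std::size_t packBatch(const std::vector<std::size_t>& sizes, std::size_t maxBytes) {
        std::size_t n = 0, soFar = 0;
        for (std::size_t sz : sizes) {
            if (soFar + sz > maxBytes) {
                if (n <= 1) std::abort();   // verify( n > 1 ): progress must be possible
                break;
            }
            soFar += sz;
            ++n;
        }
        return n;   // number of values consumed by this reduce pass
    }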
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 295b3b2b770..386ce3d2d16 100755
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -159,7 +159,7 @@ namespace mongo {
}
/* NOTREACHED */
- assert(false);
+ verify(false);
return false;
}
diff --git a/src/mongo/db/compact.cpp b/src/mongo/db/compact.cpp
index 24716cc45fc..533e1845446 100644
--- a/src/mongo/db/compact.cpp
+++ b/src/mongo/db/compact.cpp
@@ -60,7 +60,7 @@ namespace mongo {
Extent *e = ext.ext();
e->assertOk();
- assert( e->validates() );
+ verify( e->validates() );
unsigned skipped = 0;
{
@@ -145,8 +145,8 @@ namespace mongo {
}
} // if !L.isNull()
- assert( d->firstExtent == ext );
- assert( d->lastExtent != ext );
+ verify( d->firstExtent == ext );
+ verify( d->lastExtent != ext );
DiskLoc newFirst = e->xnext;
d->firstExtent.writing() = newFirst;
newFirst.ext()->xprev.writing().Null();
@@ -257,7 +257,7 @@ namespace mongo {
result.append("invalidObjects", skipped);
}
- assert( d->firstExtent.ext()->xprev.isNull() );
+ verify( d->firstExtent.ext()->xprev.isNull() );
// indexes will do their own progress meter?
pm.finished();
@@ -373,11 +373,11 @@ namespace mongo {
int pb = 0;
if( cmdObj.hasElement("paddingFactor") ) {
pf = cmdObj["paddingFactor"].Number();
- assert( pf >= 1.0 && pf <= 4.0 );
+ verify( pf >= 1.0 && pf <= 4.0 );
}
if( cmdObj.hasElement("paddingBytes") ) {
pb = (int) cmdObj["paddingBytes"].Number();
- assert( pb >= 0 && pb <= 1024 * 1024 );
+ verify( pb >= 0 && pb <= 1024 * 1024 );
}
bool validate = !cmdObj.hasElement("validate") || cmdObj["validate"].trueValue(); // default is true at the moment
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 798856e9547..18abe49188a 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -75,7 +75,7 @@ namespace mongo {
if ( progressMeterTotal ) {
if ( _progressMeter.isActive() ) {
cout << "about to assert, old _message: " << _message << " new message:" << msg << endl;
- assert( ! _progressMeter.isActive() );
+ verify( ! _progressMeter.isActive() );
}
_progressMeter.reset( progressMeterTotal , secondsBetween );
}
diff --git a/src/mongo/db/cursor.cpp b/src/mongo/db/cursor.cpp
index ac7afc1532b..13944b20da0 100644
--- a/src/mongo/db/cursor.cpp
+++ b/src/mongo/db/cursor.cpp
@@ -59,7 +59,7 @@ namespace mongo {
}
DiskLoc nextLoop( NamespaceDetails *nsd, const DiskLoc &prev ) {
- assert( nsd->capLooped() );
+ verify( nsd->capLooped() );
DiskLoc next = forward()->next( prev );
if ( !next.isNull() )
return next;
@@ -67,7 +67,7 @@ namespace mongo {
}
DiskLoc prevLoop( NamespaceDetails *nsd, const DiskLoc &curr ) {
- assert( nsd->capLooped() );
+ verify( nsd->capLooped() );
DiskLoc prev = reverse()->next( curr );
if ( !prev.isNull() )
return prev;
@@ -96,7 +96,7 @@ namespace mongo {
}
DiskLoc ForwardCappedCursor::next( const DiskLoc &prev ) const {
- assert( nsd );
+ verify( nsd );
if ( !nsd->capLooped() )
return forward()->next( prev );
@@ -134,7 +134,7 @@ namespace mongo {
}
DiskLoc ReverseCappedCursor::next( const DiskLoc &prev ) const {
- assert( nsd );
+ verify( nsd );
if ( !nsd->capLooped() )
return reverse()->next( prev );
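nextLoop()/prevLoop() above implement the wrap-around step for capped (circular) collections: when a forward scan falls off the end, it restarts at the beginning. A toy sketch of the forward case over a vector of records (the real code first verifies capLooped()):

    #include <vector>
    #include <cstddef>
    #include <cstdlib>

    std::size_t nextLoop(const std::vector<int>& capped, std::size_t prev) {
        if (capped.empty()) std::abort();        // guard before wrapping
        std::size_t next = prev + 1;
        return next < capped.size() ? next : 0;  // wrap back to the oldest record
    }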
diff --git a/src/mongo/db/cursor.h b/src/mongo/db/cursor.h
index 6b2b04cac56..ee97865438e 100644
--- a/src/mongo/db/cursor.h
+++ b/src/mongo/db/cursor.h
@@ -224,7 +224,7 @@ namespace mongo {
}
bool ok() { return !curr.isNull(); }
Record* _current() {
- assert( ok() );
+ verify( ok() );
return curr.rec();
}
BSONObj current() {
diff --git a/src/mongo/db/d_concurrency.cpp b/src/mongo/db/d_concurrency.cpp
index 84c1cf34516..def72841920 100644
--- a/src/mongo/db/d_concurrency.cpp
+++ b/src/mongo/db/d_concurrency.cpp
@@ -105,7 +105,7 @@ namespace mongo {
void QLock::runExclusively(void (*f)(void)) {
dlog(1) << "QLock::runExclusively" << endl;
boost::mutex::scoped_lock lk( m );
- assert( w.n > 0 );
+ verify( w.n > 0 );
greed++; // stop new acquisitions
X.n++;
while( X.n ) {
@@ -183,14 +183,14 @@ namespace mongo {
}
static bool lock_R_try(int ms) {
- assert( threadState() == 0 );
+ verify( threadState() == 0 );
bool got = q.lock_R_try(ms);
if( got )
threadState() = 'R';
return got;
}
static bool lock_W_try(int ms) {
- assert( threadState() == 0 );
+ verify( threadState() == 0 );
bool got = q.lock_W_try(ms);
if( got ) {
threadState() = 'W';
@@ -199,7 +199,7 @@ namespace mongo {
return got;
}
static void lock_W_stop_greed() {
- assert( threadState() == 0 );
+ verify( threadState() == 0 );
threadState() = 'W';
{
Acquiring a('W');
@@ -241,7 +241,7 @@ namespace mongo {
}
static void lock_w() {
char &ts = threadState();
- assert( ts == 0 );
+ verify( ts == 0 );
getDur().commitIfNeeded();
ts = 'w';
Acquiring a('w');
@@ -255,7 +255,7 @@ namespace mongo {
}
static void lock_r() {
char& ts = threadState();
- assert( ts == 0 );
+ verify( ts == 0 );
ts = 'r';
Acquiring a('r');
q.lock_r();
@@ -269,23 +269,23 @@ namespace mongo {
// these are safe for use ACROSS threads. i.e. one thread can lock and
// another unlock
void Lock::ThreadSpanningOp::setWLockedNongreedy() {
- assert( threadState() == 0 ); // as this spans threads the tls wouldn't make sense
+ verify( threadState() == 0 ); // as this spans threads the tls wouldn't make sense
lock_W_stop_greed();
}
void Lock::ThreadSpanningOp::W_to_R() {
- assert( threadState() == 'W' );
+ verify( threadState() == 'W' );
dur::assertNothingSpooled();
q.W_to_R();
threadState() = 'R';
}
void Lock::ThreadSpanningOp::unsetW() { // note there is no unlocking_W() call here
- assert( threadState() == 'W' );
+ verify( threadState() == 'W' );
q.unlock_W();
q.start_greed();
threadState() = 0;
}
void Lock::ThreadSpanningOp::unsetR() {
- assert( threadState() == 'R' || threadState() == 0 );
+ verify( threadState() == 'R' || threadState() == 0 );
q.unlock_R();
q.start_greed();
threadState() = 0;
@@ -491,15 +491,15 @@ namespace mongo {
}
}
void Lock::GlobalWrite::downgrade() {
- assert( !noop );
- assert( threadState() == 'W' );
+ verify( !noop );
+ verify( threadState() == 'W' );
q.W_to_R();
threadState() = 'R';
}
// you will deadlock if 2 threads doing this
bool Lock::GlobalWrite::upgrade() {
- assert( !noop );
- assert( threadState() == 'R' );
+ verify( !noop );
+ verify( threadState() == 'R' );
if( q.R_to_W() ) {
threadState() = 'W';
return true;
@@ -534,18 +534,18 @@ namespace mongo {
case 'R' :
{
error() << "trying to get a w lock after already getting an R lock is not allowed" << endl;
- assert(false);
+ verify(false);
}
case 'r' :
{
error() << "trying to get a w lock after already getting an r lock is not allowed" << endl;
- assert(false);
+ verify(false);
}
return false;
case 'W' :
return true; // lock nothing further
default:
- assert(false);
+ verify(false);
case 'w' :
case 0 :
break;
@@ -559,7 +559,7 @@ namespace mongo {
error() << "can't lock local and admin db at the same time " << (int) db << ' ' << (int) ls.whichNestable << endl;
fassert(16131,false);
}
- assert( ls.nestableCount > 0 );
+ verify( ls.nestableCount > 0 );
}
else {
ls.whichNestable = db;
@@ -741,7 +741,7 @@ namespace mongo {
case 'w' :
return false;
default:
- assert(false);
+ verify(false);
case 0 :
;
}
@@ -753,7 +753,7 @@ namespace mongo {
case 'w':
break;
default:
- assert(false);
+ verify(false);
case 0 :
lock_w();
locked_w = true;
@@ -765,7 +765,7 @@ namespace mongo {
case 'w':
break;
default:
- assert(false);
+ verify(false);
case 0 :
lock_r();
locked_r = true;
@@ -895,6 +895,6 @@ namespace mongo {
}
MongoMutex::MongoMutex() {
static int n = 0;
- assert( ++n == 1 );
+ verify( ++n == 1 );
}
}
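The hunks above all consult threadState(), a per-thread char encoding the strongest lock held ('W'/'R' global write/read, 'w'/'r' per-database, 0 none); each acquisition checks the expected prior state before transitioning. A sketch of that convention (the real lock_*_try functions verify() the prior state instead of returning false):

    static thread_local char threadState = 0;    // 0, 'r', 'w', 'R', or 'W'

    enum class Acquire { GlobalRead, GlobalWrite };

    bool tryAcquire(Acquire what) {
        if (threadState != 0) return false;      // verify( threadState() == 0 ) in the real code
        threadState = (what == Acquire::GlobalWrite) ? 'W' : 'R';
        // ... the underlying QLock would be taken here ...
        return true;
    }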
diff --git a/src/mongo/db/database.cpp b/src/mongo/db/database.cpp
index 83fe214312a..dd0bd31704e 100644
--- a/src/mongo/db/database.cpp
+++ b/src/mongo/db/database.cpp
@@ -34,7 +34,7 @@ namespace mongo {
Lock::assertAtLeastReadLocked(db->name);
}
else {
- assert( Lock::isLocked() );
+ verify( Lock::isLocked() );
}
}
@@ -150,13 +150,13 @@ namespace mongo {
}
bool Database::openExistingFile( int n ) {
- assert(this);
+ verify(this);
Lock::assertWriteLocked(name);
{
// must not yet be visible to others as we aren't in the db's write lock and
// we will write to _files vector - thus this assert.
bool loaded = dbHolder().__isLoaded(name, path);
- assert( !loaded );
+ verify( !loaded );
}
// additionally must be in the dbholder mutex (no assert for that yet)
@@ -202,7 +202,7 @@ namespace mongo {
// repair purposes yet we do not.
void Database::openAllFiles() {
//log() << "TEMP openallfiles " << path << ' ' << name << endl;
- assert(this);
+ verify(this);
int n = 0;
while( openExistingFile(n) ) {
n++;
@@ -224,7 +224,7 @@ namespace mongo {
// todo: this is called a lot. streamline the common case
MongoDataFile* Database::getFile( int n, int sizeNeeded , bool preallocateOnly) {
- assert(this);
+ verify(this);
DEV assertDbAtLeastReadLocked(this);
namespaceIndex.init();
@@ -240,12 +240,12 @@ namespace mongo {
MongoDataFile* p = 0;
if ( !preallocateOnly ) {
while ( n >= (int) _files.size() ) {
- assert(this);
+ verify(this);
if( !Lock::isWriteLocked(this->name) ) {
log() << "error: getFile() called in a read lock, yet file to return is not yet open" << endl;
log() << " getFile(" << n << ") _files.size:" <<_files.size() << ' ' << fileName(n).string() << endl;
log() << " context ns: " << cc().ns() << " openallfiles:" << _openAllFiles << endl;
- assert(false);
+ verify(false);
}
_files.push_back(0);
}
@@ -368,7 +368,7 @@ namespace mongo {
return true;
}
- assert( cc().database() == this );
+ verify( cc().database() == this );
if ( ! namespaceIndex.details( profileName.c_str() ) ) {
log() << "creating profile collection: " << profileName << endl;
@@ -437,7 +437,7 @@ namespace mongo {
{
SimpleMutex::scoped_lock lk(_m);
DBs& m = _paths[path];
- assert( m[dbname] == 0 );
+ verify( m[dbname] == 0 );
m[dbname] = db;
_size++;
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 13cf7faaf86..6d710724ce7 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -99,7 +99,7 @@ namespace mongo {
struct MyStartupTests {
MyStartupTests() {
- assert( sizeof(OID) == 12 );
+ verify( sizeof(OID) == 12 );
}
} mystartupdbcpp;
@@ -114,12 +114,12 @@ namespace mongo {
sleepsecs(1);
unsigned n = 0;
auto f = [&n](const BSONObj& o) {
- assert( o.valid() );
+ verify( o.valid() );
//cout << o << endl;
n++;
bool testClosingSocketOnError = false;
if( testClosingSocketOnError )
- assert(false);
+ verify(false);
};
DBClientConnection db(false);
db.connect("localhost");
@@ -190,7 +190,7 @@ namespace mongo {
QueryResult *qr = (QueryResult *) header;
long long cursorid = qr->cursorId;
if( cursorid ) {
- assert( dbresponse.exhaust && *dbresponse.exhaust != 0 );
+ verify( dbresponse.exhaust && *dbresponse.exhaust != 0 );
string ns = dbresponse.exhaust; // before reset() free's it...
m.reset();
BufBuilder b(512);
@@ -246,8 +246,8 @@ namespace mongo {
static DBDirectClient db;
if ( h->version == 4 && h->versionMinor == 4 ) {
- assert( PDFILE_VERSION == 4 );
- assert( PDFILE_VERSION_MINOR == 5 );
+ verify( PDFILE_VERSION == 4 );
+ verify( PDFILE_VERSION_MINOR == 5 );
list<string> colls = db.getCollectionNames( dbName );
for ( list<string>::iterator i=colls.begin(); i!=colls.end(); i++) {
@@ -276,7 +276,7 @@ namespace mongo {
Client::GodScope gs;
log(1) << "enter repairDatabases (to check pdfile version #)" << endl;
- //assert(checkNsFilesOnLoad);
+ //verify(checkNsFilesOnLoad);
checkNsFilesOnLoad = false; // we are mainly just checking the header - don't scan the whole .ns file for every db here.
Lock::GlobalWrite lk;
@@ -304,7 +304,7 @@ namespace mongo {
// QUESTION: Repair even if file format is higher version than code?
log() << "\t starting upgrade" << endl;
string errmsg;
- assert( doDBUpgrade( dbName , errmsg , h ) );
+ verify( doDBUpgrade( dbName , errmsg , h ) );
}
else {
log() << "\t Not upgrading, exiting" << endl;
@@ -573,8 +573,6 @@ namespace mongo {
using namespace mongo;
#include <boost/program_options.hpp>
-#undef assert
-#define assert MONGO_assert
namespace po = boost::program_options;
@@ -838,7 +836,7 @@ int main(int argc, char* argv[]) {
}
if (params.count("smallfiles")) {
cmdLine.smallfiles = true;
- assert( dur::DataLimitPerJournalFile >= 128 * 1024 * 1024 );
+ verify( dur::DataLimitPerJournalFile >= 128 * 1024 * 1024 );
dur::DataLimitPerJournalFile = 128 * 1024 * 1024;
}
if (params.count("diaglog")) {
@@ -914,7 +912,7 @@ int main(int argc, char* argv[]) {
dbexit( EXIT_BADOPTIONS );
}
lenForNewNsFiles = x * 1024 * 1024;
- assert(lenForNewNsFiles > 0);
+ verify(lenForNewNsFiles > 0);
}
if (params.count("oplogSize")) {
long long x = params["oplogSize"].as<int>();
@@ -928,7 +926,7 @@ int main(int argc, char* argv[]) {
dbexit( EXIT_BADOPTIONS );
}
cmdLine.oplogSize = x * 1024 * 1024;
- assert(cmdLine.oplogSize > 0);
+ verify(cmdLine.oplogSize > 0);
}
if (params.count("cacheSize")) {
long x = params["cacheSize"].as<long>();
@@ -1201,27 +1199,27 @@ namespace mongo {
sigemptyset( &addrSignals.sa_mask );
addrSignals.sa_flags = SA_SIGINFO;
- assert( sigaction(SIGSEGV, &addrSignals, 0) == 0 );
- assert( sigaction(SIGBUS, &addrSignals, 0) == 0 );
- assert( sigaction(SIGILL, &addrSignals, 0) == 0 );
- assert( sigaction(SIGFPE, &addrSignals, 0) == 0 );
+ verify( sigaction(SIGSEGV, &addrSignals, 0) == 0 );
+ verify( sigaction(SIGBUS, &addrSignals, 0) == 0 );
+ verify( sigaction(SIGILL, &addrSignals, 0) == 0 );
+ verify( sigaction(SIGFPE, &addrSignals, 0) == 0 );
- assert( signal(SIGABRT, abruptQuit) != SIG_ERR );
- assert( signal(SIGQUIT, abruptQuit) != SIG_ERR );
- assert( signal(SIGPIPE, pipeSigHandler) != SIG_ERR );
+ verify( signal(SIGABRT, abruptQuit) != SIG_ERR );
+ verify( signal(SIGQUIT, abruptQuit) != SIG_ERR );
+ verify( signal(SIGPIPE, pipeSigHandler) != SIG_ERR );
setupSIGTRAPforGDB();
sigemptyset( &asyncSignals );
if ( inFork )
- assert( signal( SIGHUP , setupSignals_ignoreHelper ) != SIG_ERR );
+ verify( signal( SIGHUP , setupSignals_ignoreHelper ) != SIG_ERR );
else
sigaddset( &asyncSignals, SIGHUP );
sigaddset( &asyncSignals, SIGINT );
sigaddset( &asyncSignals, SIGTERM );
- assert( pthread_sigmask( SIG_SETMASK, &asyncSignals, 0 ) == 0 );
+ verify( pthread_sigmask( SIG_SETMASK, &asyncSignals, 0 ) == 0 );
boost::thread it( interruptThread );
set_terminate( myterminate );
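The signal-setup hunks above treat every registration as fallible: fatal signals get handlers via sigaction(), async ones are blocked and drained by a dedicated thread, and each call is verified because a half-installed setup is worse than refusing to start. A trimmed POSIX sketch:

    #include <signal.h>
    #include <pthread.h>
    #include <cstring>
    #include <cstdlib>

    static void abruptQuit(int) { std::_Exit(1); }   // stand-in handler

    void setupSignalsSketch() {
        struct sigaction sa;
        std::memset(&sa, 0, sizeof(sa));
        sa.sa_handler = abruptQuit;
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGABRT, &sa, 0) != 0) std::abort();   // verify( ... == 0 )

        sigset_t asyncSignals;
        sigemptyset(&asyncSignals);
        sigaddset(&asyncSignals, SIGINT);
        sigaddset(&asyncSignals, SIGTERM);
        if (pthread_sigmask(SIG_SETMASK, &asyncSignals, 0) != 0)
            std::abort();                                    // verify( ... == 0 )
        // an interruptThread-style worker would now sigwait() on asyncSignals
    }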
diff --git a/src/mongo/db/db.h b/src/mongo/db/db.h
index 94841024194..7d2928c25ba 100644
--- a/src/mongo/db/db.h
+++ b/src/mongo/db/db.h
@@ -35,7 +35,7 @@ namespace mongo {
dbtemprelease() {
const Client& c = cc();
_context = c.getContext();
- assert( Lock::isLocked() );
+ verify( Lock::isLocked() );
if( Lock::nested() ) {
Lock::nested();
massert(10298 , "can't temprelease nested lock", false);
@@ -44,7 +44,7 @@ namespace mongo {
_context->unlocked();
}
tr.reset(new Lock::TempRelease);
- assert( c.curop() );
+ verify( c.curop() );
c.curop()->yielded();
}
~dbtemprelease() {
@@ -55,7 +55,7 @@ namespace mongo {
};
/** must be write locked
- no assert (and no release) if nested write lock
+ no verify (and no release) if nested write lock
a lot like dbtempreleasecond, eliminate?
*/
struct dbtempreleasewritelock {
@@ -65,13 +65,13 @@ namespace mongo {
dbtempreleasewritelock() {
const Client& c = cc();
_context = c.getContext();
- assert( Lock::isW() );
+ verify( Lock::isW() );
if( Lock::nested() )
return;
if ( _context )
_context->unlocked();
tr.reset(new Lock::TempRelease);
- assert( c.curop() );
+ verify( c.curop() );
c.curop()->yielded();
}
~dbtempreleasewritelock() {
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index cebb50ce38e..293e1d6f429 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -89,7 +89,7 @@ namespace mongo {
CmdResetError() : Command("resetError", false, "reseterror") {}
bool run(const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
LastError *le = lastError.get();
- assert( le );
+ verify( le );
le->reset();
return true;
}
@@ -223,7 +223,7 @@ namespace mongo {
return true;
}
- assert( sprintf( buf , "w block pass: %lld" , ++passes ) < 30 );
+ verify( sprintf( buf , "w block pass: %lld" , ++passes ) < 30 );
c.curop()->setMessage( buf );
sleepmillis(1);
killCurrentOp.checkForInterrupt();
@@ -698,12 +698,12 @@ namespace mongo {
struct DBCommandsUnitTest {
DBCommandsUnitTest() {
- assert( removeBit(1, 0) == 0 );
- assert( removeBit(2, 0) == 1 );
- assert( removeBit(2, 1) == 0 );
- assert( removeBit(255, 1) == 127 );
- assert( removeBit(21, 2) == 9 );
- assert( removeBit(0x4000000000000001ULL, 62) == 1 );
+ verify( removeBit(1, 0) == 0 );
+ verify( removeBit(2, 0) == 1 );
+ verify( removeBit(2, 1) == 0 );
+ verify( removeBit(255, 1) == 127 );
+ verify( removeBit(21, 2) == 9 );
+ verify( removeBit(0x4000000000000001ULL, 62) == 1 );
}
} dbc_unittest;
@@ -1127,7 +1127,7 @@ namespace mongo {
cursor->advance();
BSONElement ne = obj["n"];
- assert(ne.isNumber());
+ verify(ne.isNumber());
int myn = ne.numberInt();
if ( n != myn ) {
log() << "should have chunk: " << n << " have:" << myn << endl;
@@ -1874,7 +1874,7 @@ namespace mongo {
bool retval = false;
if ( c->locktype() == Command::NONE ) {
- assert( !c->lockGlobally() );
+ verify( !c->lockGlobally() );
// we also trust that this won't crash
retval = true;
@@ -1894,7 +1894,7 @@ namespace mongo {
}
else if( c->locktype() != Command::WRITE ) {
// read lock
- assert( ! c->logTheOp() );
+ verify( ! c->logTheOp() );
string ns = c->parseNs(dbname, cmdObj);
scoped_ptr<Lock::GlobalRead> lk;
if( c->lockGlobally() )
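The DBCommandsUnitTest hunk above pins down removeBit()'s behavior through six verify() cases. A sketch of an implementation consistent with all of them, inferred from those cases rather than copied from the tree:

    // Drop bit b from x and shift the higher bits down one position.
    // Valid for 0 <= b <= 62 (b == 63 would shift by 64, which is undefined).
    inline unsigned long long removeBit(unsigned long long x, int b) {
        unsigned long long low  = x & ((1ULL << b) - 1);   // bits below b, kept in place
        unsigned long long high = x >> (b + 1);            // bits above b, shifted down
        return low | (high << b);
    }

    // e.g. removeBit(255, 1) == 127 and removeBit(21, 2) == 9, matching the
    // verify() cases in the hunk.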
diff --git a/src/mongo/db/dbcommands_admin.cpp b/src/mongo/db/dbcommands_admin.cpp
index 20116040feb..223c71cd07f 100644
--- a/src/mongo/db/dbcommands_admin.cpp
+++ b/src/mongo/db/dbcommands_admin.cpp
@@ -440,7 +440,7 @@ namespace mongo {
log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
if( lock ) {
Lock::ThreadSpanningOp::setWLockedNongreedy();
- assert( !locked ); // impossible to get here if locked is true
+ verify( !locked ); // impossible to get here if locked is true
try {
//uassert(12034, "fsync: can't lock while an unlock is pending", !unlockRequested);
uassert(12032, "fsync: sync option must be true when using lock", sync);
@@ -458,7 +458,7 @@ namespace mongo {
Lock::ThreadSpanningOp::unsetR();
throw;
}
- assert( !locked );
+ verify( !locked );
locked = true;
log() << "db is now locked for snapshotting, no writes allowed. db.fsyncUnlock() to unlock" << endl;
log() << " For more info see " << FSyncCommand::url() << endl;
diff --git a/src/mongo/db/dbcommands_generic.cpp b/src/mongo/db/dbcommands_generic.cpp
index 4e9e4cdb7ed..accc6fe262e 100644
--- a/src/mongo/db/dbcommands_generic.cpp
+++ b/src/mongo/db/dbcommands_generic.cpp
@@ -197,13 +197,13 @@ namespace mongo {
return false;
}
int x = (int) cmdObj["journalCommitInterval"].Number();
- assert( x > 1 && x < 500 );
+ verify( x > 1 && x < 500 );
cmdLine.journalCommitInterval = x;
log() << "setParameter journalCommitInterval=" << x << endl;
s++;
}
if( cmdObj.hasElement("notablescan") ) {
- assert( !cmdLine.isMongos() );
+ verify( !cmdLine.isMongos() );
if( s == 0 )
result.append("was", cmdLine.noTableScan);
cmdLine.noTableScan = cmdObj["notablescan"].Bool();
@@ -216,7 +216,7 @@ namespace mongo {
s++;
}
if( cmdObj.hasElement("syncdelay") ) {
- assert( !cmdLine.isMongos() );
+ verify( !cmdLine.isMongos() );
if( s == 0 )
result.append("was", cmdLine.syncdelay );
cmdLine.syncdelay = cmdObj["syncdelay"].Number();
@@ -233,7 +233,7 @@ namespace mongo {
result.append("was", replApplyBatchSize );
BSONElement e = cmdObj["replApplyBatchSize"];
ParameterValidator * v = ParameterValidator::get( e.fieldName() );
- assert( v );
+ verify( v );
if ( ! v->isValid( e , errmsg ) )
return false;
replApplyBatchSize = e.numberInt();
@@ -386,7 +386,7 @@ namespace mongo {
log() << "terminating, shutdown command received" << endl;
dbexit( EXIT_CLEAN , "shutdown called" , true ); // this never returns
- assert(0);
+ verify(0);
return true;
}
diff --git a/src/mongo/db/dbeval.cpp b/src/mongo/db/dbeval.cpp
index 67e110da71e..4b27cf881fc 100644
--- a/src/mongo/db/dbeval.cpp
+++ b/src/mongo/db/dbeval.cpp
@@ -51,9 +51,9 @@ namespace mongo {
code = e.codeWScopeCode();
break;
default:
- assert(0);
+ verify(0);
}
- assert( code );
+ verify( code );
if ( ! globalScriptEngine ) {
errmsg = "db side execution is disabled";
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 70372d5b868..e34027d01e2 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -97,7 +97,7 @@ namespace mongo {
bool * nsFound , bool * indexFound ) {
Lock::assertAtLeastReadLocked(ns);
Database *database = c.database();
- assert( database );
+ verify( database );
NamespaceDetails *d = database->namespaceIndex.details(ns);
if ( ! d )
return false;
@@ -122,7 +122,7 @@ namespace mongo {
}
DiskLoc Helpers::findById(NamespaceDetails *d, BSONObj idquery) {
- assert(d);
+ verify(d);
int idxNo = d->findIdIndex();
uassert(13430, "no _id index", idxNo>=0);
IndexDetails& i = d->idx( idxNo );
@@ -166,7 +166,7 @@ namespace mongo {
void Helpers::upsert( const string& ns , const BSONObj& o, bool fromMigrate ) {
BSONElement e = o["_id"];
- assert( e.type() );
+ verify( e.type() );
BSONObj id = e.wrap();
OpDebug debug;
@@ -206,7 +206,7 @@ namespace mongo {
BSONObj keya , keyb;
BSONObj minClean = toKeyFormat( min , keya );
BSONObj maxClean = toKeyFormat( max , keyb );
- assert( keya == keyb );
+ verify( keya == keyb );
Client::Context ctx(ns);
@@ -218,7 +218,7 @@ namespace mongo {
return 0;
int ii = nsd->findIndexByKeyPattern( keya );
- assert( ii >= 0 );
+ verify( ii >= 0 );
IndexDetails& i = nsd->idx( ii );
@@ -275,7 +275,7 @@ namespace mongo {
_root /= a;
if ( b.size() )
_root /= b;
- assert( a.size() || b.size() );
+ verify( a.size() || b.size() );
_file = _root;
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index ad5aea2cdff..9a8f1d08864 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -217,7 +217,7 @@ namespace mongo {
}
void markReset() {
- assert( mark );
+ verify( mark );
nextjsobj = mark;
}
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
index eb19ba3be6c..a9818dc29fb 100644
--- a/src/mongo/db/dbwebserver.cpp
+++ b/src/mongo/db/dbwebserver.cpp
@@ -35,8 +35,6 @@
#include "../util/admin_access.h"
#include "dbwebserver.h"
#include <boost/date_time/posix_time/posix_time.hpp>
-#undef assert
-#define assert MONGO_assert
namespace mongo {
@@ -407,8 +405,8 @@ namespace mongo {
string cmd = commands[i];
Command * c = Command::findCommand( cmd );
- assert( c );
- assert( c->locktype() == 0 );
+ verify( c );
+ verify( c->locktype() == 0 );
BSONObj co;
{
@@ -498,9 +496,9 @@ namespace mongo {
vector<string>& headers, const SockAddr &from ) {
string cmd;
bool text = false;
- assert( _cmd( url , cmd , text, params ) );
+ verify( _cmd( url , cmd , text, params ) );
Command * c = _cmd( cmd );
- assert( c );
+ verify( c );
BSONObj cmdObj = BSON( cmd << 1 );
Client& client = cc();
diff --git a/src/mongo/db/diskloc.h b/src/mongo/db/diskloc.h
index 5295df3e260..0b7e3334312 100644
--- a/src/mongo/db/diskloc.h
+++ b/src/mongo/db/diskloc.h
@@ -69,7 +69,7 @@ namespace mongo {
_a = -1;
ofs = 0; /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
}
- void assertOk() { assert(!isNull()); }
+ void assertOk() { verify(!isNull()); }
void setInvalid() {
_a = -2;
ofs = 0;
@@ -96,7 +96,7 @@ namespace mongo {
}
void inc(int amt) {
- assert( !isNull() );
+ verify( !isNull() );
ofs += amt;
}
@@ -113,7 +113,7 @@ namespace mongo {
const DiskLoc& operator=(const DiskLoc& b) {
_a=b._a;
ofs = b.ofs;
- //assert(ofs!=0);
+ //verify(ofs!=0);
return *this;
}
int compare(const DiskLoc& b) const {
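The diskloc.h hunks above lean on a sentinel convention: file number -1 means null and -2 means invalid, so assertOk() is just an always-on not-null verify before a location is used. A sketch of that convention (field and method names mirror the hunk; the rest is illustrative):

    #include <cstdlib>

    struct DiskLocSketch {
        int _a;    // data-file number; -1 = null, -2 = invalid
        int ofs;   // offset within the file

        bool isNull() const { return _a == -1; }
        void assertOk() const { if (isNull()) std::abort(); }   // verify( !isNull() )

        void inc(int amt) {
            assertOk();    // advancing a null location is a logic bug
            ofs += amt;
        }
    };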
diff --git a/src/mongo/db/dur.cpp b/src/mongo/db/dur.cpp
index 09ab9ebfb35..d3b29019d3c 100644
--- a/src/mongo/db/dur.cpp
+++ b/src/mongo/db/dur.cpp
@@ -188,12 +188,12 @@ namespace mongo {
DurableInterface* DurableInterface::_impl = nonDurableImpl;
void DurableInterface::enableDurability() {
- assert(_impl == nonDurableImpl);
+ verify(_impl == nonDurableImpl);
_impl = durableImpl;
}
void DurableInterface::disableDurability() {
- assert(_impl == durableImpl);
+ verify(_impl == durableImpl);
massert(13616, "can't disable durability with pending writes", !commitJob.hasWritten());
_impl = nonDurableImpl;
}
@@ -337,7 +337,7 @@ namespace mongo {
static int n;
++n;
- assert(debug && cmdLine.dur);
+ verify(debug && cmdLine.dur);
if (commitJob.writes().empty())
return;
const WriteIntent &i = commitJob.lastWrite();
@@ -386,7 +386,7 @@ namespace mongo {
_bytes += mmf->length();
- assert( mmf->length() == (unsigned) mmf->length() );
+ verify( mmf->length() == (unsigned) mmf->length() );
if (memcmp(p, w, (unsigned) mmf->length()) == 0)
return; // next file
@@ -457,7 +457,7 @@ namespace mongo {
LOG(4) << "journal REMAPPRIVATEVIEW" << endl;
d.dbMutex.assertWriteLocked();
- assert( !commitJob.hasWritten() );
+ verify( !commitJob.hasWritten() );
// we want to remap all private views about every 2 seconds. there could be ~1000 views so
// we do a little each pass; beyond the remap time, more significantly, there will be copy on write
@@ -505,7 +505,7 @@ namespace mongo {
dassert( i != e );
if( (*i)->isMongoMMF() ) {
MongoMMF *mmf = (MongoMMF*) *i;
- assert(mmf);
+ verify(mmf);
if( mmf->willNeedRemap() ) {
mmf->willNeedRemap() = false;
mmf->remapThePrivateView();
@@ -535,7 +535,7 @@ namespace mongo {
unspoolWriteIntents(); // in case we were doing some writing ourself (likely impossible with limitedlocks version)
AlignedBuilder &ab = __theBuilder;
- assert( !d.dbMutex.atLeastReadLocked() );
+ verify( !d.dbMutex.atLeastReadLocked() );
// do we need this to be greedy, so that it can start working fairly soon?
// probably: as this is a read lock, it wouldn't change anything if only reads anyway.
@@ -560,7 +560,7 @@ namespace mongo {
unsigned abLen = ab.len();
commitJob.committingReset(); // must be reset before allowing anyone to write
- DEV assert( !commitJob.hasWritten() );
+ DEV verify( !commitJob.hasWritten() );
// release the readlock -- allowing others to now write while we are writing to the journal (etc.)
lk1.reset();
@@ -568,14 +568,14 @@ namespace mongo {
// ****** now other threads can do writes ******
WRITETOJOURNAL(h, ab);
- assert( abLen == ab.len() ); // a check that no one touched the builder while we were doing work. if so, our locking is wrong.
+ verify( abLen == ab.len() ); // a check that no one touched the builder while we were doing work. if so, our locking is wrong.
// data is now in the journal, which is sufficient for acknowledging getLastError.
// (ok to crash after that)
commitJob.committingNotifyCommitted();
WRITETODATAFILES(h, ab);
- assert( abLen == ab.len() ); // check again wasn't modded
+ verify( abLen == ab.len() ); // check again wasn't modded
ab.reset();
// can't : d.dbMutex._remapPrivateViewRequested = true;
@@ -652,7 +652,7 @@ namespace mongo {
// remapping private views must occur after WRITETODATAFILES otherwise
// we wouldn't see newly written data on reads.
//
- DEV assert( !commitJob.hasWritten() );
+ DEV verify( !commitJob.hasWritten() );
if( !Lock::isW() ) {
// REMAPPRIVATEVIEW needs done in a write lock (as there is a short window during remapping when each view
// might not exist) thus we do it later.
@@ -745,7 +745,7 @@ namespace mongo {
getDur().commitIfNeeded(true);
}
else {
- assert( inShutdown() );
+ verify( inShutdown() );
if( commitJob.hasWritten() ) {
log() << "journal warning files are closing outside locks with writes pending" << endl;
}
@@ -872,7 +872,7 @@ namespace mongo {
MongoFile::flushAll(true);
journalCleanup();
- assert(!haveJournalFiles()); // Double check post-conditions
+ verify(!haveJournalFiles()); // Double check post-conditions
}
} // namespace dur
diff --git a/src/mongo/db/dur_commitjob.cpp b/src/mongo/db/dur_commitjob.cpp
index b135827036e..813de2629f9 100644
--- a/src/mongo/db/dur_commitjob.cpp
+++ b/src/mongo/db/dur_commitjob.cpp
@@ -75,7 +75,7 @@ namespace mongo {
log() << "me:" << tlIntents.get()->n_informational() << endl;
else
log() << "no tlIntent for my thread" << endl;
- assert(false);
+ verify(false);
}
#endif
}
diff --git a/src/mongo/db/dur_commitjob.h b/src/mongo/db/dur_commitjob.h
index dd705cc92a7..e3aafbe06e6 100644
--- a/src/mongo/db/dur_commitjob.h
+++ b/src/mongo/db/dur_commitjob.h
@@ -127,7 +127,7 @@ namespace mongo {
*/
class CommitJob : boost::noncopyable {
void _committingReset();
- ~CommitJob(){ assert(!"shouldn't destroy CommitJob!"); }
+ ~CommitJob(){ verify(!"shouldn't destroy CommitJob!"); }
/** record/note an intent to write */
void note(void* p, int len);
diff --git a/src/mongo/db/dur_journal.cpp b/src/mongo/db/dur_journal.cpp
index 7f1e4351c46..9e767816dca 100644
--- a/src/mongo/db/dur_journal.cpp
+++ b/src/mongo/db/dur_journal.cpp
@@ -28,8 +28,6 @@
#include "../util/net/listen.h" // getelapsedtimemillis
#include <boost/static_assert.hpp>
#include <boost/filesystem.hpp>
-#undef assert
-#define assert MONGO_assert
#include "../util/mongoutils/str.h"
#include "dur_journalimpl.h"
#include "../util/file.h"
@@ -96,7 +94,7 @@ namespace mongo {
(2b) refuse to do a recovery startup if that is there without manual override.
*/
log() << "journaling failure/error: " << msg << endl;
- assert(false);
+ verify(false);
}
JSectFooter::JSectFooter() {
@@ -214,7 +212,7 @@ namespace mongo {
log() << "error removing journal files " << e.what() << endl;
throw;
}
- assert(!haveJournalFiles());
+ verify(!haveJournalFiles());
flushMyDirectory(getJournalDir() / "file"); // flushes parent of argument (in this case journal dir)
@@ -293,7 +291,7 @@ namespace mongo {
log() << "preallocating a journal file " << p.string() << endl;
const unsigned BLKSZ = 1024 * 1024;
- assert( len % BLKSZ == 0 );
+ verify( len % BLKSZ == 0 );
AlignedBuilder b(BLKSZ);
memset((void*)b.buf(), 0, BLKSZ);
@@ -302,21 +300,21 @@ namespace mongo {
File f;
f.open( p.string().c_str() , /*read-only*/false , /*direct-io*/false );
- assert( f.is_open() );
+ verify( f.is_open() );
fileofs loc = 0;
while ( loc < len ) {
f.write( loc , b.buf() , BLKSZ );
loc += BLKSZ;
m.hit(BLKSZ);
}
- assert( loc == len );
+ verify( loc == len );
f.fsync();
}
const int NUM_PREALLOC_FILES = 3;
inline boost::filesystem::path preallocPath(int n) {
- assert(n >= 0);
- assert(n < NUM_PREALLOC_FILES);
+ verify(n >= 0);
+ verify(n < NUM_PREALLOC_FILES);
string fn = str::stream() << "prealloc." << n;
return getJournalDir() / fn;
}
@@ -447,7 +445,7 @@ namespace mongo {
void Journal::_open() {
_curFileId = 0;
- assert( _curLogFile == 0 );
+ verify( _curLogFile == 0 );
boost::filesystem::path fname = getFilePathFor(_nextFileNumber);
// if we have a prealloced file, use it
@@ -476,7 +474,7 @@ namespace mongo {
{
JHeader h(fname.string());
_curFileId = h.fileId;
- assert(_curFileId);
+ verify(_curFileId);
AlignedBuilder b(8192);
b.appendStruct(h);
_curLogFile->synchronousAppend(b.buf(), b.len());
@@ -484,13 +482,13 @@ namespace mongo {
}
void Journal::init() {
- assert( _curLogFile == 0 );
+ verify( _curLogFile == 0 );
MongoFile::notifyPreFlush = preFlush;
MongoFile::notifyPostFlush = postFlush;
}
void Journal::open() {
- assert( MongoFile::notifyPreFlush == preFlush );
+ verify( MongoFile::notifyPreFlush == preFlush );
SimpleMutex::scoped_lock lk(_curLogFileMutex);
_open();
}
@@ -527,7 +525,7 @@ namespace mongo {
LSNFile L;
File f;
f.open(lsnPath().string().c_str());
- assert(f.is_open());
+ verify(f.is_open());
if( f.len() == 0 ) {
// this could be 'normal' if we crashed at the right moment
log() << "info lsn file is zero bytes long" << endl;
@@ -700,15 +698,15 @@ namespace mongo {
size_t compressedLength = 0;
rawCompress(uncompressed.buf(), uncompressed.len(), b.cur(), &compressedLength);
- assert( compressedLength < 0xffffffff );
- assert( compressedLength < max );
+ verify( compressedLength < 0xffffffff );
+ verify( compressedLength < max );
b.skip(compressedLength);
// footer
unsigned L = 0xffffffff;
{
// pad to alignment, and set the total section length in the JSectHeader
- assert( 0xffffe000 == (~(Alignment-1)) );
+ verify( 0xffffe000 == (~(Alignment-1)) );
unsigned lenUnpadded = b.len() + sizeof(JSectFooter);
L = (lenUnpadded + Alignment-1) & (~(Alignment-1));
dassert( L >= lenUnpadded );
@@ -727,12 +725,12 @@ namespace mongo {
SimpleMutex::scoped_lock lk(_curLogFileMutex);
// must already be open -- so that _curFileId is correct for previous buffer building
- assert( _curLogFile );
+ verify( _curLogFile );
stats.curr->_uncompressedBytes += uncompressed.len();
unsigned w = b.len();
_written += w;
- assert( w <= L );
+ verify( w <= L );
stats.curr->_journaledBytes += L;
_curLogFile->synchronousAppend((const void *) b.buf(), L);
_rotate();
diff --git a/src/mongo/db/dur_preplogbuffer.cpp b/src/mongo/db/dur_preplogbuffer.cpp
index b22ce6ce4f9..46f55e7a45d 100644
--- a/src/mongo/db/dur_preplogbuffer.cpp
+++ b/src/mongo/db/dur_preplogbuffer.cpp
@@ -82,7 +82,7 @@ namespace mongo {
JEntry e;
            e.len = min(i->length(), (unsigned)(mmf->length() - ofs)); // don't write past end of file
- assert( ofs <= 0x80000000 );
+ verify( ofs <= 0x80000000 );
e.ofs = (unsigned) ofs;
e.setFileNo( mmf->fileSuffixNo() );
if( mmf->relativePath() == local ) {
@@ -128,7 +128,7 @@ namespace mongo {
assertNothingSpooled();
const vector<WriteIntent>& _intents = commitJob.getIntentsSorted();
- assert( !_intents.empty() );
+ verify( !_intents.empty() );
WriteIntent last;
for( vector<WriteIntent>::const_iterator i = _intents.begin(); i != _intents.end(); i++ ) {
@@ -160,7 +160,7 @@ namespace mongo {
@return partially populated sectheader and _ab set
*/
static void _PREPLOGBUFFER(JSectHeader& h, AlignedBuilder& bb) {
- assert( cmdLine.dur );
+ verify( cmdLine.dur );
assertLockedForCommitting();
resetLogBuffer(h, bb); // adds JSectHeader
diff --git a/src/mongo/db/dur_recover.cpp b/src/mongo/db/dur_recover.cpp
index 4ccc75dd60c..66b6e411cb8 100644
--- a/src/mongo/db/dur_recover.cpp
+++ b/src/mongo/db/dur_recover.cpp
@@ -106,7 +106,7 @@ namespace mongo {
_lastDbName(0)
, _doDurOps(doDurOpsRecovering)
{
- assert( doDurOpsRecovering );
+ verify( doDurOpsRecovering );
bool ok = uncompress((const char *)compressed, compressedLen, &_uncompressed);
if( !ok ) {
// it should always be ok (i think?) as there is a previous check to see that the JSectFooter is ok
@@ -114,7 +114,7 @@ namespace mongo {
msgasserted(15874, "couldn't uncompress journal section");
}
const char *p = _uncompressed.c_str();
- assert( compressedLen == _h.sectionLen() - sizeof(JSectFooter) - sizeof(JSectHeader) );
+ verify( compressedLen == _h.sectionLen() - sizeof(JSectFooter) - sizeof(JSectHeader) );
_entries = auto_ptr<BufReader>( new BufReader(p, _uncompressed.size()) );
}
@@ -142,7 +142,7 @@ namespace mongo {
switch( lenOrOpCode ) {
case JEntry::OpCode_Footer: {
- assert( false );
+ verify( false );
}
case JEntry::OpCode_FileCreated:
@@ -172,11 +172,11 @@ namespace mongo {
}
// JEntry - a basic write
- assert( lenOrOpCode && lenOrOpCode < JEntry::OpCode_Min );
+ verify( lenOrOpCode && lenOrOpCode < JEntry::OpCode_Min );
_entries->rewind(4);
e.e = (JEntry *) _entries->skip(sizeof(JEntry));
e.dbName = e.e->isLocalDbContext() ? "local" : _lastDbName;
- assert( e.e->len == lenOrOpCode );
+ verify( e.e->len == lenOrOpCode );
_entries->skip(e.e->len);
}
@@ -185,7 +185,7 @@ namespace mongo {
static string fileName(const char* dbName, int fileNo) {
stringstream ss;
ss << dbName << '.';
- assert( fileNo >= 0 );
+ verify( fileNo >= 0 );
if( fileNo == JEntry::DotNsSuffix )
ss << "ns";
else
@@ -216,9 +216,9 @@ namespace mongo {
void RecoveryJob::write(const ParsedJournalEntry& entry) {
//TODO(mathias): look into making some of these dasserts
- assert(entry.e);
- assert(entry.dbName);
- assert(strnlen(entry.dbName, MaxDatabaseNameLen) < MaxDatabaseNameLen);
+ verify(entry.e);
+ verify(entry.dbName);
+ verify(strnlen(entry.dbName, MaxDatabaseNameLen) < MaxDatabaseNameLen);
const string fn = fileName(entry.dbName, entry.e->getFileNo());
MongoFile* file;
@@ -229,23 +229,23 @@ namespace mongo {
MongoMMF* mmf;
if (file) {
- assert(file->isMongoMMF());
+ verify(file->isMongoMMF());
mmf = (MongoMMF*)file;
}
else {
if( !_recovering ) {
log() << "journal error applying writes, file " << fn << " is not open" << endl;
- assert(false);
+ verify(false);
}
boost::shared_ptr<MongoMMF> sp (new MongoMMF);
- assert(sp->open(fn, false));
+ verify(sp->open(fn, false));
_mmfs.push_back(sp);
mmf = sp.get();
}
if ((entry.e->ofs + entry.e->len) <= mmf->length()) {
- assert(mmf->view_write());
- assert(entry.e->srcData());
+ verify(mmf->view_write());
+ verify(entry.e->srcData());
void* dest = (char*)mmf->view_write() + entry.e->ofs;
memcpy(dest, entry.e->srcData(), entry.e->len);
@@ -353,7 +353,7 @@ namespace mongo {
// after the entries check the footer checksum
if( _recovering ) {
- assert( ((const char *)h) + sizeof(JSectHeader) == p );
+ verify( ((const char *)h) + sizeof(JSectHeader) == p );
if( !f->checkHash(h, len + sizeof(JSectHeader)) ) {
msgasserted(13594, "journal checksum doesn't match");
}
@@ -482,7 +482,7 @@ namespace mongo {
}
void _recover() {
- assert( cmdLine.dur );
+ verify( cmdLine.dur );
boost::filesystem::path p = getJournalDir();
if( !exists(p) ) {
@@ -532,10 +532,10 @@ namespace mongo {
char x;
BufReaderY y;
r.read(x); //cout << x; // a
- assert( x == 'a' );
+ verify( x == 'a' );
r.read(y);
r.read(x);
- assert( x == 'b' );
+ verify( x == 'b' );
}
} brunittest;
diff --git a/src/mongo/db/explain.cpp b/src/mongo/db/explain.cpp
index d4444ca6aba..e16781f292a 100644
--- a/src/mongo/db/explain.cpp
+++ b/src/mongo/db/explain.cpp
@@ -178,17 +178,17 @@ namespace mongo {
ret = *i;
}
}
- assert( ret );
+ verify( ret );
return *ret;
}
void ExplainQueryInfo::noteIterate( bool match, bool loadedRecord, bool chunkSkip ) {
- assert( !_clauses.empty() );
+ verify( !_clauses.empty() );
_clauses.back()->noteIterate( match, loadedRecord, chunkSkip );
}
void ExplainQueryInfo::reviseN( long long n ) {
- assert( !_clauses.empty() );
+ verify( !_clauses.empty() );
_clauses.back()->reviseN( n );
}
diff --git a/src/mongo/db/extsort.cpp b/src/mongo/db/extsort.cpp
index 0bedcd1f891..3242f3978f7 100644
--- a/src/mongo/db/extsort.cpp
+++ b/src/mongo/db/extsort.cpp
@@ -236,7 +236,7 @@ namespace mongo {
}
- assert( slot >= 0 );
+ verify( slot >= 0 );
_stash[slot].second = false;
return best;
@@ -248,7 +248,7 @@ namespace mongo {
unsigned long long length;
_buf = (char*)_file.map( file.c_str() , length , MemoryMappedFile::SEQUENTIAL );
massert( 10308 , "mmap failed" , _buf );
- assert( length == (unsigned long long)boost::filesystem::file_size( file ) );
+ verify( length == (unsigned long long)boost::filesystem::file_size( file ) );
_end = _buf + length;
}
BSONObjExternalSorter::FileIterator::~FileIterator() {}
diff --git a/src/mongo/db/geo/2d.cpp b/src/mongo/db/geo/2d.cpp
index 642cff4987c..0ba1250c77b 100644
--- a/src/mongo/db/geo/2d.cpp
+++ b/src/mongo/db/geo/2d.cpp
@@ -355,7 +355,7 @@ namespace mongo {
unsigned _convert( double in ) const {
uassert( 13027 , str::stream() << "point not in interval of [ " << _min << ", " << _max << " )", in < _max && in >= _min );
in -= _min;
- assert( in >= 0 );
+ verify( in >= 0 );
return (unsigned)(in * _scaling);
}
@@ -489,8 +489,8 @@ namespace mongo {
}
bool mid( double amin , double amax , double bmin , double bmax , bool min , double& res ) const {
- assert( amin <= amax );
- assert( bmin <= bmax );
+ verify( amin <= amax );
+ verify( bmin <= bmax );
if ( amin < bmin ) {
if ( amax < bmin )
@@ -858,7 +858,7 @@ namespace mongo {
}
DiskLoc loc() const {
- assert( ! _dirty );
+ verify( ! _dirty );
return _loc;
}
@@ -888,8 +888,8 @@ namespace mongo {
// Definitely need to re-find our current max/min locations too
bool unDirty( const Geo2dType* g, DiskLoc& oldLoc ){
- assert( _dirty );
- assert( ! _id.isEmpty() );
+ verify( _dirty );
+ verify( ! _id.isEmpty() );
oldLoc = _loc;
_loc = DiskLoc();
@@ -952,9 +952,9 @@ namespace mongo {
bool makeDirty(){
if( ! _dirty ){
- assert( ! obj()["_id"].eoo() );
- assert( ! _bucket.isNull() );
- assert( _pos >= 0 );
+ verify( ! obj()["_id"].eoo() );
+ verify( ! _bucket.isNull() );
+ verify( _pos >= 0 );
if( _id.isEmpty() ){
_id = obj()["_id"].wrap( "" ).getOwned();
@@ -1398,7 +1398,7 @@ namespace mongo {
LOG( CDEBUG ) << "Undirtying stack point with id " << i->_id << endl;
if( i->makeDirty() ) _nDirtied++;
- assert( i->isDirty() );
+ verify( i->isDirty() );
}
// Check current item
@@ -1469,7 +1469,7 @@ namespace mongo {
_nRemovedOnYield++;
_found--;
- assert( _found >= 0 );
+ verify( _found >= 0 );
// Can't find our key again, remove
i = _stack.erase( i );
@@ -1504,9 +1504,9 @@ namespace mongo {
_noted = false;
}
- virtual Record* _current() { assert(ok()); LOG( CDEBUG + 1 ) << "_current " << _cur._loc.obj()["_id"] << endl; return _cur._loc.rec(); }
- virtual BSONObj current() { assert(ok()); LOG( CDEBUG + 1 ) << "current " << _cur._o << endl; return _cur._o; }
- virtual DiskLoc currLoc() { assert(ok()); LOG( CDEBUG + 1 ) << "currLoc " << _cur._loc << endl; return _cur._loc; }
+ virtual Record* _current() { verify(ok()); LOG( CDEBUG + 1 ) << "_current " << _cur._loc.obj()["_id"] << endl; return _cur._loc.rec(); }
+ virtual BSONObj current() { verify(ok()); LOG( CDEBUG + 1 ) << "current " << _cur._o << endl; return _cur._o; }
+ virtual DiskLoc currLoc() { verify(ok()); LOG( CDEBUG + 1 ) << "currLoc " << _cur._loc << endl; return _cur._loc; }
virtual BSONObj currKey() const { return _cur._key; }
virtual CoveredIndexMatcher* matcher() const {
@@ -1536,11 +1536,11 @@ namespace mongo {
if( maxToAdd < 0 ) maxToAdd = maxToCheck;
int maxFound = _foundInExp + maxToCheck;
- assert( maxToCheck > 0 );
- assert( maxFound > 0 );
- assert( _found <= 0x7fffffff ); // conversion to int
+ verify( maxToCheck > 0 );
+ verify( maxFound > 0 );
+ verify( _found <= 0x7fffffff ); // conversion to int
int maxAdded = static_cast<int>(_found) + maxToAdd;
- assert( maxAdded >= 0 ); // overflow check
+ verify( maxAdded >= 0 ); // overflow check
bool isNeighbor = _centerPrefix.constrains();
@@ -1682,7 +1682,7 @@ namespace mongo {
}
// Make sure we've got a reasonable center
- assert( _centerPrefix.constrains() );
+ verify( _centerPrefix.constrains() );
GeoHash _neighborPrefix = _centerPrefix;
_neighborPrefix.move( i, j );
@@ -1727,9 +1727,9 @@ namespace mongo {
// Restart our search from a diff box.
_state = START;
- assert( ! onlyExpand );
+ verify( ! onlyExpand );
- assert( _found <= 0x7fffffff );
+ verify( _found <= 0x7fffffff );
fillStack( maxFound - _foundInExp, maxAdded - static_cast<int>(_found) );
// When we return from the recursive fillStack call, we'll either have checked enough points or
@@ -1738,12 +1738,12 @@ namespace mongo {
// If we're maxed out on points, return
if( _foundInExp >= maxFound || _found >= maxAdded ) {
// Make sure we'll come back to add more points
- assert( _state == DOING_EXPAND );
+ verify( _state == DOING_EXPAND );
return;
}
// Otherwise we must be finished to return
- assert( _state == DONE );
+ verify( _state == DONE );
return;
}
@@ -1817,7 +1817,7 @@ namespace mongo {
// if the exact checks are more expensive.
bool needExact = true;
if( expensiveExact ){
- assert( false );
+ verify( false );
KeyResult result = approxKeyCheck( p, d );
if( result == BAD ) continue;
else if( result == GOOD ) needExact = false;
@@ -1939,9 +1939,9 @@ namespace mongo {
checkEarthBounds( p );
d = spheredist_deg( _near, p );
break;
- default: assert( false );
+ default: verify( false );
}
- assert( d >= 0 );
+ verify( d >= 0 );
GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near.toString()
<< "\t" << p.toString() << "\t" << d
@@ -1970,7 +1970,7 @@ namespace mongo {
d = spheredist_deg( _near, p );
within = ( d <= _maxDistance );
break;
- default: assert( false );
+ default: verify( false );
}
return within;
@@ -2013,7 +2013,7 @@ namespace mongo {
GEODEBUG( "\t\tInserted new point " << newPoint.toString() << " approx : " << keyD );
- assert( _max > 0 );
+ verify( _max > 0 );
Holder::iterator lastPtIt = _points.end();
lastPtIt--;
@@ -2050,7 +2050,7 @@ namespace mongo {
GEODEBUG( "\t\tNot erasing point " << startErase->toString() );
numToErase--;
startErase++;
- assert( startErase != _points.end() || numToErase == 0 );
+ verify( startErase != _points.end() || numToErase == 0 );
}
if( _uniqueDocs ){
@@ -2092,7 +2092,7 @@ namespace mongo {
_type(type)
{
- assert( g->getDetails() );
+ verify( g->getDetails() );
_nscanned = 0;
_found = 0;
@@ -2108,7 +2108,7 @@ namespace mongo {
_scanDistance = computeXScanDistance( startPt._y, rad2deg( _maxDistance ) + _spec->_error );
}
- assert( _scanDistance > 0 );
+ verify( _scanDistance > 0 );
}
@@ -2143,7 +2143,7 @@ namespace mongo {
{
do {
long long f = found();
- assert( f <= 0x7fffffff );
+ verify( f <= 0x7fffffff );
fillStack( maxPointsHeuristic, _numWanted - static_cast<int>(f) , true );
processExtraPoints();
} while( _state != DONE && _state != DONE_NEIGHBOR &&
@@ -2182,7 +2182,7 @@ namespace mongo {
// Enough found, but need to search neighbor boxes
farDist = std::min( _scanDistance, computeXScanDistance( _near._y, rad2deg( farDist ) ) + 2 * _spec->_error );
}
- assert( farDist >= 0 );
+ verify( farDist >= 0 );
GEODEBUGPRINT( farDist );
// Find the box that includes all the points we need to return
@@ -2317,7 +2317,7 @@ namespace mongo {
GEODEBUG( "\t\tEnding search at point " << ( _points.size() == 0 ? "(beginning)" : maybePointIt->toString() ) );
int numToAddBack = erased - numToErase;
- assert( numToAddBack >= 0 );
+ verify( numToAddBack >= 0 );
GEODEBUG( "\t\tNum tested valid : " << tested.size() << " erased : " << erased << " added back : " << numToAddBack );
@@ -2440,9 +2440,9 @@ namespace mongo {
return _cur != _end;
}
- virtual Record* _current() { assert(ok()); return _cur->_loc.rec(); }
- virtual BSONObj current() { assert(ok()); return _cur->_o; }
- virtual DiskLoc currLoc() { assert(ok()); return _cur->_loc; }
+ virtual Record* _current() { verify(ok()); return _cur->_loc.rec(); }
+ virtual BSONObj current() { verify(ok()); return _cur->_o; }
+ virtual DiskLoc currLoc() { verify(ok()); return _cur->_loc; }
virtual bool advance() {
if( ok() ){
_cur++;
@@ -2570,7 +2570,7 @@ namespace mongo {
error = _g->_errorSphere;
break;
}
- default: assert( false );
+ default: verify( false );
}
// If our distance is in the error bounds...
@@ -2589,7 +2589,7 @@ namespace mongo {
checkEarthBounds( p );
if( spheredist_deg( _startPt , p ) <= _maxDistance ) return true;
break;
- default: assert( false );
+ default: verify( false );
}
return false;
@@ -2898,12 +2898,12 @@ namespace mongo {
IndexDetails& id = d->idx( geoIdx );
Geo2dType * g = (Geo2dType*)id.getSpec().getType();
- assert( &id == g->getDetails() );
+ verify( &id == g->getDetails() );
int numWanted = 100;
if ( cmdObj["num"].isNumber() ) {
numWanted = cmdObj["num"].numberInt();
- assert( numWanted >= 0 );
+ verify( numWanted >= 0 );
}
bool uniqueDocs = false;
@@ -3019,7 +3019,7 @@ namespace mongo {
IndexDetails& id = d->idx( geoIdx );
Geo2dType * g = (Geo2dType*)id.getSpec().getType();
- assert( &id == g->getDetails() );
+ verify( &id == g->getDetails() );
int max = 100000;
@@ -3048,12 +3048,12 @@ namespace mongo {
return (int)(.5+(d*1000));
}
-#define GEOHEQ(a,b) if ( a.toString() != b ){ cout << "[" << a.toString() << "] != [" << b << "]" << endl; assert( a == GeoHash(b) ); }
+#define GEOHEQ(a,b) if ( a.toString() != b ){ cout << "[" << a.toString() << "] != [" << b << "]" << endl; verify( a == GeoHash(b) ); }
void run() {
- assert( ! GeoHash::isBitSet( 0 , 0 ) );
- assert( ! GeoHash::isBitSet( 0 , 31 ) );
- assert( GeoHash::isBitSet( 1 , 31 ) );
+ verify( ! GeoHash::isBitSet( 0 , 0 ) );
+ verify( ! GeoHash::isBitSet( 0 , 31 ) );
+ verify( GeoHash::isBitSet( 1 , 31 ) );
IndexSpec i( BSON( "loc" << "2d" ) );
Geo2dType g( &geo2dplugin , &i );
@@ -3063,10 +3063,10 @@ namespace mongo {
BSONObj in = BSON( "x" << x << "y" << y );
GeoHash h = g._hash( in );
BSONObj out = g._unhash( h );
- assert( round(x) == round( out["x"].number() ) );
- assert( round(y) == round( out["y"].number() ) );
- assert( round( in["x"].number() ) == round( out["x"].number() ) );
- assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ verify( round(x) == round( out["x"].number() ) );
+ verify( round(y) == round( out["y"].number() ) );
+ verify( round( in["x"].number() ) == round( out["x"].number() ) );
+ verify( round( in["y"].number() ) == round( out["y"].number() ) );
}
{
@@ -3075,10 +3075,10 @@ namespace mongo {
BSONObj in = BSON( "x" << x << "y" << y );
GeoHash h = g._hash( in );
BSONObj out = g._unhash( h );
- assert( round(x) == round( out["x"].number() ) );
- assert( round(y) == round( out["y"].number() ) );
- assert( round( in["x"].number() ) == round( out["x"].number() ) );
- assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ verify( round(x) == round( out["x"].number() ) );
+ verify( round(y) == round( out["y"].number() ) );
+ verify( round( in["x"].number() ) == round( out["x"].number() ) );
+ verify( round( in["y"].number() ) == round( out["y"].number() ) );
}
{
@@ -3102,26 +3102,26 @@ namespace mongo {
{
Box b( 5 , 5 , 2 );
- assert( "(5,5) -->> (7,7)" == b.toString() );
+ verify( "(5,5) -->> (7,7)" == b.toString() );
}
{
GeoHash a = g.hash( 1 , 1 );
GeoHash b = g.hash( 4 , 5 );
- assert( 5 == (int)(g.distance( a , b ) ) );
+ verify( 5 == (int)(g.distance( a , b ) ) );
a = g.hash( 50 , 50 );
b = g.hash( 42 , 44 );
- assert( round(10) == round(g.distance( a , b )) );
+ verify( round(10) == round(g.distance( a , b )) );
}
{
GeoHash x("0000");
- assert( 0 == x.getHash() );
+ verify( 0 == x.getHash() );
x.init( 0 , 1 , 32 );
GEOHEQ( x , "0000000000000000000000000000000000000000000000000000000000000001" )
-
- assert( GeoHash( "1100").hasPrefix( GeoHash( "11" ) ) );
- assert( ! GeoHash( "1000").hasPrefix( GeoHash( "11" ) ) );
+
+ verify( GeoHash( "1100").hasPrefix( GeoHash( "11" ) ) );
+ verify( ! GeoHash( "1000").hasPrefix( GeoHash( "11" ) ) );
}
{
@@ -3153,8 +3153,8 @@ namespace mongo {
cout << "b: " << ob.hexDump() << endl;
cout << "c: " << oc.hexDump() << endl;
*/
- assert( oa.woCompare( ob ) < 0 );
- assert( oa.woCompare( oc ) < 0 );
+ verify( oa.woCompare( ob ) < 0 );
+ verify( oa.woCompare( oc ) < 0 );
}
@@ -3171,32 +3171,32 @@ namespace mongo {
{
GeoHash prefix( "110011000000" );
GeoHash entry( "1100110000011100000111000001110000011100000111000001000000000000" );
- assert( ! entry.hasPrefix( prefix ) );
+ verify( ! entry.hasPrefix( prefix ) );
entry = GeoHash("1100110000001100000111000001110000011100000111000001000000000000");
- assert( entry.toString().find( prefix.toString() ) == 0 );
- assert( entry.hasPrefix( GeoHash( "1100" ) ) );
- assert( entry.hasPrefix( prefix ) );
+ verify( entry.toString().find( prefix.toString() ) == 0 );
+ verify( entry.hasPrefix( GeoHash( "1100" ) ) );
+ verify( entry.hasPrefix( prefix ) );
}
{
GeoHash a = g.hash( 50 , 50 );
GeoHash b = g.hash( 48 , 54 );
- assert( round( 4.47214 ) == round( g.distance( a , b ) ) );
+ verify( round( 4.47214 ) == round( g.distance( a , b ) ) );
}
{
Box b( Point( 29.762283 , -95.364271 ) , Point( 29.764283000000002 , -95.36227099999999 ) );
- assert( b.inside( 29.763 , -95.363 ) );
- assert( ! b.inside( 32.9570255 , -96.1082497 ) );
- assert( ! b.inside( 32.9570255 , -96.1082497 , .01 ) );
+ verify( b.inside( 29.763 , -95.363 ) );
+ verify( ! b.inside( 32.9570255 , -96.1082497 ) );
+ verify( ! b.inside( 32.9570255 , -96.1082497 , .01 ) );
}
{
GeoHash a( "11001111" );
- assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11") ) );
- assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11110000") ) );
+ verify( GeoHash( "11" ) == a.commonPrefix( GeoHash("11") ) );
+ verify( GeoHash( "11" ) == a.commonPrefix( GeoHash("11110000") ) );
}
{
@@ -3209,8 +3209,8 @@ namespace mongo {
GeoHash h( x , y );
unsigned a,b;
h.unhash_slow( a,b );
- assert( a == x );
- assert( b == y );
+ verify( a == x );
+ verify( b == y );
}
//cout << "slow: " << t.millis() << endl;
}
@@ -3223,8 +3223,8 @@ namespace mongo {
GeoHash h( x , y );
unsigned a,b;
h.unhash_fast( a,b );
- assert( a == x );
- assert( b == y );
+ verify( a == x );
+ verify( b == y );
}
//cout << "fast: " << t.millis() << endl;
}
@@ -3242,8 +3242,8 @@ namespace mongo {
double dist2 = spheredist_deg(LAX, BNA);
// target is 0.45306
- assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
- assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
+ verify( 0.45305 <= dist1 && dist1 <= 0.45307 );
+ verify( 0.45305 <= dist2 && dist2 <= 0.45307 );
}
{
Point BNA (-1.5127, 0.6304);
@@ -3253,32 +3253,32 @@ namespace mongo {
double dist2 = spheredist_rad(LAX, BNA);
// target is 0.45306
- assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
- assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
+ verify( 0.45305 <= dist1 && dist1 <= 0.45307 );
+ verify( 0.45305 <= dist2 && dist2 <= 0.45307 );
}
{
Point JFK (-73.77694444, 40.63861111 );
Point LAX (-118.40, 33.94);
double dist = spheredist_deg(JFK, LAX) * EARTH_RADIUS_MILES;
- assert( dist > 2469 && dist < 2470 );
+ verify( dist > 2469 && dist < 2470 );
}
{
Point BNA (-86.67, 36.12);
Point LAX (-118.40, 33.94);
Point JFK (-73.77694444, 40.63861111 );
- assert( spheredist_deg(BNA, BNA) < 1e-6);
- assert( spheredist_deg(LAX, LAX) < 1e-6);
- assert( spheredist_deg(JFK, JFK) < 1e-6);
+ verify( spheredist_deg(BNA, BNA) < 1e-6);
+ verify( spheredist_deg(LAX, LAX) < 1e-6);
+ verify( spheredist_deg(JFK, JFK) < 1e-6);
Point zero (0, 0);
Point antizero (0,-180);
// these were known to cause NaN
- assert( spheredist_deg(zero, zero) < 1e-6);
- assert( fabs(M_PI-spheredist_deg(zero, antizero)) < 1e-6);
- assert( fabs(M_PI-spheredist_deg(antizero, zero)) < 1e-6);
+ verify( spheredist_deg(zero, zero) < 1e-6);
+ verify( fabs(M_PI-spheredist_deg(zero, antizero)) < 1e-6);
+ verify( fabs(M_PI-spheredist_deg(antizero, zero)) < 1e-6);
}
}
}
diff --git a/src/mongo/db/geo/core.h b/src/mongo/db/geo/core.h
index bdbf1af48f6..27a1d0f1c2d 100644
--- a/src/mongo/db/geo/core.h
+++ b/src/mongo/db/geo/core.h
@@ -98,7 +98,7 @@ namespace mongo {
if ( e.type() == BinData ) {
int len = 0;
_copy( (char*)&_hash , e.binData( len ) );
- assert( len == 8 );
+ verify( len == 8 );
_bits = bits;
}
else {
@@ -123,7 +123,7 @@ namespace mongo {
}
void init( unsigned x , unsigned y , unsigned bits ) {
- assert( bits <= 32 );
+ verify( bits <= 32 );
_hash = 0;
_bits = bits;
for ( unsigned i=0; i<bits; i++ ) {
@@ -172,7 +172,7 @@ namespace mongo {
}
bool hasPrefix( const GeoHash& other ) const {
- assert( other._bits <= _bits );
+ verify( other._bits <= _bits );
if ( other._bits == 0 )
return true;
long long x = other._hash ^ _hash;
@@ -203,7 +203,7 @@ namespace mongo {
}
void setBit( unsigned pos , bool one ) {
- assert( pos < _bits * 2 );
+ verify( pos < _bits * 2 );
if ( one )
_hash |= geoBitSets.masks64[pos];
else if ( _hash & geoBitSets.masks64[pos] )
@@ -215,12 +215,12 @@ namespace mongo {
}
bool getBitX( unsigned pos ) const {
- assert( pos < 32 );
+ verify( pos < 32 );
return getBit( pos * 2 );
}
bool getBitY( unsigned pos ) const {
- assert( pos < 32 );
+ verify( pos < 32 );
return getBit( ( pos * 2 ) + 1 );
}
@@ -228,7 +228,7 @@ namespace mongo {
BSONObjBuilder b(20);
append( b , name );
BSONObj o = b.obj();
- if( ! strlen( name ) ) assert( o.objsize() == 20 );
+ if( ! strlen( name ) ) verify( o.objsize() == 20 );
return o;
}
@@ -258,7 +258,7 @@ namespace mongo {
}
void move( int x , int y ) {
- assert( _bits );
+ verify( _bits );
_move( 0 , x );
_move( 1 , y );
}
@@ -266,7 +266,7 @@ namespace mongo {
void _move( unsigned offset , int d ) {
if ( d == 0 )
return;
- assert( d <= 1 && d>= -1 ); // TEMP
+ verify( d <= 1 && d>= -1 ); // TEMP
bool from, to;
if ( d > 0 ) {
@@ -299,7 +299,7 @@ namespace mongo {
pos -= 2;
}
- assert(0);
+ verify(0);
}
GeoHash& operator=(const GeoHash& h) {
@@ -324,7 +324,7 @@ namespace mongo {
GeoHash& operator+=( const char * s ) {
unsigned pos = _bits * 2;
_bits += strlen(s) / 2;
- assert( _bits <= 32 );
+ verify( _bits <= 32 );
while ( s[0] ) {
if ( s[0] == '1' )
setBit( pos , 1 );
@@ -532,7 +532,7 @@ namespace mongo {
if (cross_prod >= 1 || cross_prod <= -1) {
// fun with floats
- assert( fabs(cross_prod)-1 < 1e-6 );
+ verify( fabs(cross_prod)-1 < 1e-6 );
return cross_prod > 0 ? 0 : M_PI;
}
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index 104665087f6..7fe646e62a0 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -149,7 +149,7 @@ namespace mongo {
}
- assert( _other.size() == 1 );
+ verify( _other.size() == 1 );
BSONElementSet all;
obj.getFieldsDotted( _other[0] , all );
@@ -167,7 +167,7 @@ namespace mongo {
shared_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const {
shared_ptr<Cursor> c;
- assert(0);
+ verify(0);
return c;
}
@@ -290,7 +290,7 @@ namespace mongo {
IndexDetails& id = d->idx( idxNum );
GeoHaystackSearchIndex * si = (GeoHaystackSearchIndex*)id.getSpec().getType();
- assert( &id == si->getDetails() );
+ verify( &id == si->getDetails() );
BSONElement n = cmdObj["near"];
BSONElement maxDistance = cmdObj["maxDistance"];
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
index 7c6551549ec..2781b07e592 100644
--- a/src/mongo/db/index.cpp
+++ b/src/mongo/db/index.cpp
@@ -237,7 +237,7 @@ namespace mongo {
void setDifference(BSONObjSet &l, BSONObjSet &r, vector<BSONObj*> &diff) {
// l and r must use the same ordering spec.
- assert( l.key_comp().order() == r.key_comp().order() );
+ verify( l.key_comp().order() == r.key_comp().order() );
BSONObjSet::iterator i = l.begin();
BSONObjSet::iterator j = r.begin();
while ( 1 ) {
@@ -342,7 +342,7 @@ namespace mongo {
}
sourceCollection = nsdetails(sourceNS.c_str());
tlog() << "info: creating collection " << sourceNS << " on add index" << endl;
- assert( sourceCollection );
+ verify( sourceCollection );
}
if ( sourceCollection->findIndexByName(name) >= 0 ) {
@@ -438,7 +438,7 @@ namespace mongo {
keyPattern = info["key"].embeddedObjectUserCheck();
if ( keyPattern.objsize() == 0 ) {
out() << info.toString() << endl;
- assert(false);
+ verify(false);
}
_init();
}
diff --git a/src/mongo/db/index.h b/src/mongo/db/index.h
index 4418f2ad382..8fb0478cd57 100644
--- a/src/mongo/db/index.h
+++ b/src/mongo/db/index.h
@@ -127,7 +127,7 @@ namespace mongo {
string s;
s.reserve(Namespace::MaxNsLen);
s = io.getStringField("ns");
- assert( !s.empty() );
+ verify( !s.empty() );
s += ".$";
s += io.getStringField("name");
return s;
diff --git a/src/mongo/db/indexkey.cpp b/src/mongo/db/indexkey.cpp
index be2df6bed39..2eae7c776a4 100644
--- a/src/mongo/db/indexkey.cpp
+++ b/src/mongo/db/indexkey.cpp
@@ -75,7 +75,7 @@ namespace mongo {
}
void IndexSpec::_init() {
- assert( keyPattern.objsize() );
+ verify( keyPattern.objsize() );
// some basics
_nFields = keyPattern.nFields();
@@ -233,7 +233,7 @@ namespace mongo {
}
else {
// nonterminal array element to expand, so recurse
- assert( !arrElt.eoo() );
+ verify( !arrElt.eoo() );
BSONObjIterator i( arrElt.embeddedObject() );
if ( i.more() ) {
while( i.more() ) {
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index fcd30364782..b471bcf0ff2 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -118,14 +118,14 @@ namespace mongo {
*getDur().writing( reinterpret_cast< double * >( value() ) ) = d;
else if ( _element.type() == NumberInt )
*getDur().writing( reinterpret_cast< int * >( value() ) ) = (int) d;
- else assert(0);
+ else verify(0);
}
void BSONElementManipulator::SetLong(long long n) {
- assert( _element.type() == NumberLong );
+ verify( _element.type() == NumberLong );
*getDur().writing( reinterpret_cast< long long * >(value()) ) = n;
}
void BSONElementManipulator::SetInt(int n) {
- assert( _element.type() == NumberInt );
+ verify( _element.type() == NumberInt );
getDur().writingInt( *reinterpret_cast< int * >( value() ) ) = n;
}
/* dur:: version */
@@ -157,12 +157,12 @@ namespace mongo {
scoped_ptr<Matcher> m(new Matcher(q.query));
for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
Client *c = *i;
- assert( c );
+ verify( c );
CurOp* co = c->curop();
if ( c == &me && !co ) {
continue;
}
- assert( co );
+ verify( co );
if( all || co->displayInCurop() ) {
BSONObj info = co->infoNoauth();
if ( all || m->matches( info )) {
@@ -240,7 +240,7 @@ namespace mongo {
try {
dbresponse.exhaust = runQuery(m, q, op, *resp);
- assert( !resp->empty() );
+ verify( !resp->empty() );
}
catch ( SendStaleConfigException& e ){
ex.reset( new SendStaleConfigException( e.getns(), e.getInfo().msg, e.getVersionReceived(), e.getVersionWanted() ) );
@@ -489,7 +489,7 @@ namespace mongo {
if ( n > 2000 ) {
log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
- assert( n < 30000 );
+ verify( n < 30000 );
}
int found = ClientCursor::erase(n, (long long *) x);
@@ -507,10 +507,10 @@ namespace mongo {
assertInWriteLock();
Client::Context * ctx = cc().getContext();
- assert( ctx );
- assert( ctx->inDB( db , path ) );
+ verify( ctx );
+ verify( ctx->inDB( db , path ) );
Database *database = ctx->db();
- assert( database->name == db );
+ verify( database->name == db );
oplogCheckCloseDatabase( database ); // oplog caches some things, dirty its caches
@@ -537,12 +537,12 @@ namespace mongo {
int flags = d.pullInt();
BSONObj query = d.nextJsObj();
- assert( d.moreJSObjs() );
- assert( query.objsize() < m.header()->dataLen() );
+ verify( d.moreJSObjs() );
+ verify( query.objsize() < m.header()->dataLen() );
BSONObj toupdate = d.nextJsObj();
uassert( 10055 , "update object too large", toupdate.objsize() <= BSONObjMaxUserSize);
- assert( toupdate.objsize() < m.header()->dataLen() );
- assert( query.objsize() + toupdate.objsize() < m.header()->dataLen() );
+ verify( toupdate.objsize() < m.header()->dataLen() );
+ verify( query.objsize() + toupdate.objsize() < m.header()->dataLen() );
bool upsert = flags & UpdateOption_Upsert;
bool multi = flags & UpdateOption_Multi;
bool broadcast = flags & UpdateOption_Broadcast;
@@ -582,7 +582,7 @@ namespace mongo {
int flags = d.pullInt();
bool justOne = flags & RemoveOption_JustOne;
bool broadcast = flags & RemoveOption_Broadcast;
- assert( d.moreJSObjs() );
+ verify( d.moreJSObjs() );
BSONObj pattern = d.nextJsObj();
op.debug().query = pattern;
@@ -838,7 +838,7 @@ namespace mongo {
lastError.startRequest( toSend, lastError._get() );
DbResponse dbResponse;
assembleResponse( toSend, dbResponse , _clientHost );
- assert( dbResponse.response );
+ verify( dbResponse.response );
dbResponse.response->concat(); // can get rid of this if we make response handling smarter
response = *dbResponse.response;
getDur().commitIfNeeded();
@@ -859,7 +859,7 @@ namespace mongo {
//if ( ! query.obj.isEmpty() || nToReturn != 0 || nToSkip != 0 || fieldsToReturn || queryOptions )
return DBClientBase::query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
//
- //assert( query.obj.isEmpty() );
+ //verify( query.obj.isEmpty() );
//throw UserException( (string)"yay:" + ns );
}
@@ -1055,9 +1055,9 @@ namespace mongo {
string s = ss.str();
const char * data = s.c_str();
#ifdef _WIN32
- assert ( _write( fd, data, strlen( data ) ) );
+ verify( _write( fd, data, strlen( data ) ) );
#else
- assert ( write( fd, data, strlen( data ) ) );
+ verify( write( fd, data, strlen( data ) ) );
#endif
}
@@ -1191,7 +1191,7 @@ namespace mongo {
}
void DiagLog::openFile() {
- assert( f == 0 );
+ verify( f == 0 );
stringstream ss;
ss << dbpath << "/diaglog." << hex << time(0);
string name = ss.str();
@@ -1238,7 +1238,7 @@ namespace mongo {
OCCASIONALLY log = true;
if ( log ) {
scoped_lock lk(mutex);
- assert( f );
+ verify( f );
f->write(data,len);
}
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 7e1d19ce2f3..fd80d6f8f10 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -33,7 +33,7 @@ namespace mongo {
assertInWriteLock();
Database *db = c.database();
- DEV assert( db );
+ DEV verify( db );
const char *ns = db->profileName.c_str();
// build object
diff --git a/src/mongo/db/jsobj.cpp b/src/mongo/db/jsobj.cpp
index f418f5d034c..627d346445e 100644
--- a/src/mongo/db/jsobj.cpp
+++ b/src/mongo/db/jsobj.cpp
@@ -34,8 +34,6 @@
#include "jsobjmanipulator.h"
#include "../util/optime.h"
#include <boost/static_assert.hpp>
-#undef assert
-#define assert MONGO_assert
// make sure our assumptions are valid
BOOST_STATIC_ASSERT( sizeof(short) == 2 );
@@ -372,7 +370,7 @@ namespace mongo {
}
log() << "compareDottedFieldNames ERROR l: " << l << " r: " << r << " TOO MANY LOOPS" << endl;
- assert(0);
+ verify(0);
return SAME; // will never get here
}
@@ -732,7 +730,7 @@ namespace mongo {
returns n added not counting _id unless requested.
*/
int BSONObj::addFields(BSONObj& from, set<string>& fields) {
- assert( isEmpty() && !isOwned() ); /* partial implementation for now... */
+ verify( isEmpty() && !isOwned() ); /* partial implementation for now... */
BSONObjBuilder b;
@@ -952,8 +950,8 @@ namespace mongo {
c.appendRegex("x", "goo");
BSONObj p = c.done();
- assert( !o.binaryEqual( p ) );
- assert( o.woCompare( p ) < 0 );
+ verify( !o.binaryEqual( p ) );
+ verify( o.woCompare( p ) < 0 );
}
void testoid() {
@@ -964,10 +962,10 @@ namespace mongo {
OID b;
// goes with sleep above...
// b.init();
- // assert( memcmp(id.getData(), b.getData(), 12) < 0 );
+ // verify( memcmp(id.getData(), b.getData(), 12) < 0 );
b.init( id.str() );
- assert( b == id );
+ verify( b == id );
}
void testbounds() {
@@ -982,15 +980,15 @@ namespace mongo {
b.append( "x" , numeric_limits<double>::max() );
r = b.obj();
}
- assert( l.woCompare( r ) < 0 );
- assert( r.woCompare( l ) > 0 );
+ verify( l.woCompare( r ) < 0 );
+ verify( r.woCompare( l ) > 0 );
{
BSONObjBuilder b;
b.append( "x" , numeric_limits<int>::max() );
l = b.obj();
}
- assert( l.woCompare( r ) < 0 );
- assert( r.woCompare( l ) > 0 );
+ verify( l.woCompare( r ) < 0 );
+ verify( r.woCompare( l ) > 0 );
}
void testorder() {
@@ -999,12 +997,12 @@ namespace mongo {
{ BSONObjBuilder b; b.append( "x" , (long long)2 ); x = b.obj(); }
{ BSONObjBuilder b; b.append( "x" , (int)3 ); y = b.obj(); }
{ BSONObjBuilder b; b.append( "x" , (long long)4 ); z = b.obj(); }
- assert( x.woCompare( y ) < 0 );
- assert( x.woCompare( z ) < 0 );
- assert( y.woCompare( x ) > 0 );
- assert( z.woCompare( x ) > 0 );
- assert( y.woCompare( z ) < 0 );
- assert( z.woCompare( y ) > 0 );
+ verify( x.woCompare( y ) < 0 );
+ verify( x.woCompare( z ) < 0 );
+ verify( y.woCompare( x ) > 0 );
+ verify( z.woCompare( x ) > 0 );
+ verify( y.woCompare( z ) < 0 );
+ verify( z.woCompare( y ) > 0 );
}
{
@@ -1015,36 +1013,36 @@ namespace mongo {
{ BSONObjBuilder b; b.appendNull( "x" ); n = b.obj(); }
{ BSONObjBuilder b; u = b.obj(); }
- assert( ll.woCompare( u ) == d.woCompare( u ) );
- assert( ll.woCompare( u ) == i.woCompare( u ) );
+ verify( ll.woCompare( u ) == d.woCompare( u ) );
+ verify( ll.woCompare( u ) == i.woCompare( u ) );
BSONObj k = BSON( "x" << 1 );
- assert( ll.woCompare( u , k ) == d.woCompare( u , k ) );
- assert( ll.woCompare( u , k ) == i.woCompare( u , k ) );
+ verify( ll.woCompare( u , k ) == d.woCompare( u , k ) );
+ verify( ll.woCompare( u , k ) == i.woCompare( u , k ) );
- assert( u.woCompare( ll ) == u.woCompare( d ) );
- assert( u.woCompare( ll ) == u.woCompare( i ) );
- assert( u.woCompare( ll , k ) == u.woCompare( d , k ) );
- assert( u.woCompare( ll , k ) == u.woCompare( d , k ) );
+ verify( u.woCompare( ll ) == u.woCompare( d ) );
+ verify( u.woCompare( ll ) == u.woCompare( i ) );
+ verify( u.woCompare( ll , k ) == u.woCompare( d , k ) );
+ verify( u.woCompare( ll , k ) == u.woCompare( d , k ) );
- assert( i.woCompare( n ) == d.woCompare( n ) );
+ verify( i.woCompare( n ) == d.woCompare( n ) );
- assert( ll.woCompare( n ) == d.woCompare( n ) );
- assert( ll.woCompare( n ) == i.woCompare( n ) );
- assert( ll.woCompare( n , k ) == d.woCompare( n , k ) );
- assert( ll.woCompare( n , k ) == i.woCompare( n , k ) );
+ verify( ll.woCompare( n ) == d.woCompare( n ) );
+ verify( ll.woCompare( n ) == i.woCompare( n ) );
+ verify( ll.woCompare( n , k ) == d.woCompare( n , k ) );
+ verify( ll.woCompare( n , k ) == i.woCompare( n , k ) );
- assert( n.woCompare( ll ) == n.woCompare( d ) );
- assert( n.woCompare( ll ) == n.woCompare( i ) );
- assert( n.woCompare( ll , k ) == n.woCompare( d , k ) );
- assert( n.woCompare( ll , k ) == n.woCompare( d , k ) );
+ verify( n.woCompare( ll ) == n.woCompare( d ) );
+ verify( n.woCompare( ll ) == n.woCompare( i ) );
+ verify( n.woCompare( ll , k ) == n.woCompare( d , k ) );
+ verify( n.woCompare( ll , k ) == n.woCompare( d , k ) );
}
{
BSONObj l,r;
{ BSONObjBuilder b; b.append( "x" , "eliot" ); l = b.obj(); }
{ BSONObjBuilder b; b.appendSymbol( "x" , "eliot" ); r = b.obj(); }
- assert( l.woCompare( r ) == 0 );
- assert( r.woCompare( l ) == 0 );
+ verify( l.woCompare( r ) == 0 );
+ verify( r.woCompare( l ) == 0 );
}
}
@@ -1057,11 +1055,11 @@ namespace mongo {
BSONObj a = A.done();
BSONObj b = B.done();
BSONObj c = C.done();
- assert( !a.binaryEqual( b ) ); // comments on operator==
+ verify( !a.binaryEqual( b ) ); // comments on operator==
int cmp = a.woCompare(b);
- assert( cmp == 0 );
+ verify( cmp == 0 );
cmp = a.woCompare(c);
- assert( cmp < 0 );
+ verify( cmp < 0 );
testoid();
testbounds();
testorder();
@@ -1226,9 +1224,9 @@ namespace mongo {
BSONObjIterator i( o );
while ( i.more() ) {
_fields[x++] = i.next().rawdata();
- assert( _fields[x-1] );
+ verify( _fields[x-1] );
}
- assert( x == _nfields );
+ verify( x == _nfields );
std::sort( _fields , _fields + _nfields , cmp );
_cur = 0;
}
diff --git a/src/mongo/db/jsobjmanipulator.h b/src/mongo/db/jsobjmanipulator.h
index 880fde8b409..05666409e62 100644
--- a/src/mongo/db/jsobjmanipulator.h
+++ b/src/mongo/db/jsobjmanipulator.h
@@ -30,7 +30,7 @@ namespace mongo {
public:
BSONElementManipulator( const BSONElement &element ) :
_element( element ) {
- assert( !_element.eoo() );
+ verify( !_element.eoo() );
}
/** Replace a Timestamp type with a Date type initialized to
OpTime::now().asDate()
@@ -43,16 +43,16 @@ namespace mongo {
void setNumber(double d) {
if ( _element.type() == NumberDouble ) *reinterpret_cast< double * >( value() ) = d;
else if ( _element.type() == NumberInt ) *reinterpret_cast< int * >( value() ) = (int) d;
- else assert(0);
+ else verify(0);
}
void SetNumber(double d);
void setLong(long long n) {
- assert( _element.type() == NumberLong );
+ verify( _element.type() == NumberLong );
*reinterpret_cast< long long * >( value() ) = n;
}
void SetLong(long long n);
void setInt(int n) {
- assert( _element.type() == NumberInt );
+ verify( _element.type() == NumberInt );
*reinterpret_cast< int * >( value() ) = n;
}
void SetInt(int n);
diff --git a/src/mongo/db/json.cpp b/src/mongo/db/json.cpp
index f27ccbf896e..265f53e161a 100644
--- a/src/mongo/db/json.cpp
+++ b/src/mongo/db/json.cpp
@@ -29,8 +29,6 @@
#include <boost/spirit/utility/loops.hpp>
#include <boost/spirit/utility/lists.hpp>
#endif
-#undef assert
-#define assert MONGO_assert
#include "json.h"
#include "../bson/util/builder.h"
@@ -191,7 +189,7 @@ namespace mongo {
o = '\v';
break;
default:
- assert( false );
+ verify( false );
}
b.ss << o;
}
@@ -642,7 +640,7 @@ namespace mongo {
msgasserted(10340, "Failure parsing JSON string near: " + string( result.stop, limit ));
}
BSONObj ret = b.pop();
- assert( b.empty() );
+ verify( b.empty() );
return ret;
}
diff --git a/src/mongo/db/key.cpp b/src/mongo/db/key.cpp
index 47449986d21..f7deb79927c 100644
--- a/src/mongo/db/key.cpp
+++ b/src/mongo/db/key.cpp
@@ -124,7 +124,7 @@ namespace mongo {
}
default:
out() << "oldCompareElementValues: bad type " << (int) l.type() << endl;
- assert(false);
+ verify(false);
}
return -1;
}
@@ -314,7 +314,7 @@ namespace mongo {
long long m = 2LL << 52;
DEV {
long long d = m-1;
- assert( ((long long) ((double) -d)) == -d );
+ verify( ((long long) ((double) -d)) == -d );
}
if( n >= m || n <= -m ) {
// can't represent exactly as a double
@@ -351,7 +351,7 @@ namespace mongo {
}
BSONObj KeyV1::toBson() const {
- assert( _keyData != 0 );
+ verify( _keyData != 0 );
if( !isCompactFormat() )
return bson();
@@ -413,7 +413,7 @@ namespace mongo {
p += sizeof(double);
break;
default:
- assert(false);
+ verify(false);
}
if( (bits & cHASMORE) == 0 )
@@ -577,7 +577,7 @@ namespace mongo {
sz = ((unsigned) p[1]) + 2;
}
else {
- assert( type == cbindata );
+ verify( type == cbindata );
sz = binDataCodeToLength(p[1]) + 2;
}
}
@@ -655,7 +655,7 @@ namespace mongo {
case cmaxkey:
break;
default:
- assert(false);
+ verify(false);
}
if( (lval&cHASMORE) == 0 )
break;
@@ -671,7 +671,7 @@ namespace mongo {
a[1] = 0;
b[0] = 3;
b[1] = 0;
- assert( strcmp(a,b)>0 && memcmp(a,b,2)>0 );
+ verify( strcmp(a,b)>0 && memcmp(a,b,2)>0 );
}
} cunittest;
diff --git a/src/mongo/db/lasterror.cpp b/src/mongo/db/lasterror.cpp
index dbd3f1815d4..cf107462b78 100644
--- a/src/mongo/db/lasterror.cpp
+++ b/src/mongo/db/lasterror.cpp
@@ -130,7 +130,7 @@ namespace mongo {
void prepareErrForNewRequest( Message &m, LastError * err ) {
// a killCursors message shouldn't affect last error
- assert( err );
+ verify( err );
if ( m.operation() == dbKillCursors ) {
err->disabled = true;
}
@@ -141,7 +141,7 @@ namespace mongo {
}
LastError * LastErrorHolder::startRequest( Message& m , LastError * le ) {
- assert( le );
+ verify( le );
prepareErrForNewRequest( m, le );
return le;
}
diff --git a/src/mongo/db/lasterror.h b/src/mongo/db/lasterror.h
index b47a936caeb..9605bbfcf05 100644
--- a/src/mongo/db/lasterror.h
+++ b/src/mongo/db/lasterror.h
@@ -114,7 +114,7 @@ namespace mongo {
LastError * le = get(false);
if ( ! le ) {
error() << " no LastError!" << endl;
- assert( le );
+ verify( le );
}
return le;
}
diff --git a/src/mongo/db/matcher.cpp b/src/mongo/db/matcher.cpp
index 897af960c22..df57d4dd208 100755
--- a/src/mongo/db/matcher.cpp
+++ b/src/mongo/db/matcher.cpp
@@ -168,12 +168,12 @@ namespace mongo {
}
int ElementMatcher::inverseOfNegativeCompareOp() const {
- assert( negativeCompareOp() );
+ verify( negativeCompareOp() );
return _compareOp == BSONObj::NE ? BSONObj::Equality : BSONObj::opIN;
}
bool ElementMatcher::negativeCompareOpContainsNull() const {
- assert( negativeCompareOp() );
+ verify( negativeCompareOp() );
return (_compareOp == BSONObj::NE && _toMatch.type() != jstNULL) ||
(_compareOp == BSONObj::NIN && _myset->count( staticNull.firstElement()) == 0 );
}
@@ -547,7 +547,7 @@ namespace mongo {
}
inline int Matcher::valuesMatch(const BSONElement& l, const BSONElement& r, int op, const ElementMatcher& bm) const {
- assert( op != BSONObj::NE && op != BSONObj::NIN );
+ verify( op != BSONObj::NE && op != BSONObj::NIN );
if ( op == BSONObj::Equality ) {
return l.valuesEqual(r);
@@ -713,7 +713,7 @@ namespace mongo {
cout << "obj: " << obj << endl;
cout << "fieldName: " << fieldName << endl;
cout << "_constrainIndexKey: " << _constrainIndexKey << endl;
- assert( !e.eoo() );
+ verify( !e.eoo() );
}
}
else {
@@ -1236,7 +1236,7 @@ namespace mongo {
{
// a quick check that we are using our mongo assert macro
int x = 1;
- assert( ++x );
+ verify( ++x );
if( x != 2 ) {
log() << "bad build - wrong assert macro" << endl;
::abort();
@@ -1246,18 +1246,18 @@ namespace mongo {
BSONObj j1((const char *) &js1);
BSONObj j2((const char *) &js2);
Matcher m(j2);
- assert( m.matches(j1) );
+ verify( m.matches(j1) );
js2.sval[0] = 'z';
- assert( !m.matches(j1) );
+ verify( !m.matches(j1) );
Matcher n(j1);
- assert( n.matches(j1) );
- assert( !n.matches(j2) );
+ verify( n.matches(j1) );
+ verify( !n.matches(j2) );
BSONObj j0 = BSONObj();
// BSONObj j0((const char *) &js0);
Matcher p(j0);
- assert( p.matches(j1) );
- assert( p.matches(j2) );
+ verify( p.matches(j1) );
+ verify( p.matches(j2) );
}
} jsunittest;
@@ -1283,14 +1283,14 @@ namespace mongo {
pcrecpp::RE re1(")({a}h.*o");
pcrecpp::RE re("h.llo");
- assert( re.FullMatch("hello") );
- assert( !re1.FullMatch("hello") );
+ verify( re.FullMatch("hello") );
+ verify( !re1.FullMatch("hello") );
pcrecpp::RE_Options options;
options.set_utf8(true);
pcrecpp::RE part("dwi", options);
- assert( part.PartialMatch("dwight") );
+ verify( part.PartialMatch("dwight") );
pcre_config( PCRE_CONFIG_UNICODE_PROPERTIES , &ret );
if ( ! ret )
diff --git a/src/mongo/db/matcher.h b/src/mongo/db/matcher.h
index 7071267c9eb..c5386faa23a 100644
--- a/src/mongo/db/matcher.h
+++ b/src/mongo/db/matcher.h
@@ -102,7 +102,7 @@ namespace mongo {
bool hasLoadedRecord() const { return _loadedRecord; }
bool hasElemMatchKey() const { return _elemMatchKeyFound; }
string elemMatchKey() const {
- assert( hasElemMatchKey() );
+ verify( hasElemMatchKey() );
return _elemMatchKey;
}
diff --git a/src/mongo/db/minilex.h b/src/mongo/db/minilex.h
index 677514aa47c..f77bd9b464b 100644
--- a/src/mongo/db/minilex.h
+++ b/src/mongo/db/minilex.h
@@ -114,9 +114,9 @@ namespace mongo {
MiniLex() {
strhashmap atest;
atest["foo"] = 3;
- assert( atest.count("bar") == 0 );
- assert( atest.count("foo") == 1 );
- assert( atest["foo"] == 3 );
+ verify( atest.count("bar") == 0 );
+ verify( atest.count("foo") == 1 );
+ verify( atest["foo"] == 3 );
for ( int i = 0; i < 256; i++ ) {
ic[i] = starter[i] = false;
diff --git a/src/mongo/db/mongommf.cpp b/src/mongo/db/mongommf.cpp
index 5767d92ffb1..beff3cfa923 100644
--- a/src/mongo/db/mongommf.cpp
+++ b/src/mongo/db/mongommf.cpp
@@ -73,7 +73,7 @@ namespace mongo {
if( !ok ) {
DWORD e = GetLastError();
log() << "VirtualProtect failed (mcw) " << mmf->filename() << ' ' << chunkno << hex << protectStart << ' ' << protectSize << ' ' << errnoWithDescription(e) << endl;
- assert(false);
+ verify(false);
}
}
@@ -81,7 +81,7 @@ namespace mongo {
}
void* MemoryMappedFile::createPrivateMap() {
- assert( maphandle );
+ verify( maphandle );
scoped_lock lk(mapViewMutex);
void *p = MapViewOfFile(maphandle, FILE_MAP_READ, 0, 0, 0);
if ( p == 0 ) {
@@ -113,14 +113,14 @@ namespace mongo {
if( !ok ) {
DWORD e = GetLastError();
log() << "VirtualProtect failed in remapPrivateView " << filename() << hex << oldPrivateAddr << ' ' << len << ' ' << errnoWithDescription(e) << endl;
- assert(false);
+ verify(false);
}
return oldPrivateAddr;
#else
if( !UnmapViewOfFile(oldPrivateAddr) ) {
DWORD e = GetLastError();
log() << "UnMapViewOfFile failed " << filename() << ' ' << errnoWithDescription(e) << endl;
- assert(false);
+ verify(false);
}
// we want the new address to be the same as the old address in case things keep pointers around (as namespaceindex does).
@@ -131,16 +131,16 @@ namespace mongo {
if ( p == 0 ) {
DWORD e = GetLastError();
log() << "MapViewOfFileEx failed " << filename() << " " << errnoWithDescription(e) << endl;
- assert(p);
+ verify(p);
}
- assert(p == oldPrivateAddr);
+ verify(p == oldPrivateAddr);
return p;
#endif
}
#endif
void MongoMMF::remapThePrivateView() {
- assert( cmdLine.dur );
+ verify( cmdLine.dur );
// todo 1.9 : it turns out we require that we always remap to the same address.
// so the remove / add isn't necessary and can be removed?
@@ -153,8 +153,8 @@ namespace mongo {
/** register view. threadsafe */
void PointerToMMF::add(void *view, MongoMMF *f) {
- assert(view);
- assert(f);
+ verify(view);
+ verify(f);
mutex::scoped_lock lk(_m);
_views.insert( pair<void*,MongoMMF*>(view,f) );
}
@@ -173,7 +173,7 @@ namespace mongo {
#else
size_t max = ~((size_t)0);
#endif
- assert( max > (size_t) this ); // just checking that no one redef'd SIZE_MAX and that it is sane
+ verify( max > (size_t) this ); // just checking that no one redef'd SIZE_MAX and that it is sane
// this way we don't need any boundary checking in _find()
_views.insert( pair<void*,MongoMMF*>((void*)0,(MongoMMF*)0) );
@@ -217,8 +217,8 @@ namespace mongo {
PointerToMMF privateViews;
/* void* MongoMMF::switchToPrivateView(void *readonly_ptr) {
- assert( cmdLine.dur );
- assert( testIntent );
+ verify( cmdLine.dur );
+ verify( testIntent );
void *p = readonly_ptr;
@@ -253,7 +253,7 @@ namespace mongo {
void* MongoMMF::_switchToWritableView(void *p) {
size_t ofs;
MongoMMF *f = privateViews.find(p, ofs);
- assert( f );
+ verify( f );
return (((char *)f->_view_write)+ofs);
}
@@ -332,7 +332,7 @@ namespace mongo {
rather vague and not checking if the right database is locked
*/
if( !Lock::somethingWriteLocked() ) {
- assert( inShutdown() );
+ verify( inShutdown() );
DEV {
log() << "is it really ok to close a mongommf outside a write lock? file:" << filename() << endl;
}
diff --git a/src/mongo/db/mongommf.h b/src/mongo/db/mongommf.h
index 62a6cdfd3fd..2cbd5f4d2e6 100644
--- a/src/mongo/db/mongommf.h
+++ b/src/mongo/db/mongommf.h
@@ -76,7 +76,7 @@ namespace mongo {
if the suffix is "ns", fileSuffixNo -1
*/
const RelativePath& relativePath() const {
- DEV assert( !_p._p.empty() );
+ DEV verify( !_p._p.empty() );
return _p;
}
diff --git a/src/mongo/db/mongomutex.h b/src/mongo/db/mongomutex.h
index 0e033124d27..51c746ec634 100644
--- a/src/mongo/db/mongomutex.h
+++ b/src/mongo/db/mongomutex.h
@@ -38,11 +38,11 @@ namespace mongo {
if ( locked == 0 )
enter = curTimeMicros64();
locked++;
- assert( locked >= 1 );
+ verify( locked >= 1 );
}
void leaving() {
locked--;
- assert( locked >= 0 );
+ verify( locked >= 0 );
if ( locked == 0 )
timeLocked += curTimeMicros64() - enter;
}
diff --git a/src/mongo/db/namespace-inl.h b/src/mongo/db/namespace-inl.h
index c18f681e0b9..b482b3d5c36 100644
--- a/src/mongo/db/namespace-inl.h
+++ b/src/mongo/db/namespace-inl.h
@@ -63,7 +63,7 @@ namespace mongo {
/* future : this doesn't need to be an inline. */
inline string Namespace::getSisterNS( const char * local ) const {
- assert( local && local[0] != '.' );
+ verify( local && local[0] != '.' );
string old(buf);
if ( old.find( "." ) != string::npos )
old = old.substr( 0 , old.find( "." ) );
diff --git a/src/mongo/db/namespace_details.cpp b/src/mongo/db/namespace_details.cpp
index d5ffe5eaf97..ffb1853bbc3 100644
--- a/src/mongo/db/namespace_details.cpp
+++ b/src/mongo/db/namespace_details.cpp
@@ -59,7 +59,7 @@ namespace mongo {
// For capped case, signal that we are doing initial extent allocation.
if ( capped )
cappedLastDelRecLastExtent().setInvalid();
- assert( sizeof(dataFileVersion) == 2 );
+ verify( sizeof(dataFileVersion) == 2 );
dataFileVersion = 0;
indexFileVersion = 0;
multiKeyIndexBits = 0;
@@ -145,7 +145,7 @@ namespace mongo {
bool checkNsFilesOnLoad = true;
NOINLINE_DECL void NamespaceIndex::_init() {
- assert( !ht );
+ verify( !ht );
Lock::assertWriteLocked(database_);
@@ -182,7 +182,7 @@ namespace mongo {
if( f.create(pathString, l, true) ) {
getDur().createdFile(pathString, l); // always a new file
len = l;
- assert( len == lenForNewNsFiles );
+ verify( len == lenForNewNsFiles );
p = f.getView();
}
}
@@ -194,7 +194,7 @@ namespace mongo {
}
- assert( len <= 0x7fffffff );
+ verify( len <= 0x7fffffff );
ht = new HashTable<Namespace,NamespaceDetails>(p, (int) len, "namespace index");
if( checkNsFilesOnLoad )
ht->iterAll(namespaceOnLoadCallback);
@@ -206,7 +206,7 @@ namespace mongo {
l->push_back( (string)k );
}
void NamespaceIndex::getNamespaces( list<string>& tofill , bool onlyCollections ) const {
- assert( onlyCollections ); // TODO: need to implement this
+ verify( onlyCollections ); // TODO: need to implement this
// need boost::bind or something to make this less ugly
if ( ht )
@@ -288,7 +288,7 @@ namespace mongo {
to go in a forward direction, which is important for performance. */
int regionlen = r->lengthWithHeaders;
extentLoc.set(loc.a(), r->extentOfs);
- assert( r->extentOfs < loc.getOfs() );
+ verify( r->extentOfs < loc.getOfs() );
DEBUGGING out() << "TEMP: alloc() returns " << loc.toString() << ' ' << ns << " lentoalloc:" << lenToAlloc << " ext:" << extentLoc.toString() << endl;
@@ -386,7 +386,7 @@ namespace mongo {
const DeletedRecord *bmr = bestmatch.drec();
*getDur().writing(bestprev) = bmr->nextDeleted;
bmr->nextDeleted.writing().setInvalid(); // defensive.
- assert(bmr->extentOfs < bestmatch.getOfs());
+ verify(bmr->extentOfs < bestmatch.getOfs());
}
return bestmatch;
@@ -442,7 +442,7 @@ namespace mongo {
out() << " fr: " << e.ext()->firstRecord.toString() <<
" lr: " << e.ext()->lastRecord.toString() << " extent->len: " << e.ext()->length << '\n';
}
- assert( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
+ verify( len * 5 > lastExtentSize ); // assume it is an unusually large record; if not, something is broken
}
}
@@ -486,7 +486,7 @@ namespace mongo {
/* extra space for indexes when more than 10 */
NamespaceDetails::Extra* NamespaceIndex::newExtra(const char *ns, int i, NamespaceDetails *d) {
Lock::assertWriteLocked(ns);
- assert( i >= 0 && i <= 1 );
+ verify( i >= 0 && i <= 1 );
Namespace n(ns);
Namespace extra(n.extraName(i).c_str()); // throws userexception if ns name too long
@@ -505,13 +505,13 @@ namespace mongo {
Extra *e = ni->newExtra(ns, i, this);
long ofs = e->ofsFrom(this);
if( i == 0 ) {
- assert( extraOffset == 0 );
+ verify( extraOffset == 0 );
*getDur().writing(&extraOffset) = ofs;
- assert( extra() == e );
+ verify( extra() == e );
}
else {
Extra *hd = extra();
- assert( hd->next(this) == 0 );
+ verify( hd->next(this) == 0 );
hd->setNext(ofs);
}
return e;
@@ -550,7 +550,7 @@ namespace mongo {
e->setNext( nxt->ofsFrom(this) );
e = nxt;
}
- assert( extraOffset );
+ verify( extraOffset );
}
}
@@ -571,7 +571,7 @@ namespace mongo {
long long NamespaceDetails::storageSize( int * numExtents , BSONArrayBuilder * extentInfo ) const {
Extent * e = firstExtent.ext();
- assert( e );
+ verify( e );
long long total = 0;
int n = 0;
@@ -617,9 +617,9 @@ namespace mongo {
/*static*/ NOINLINE_DECL NamespaceDetailsTransient& NamespaceDetailsTransient::make_inlock(const char *ns) {
shared_ptr< NamespaceDetailsTransient > &t = _nsdMap[ ns ];
- assert( t.get() == 0 );
+ verify( t.get() == 0 );
Database *database = cc().database();
- assert( database );
+ verify( database );
if( _nsdMap.size() % 20000 == 10000 ) {
// so we notice if the number of namespaces grows insanely large
log() << "opening namespace " << ns << endl;
@@ -707,9 +707,9 @@ namespace mongo {
void renameNamespace( const char *from, const char *to, bool stayTemp) {
NamespaceIndex *ni = nsindex( from );
- assert( ni );
- assert( ni->details( from ) );
- assert( ! ni->details( to ) );
+ verify( ni );
+ verify( ni->details( from ) );
+ verify( ! ni->details( to ) );
// Our namespace and index details will move to a different
// memory location. The only references to namespace and
@@ -737,7 +737,7 @@ namespace mongo {
nsToDatabase(from, database);
string s = database;
s += ".system.namespaces";
- assert( Helpers::findOne( s.c_str(), BSON( "name" << from ), oldSpec ) );
+ verify( Helpers::findOne( s.c_str(), BSON( "name" << from ), oldSpec ) );
BSONObjBuilder newSpecB;
BSONObjIterator i( oldSpec.getObjectField( "options" ) );
diff --git a/src/mongo/db/namespace_details.h b/src/mongo/db/namespace_details.h
index 595b1dabcc0..d2434fb4931 100644
--- a/src/mongo/db/namespace_details.h
+++ b/src/mongo/db/namespace_details.h
@@ -114,8 +114,8 @@ namespace mongo {
private:
unsigned reserved2;
unsigned reserved3;
- Extra(const Extra&) { assert(false); }
- Extra& operator=(const Extra& r) { assert(false); return *this; }
+ Extra(const Extra&) { verify(false); }
+ Extra& operator=(const Extra& r) { verify(false); return *this; }
public:
Extra() { }
long ofsFrom(NamespaceDetails *d) {
@@ -190,7 +190,7 @@ namespace mongo {
/** get the IndexDetails for the index currently being built in the background. (there is at most one) */
IndexDetails& inProgIdx() {
- DEV assert(indexBuildInProgress);
+ DEV verify(indexBuildInProgress);
return idx(nIndexes);
}
@@ -479,7 +479,7 @@ namespace mongo {
SimpleMutex::scoped_lock lk(_isMutex);
if ( ! spec._finishedInit ) {
spec.reset( details );
- assert( spec._finishedInit );
+ verify( spec._finishedInit );
}
}
return spec;
diff --git a/src/mongo/db/nonce.cpp b/src/mongo/db/nonce.cpp
index 9ea78f01f7a..027bc2219d2 100644
--- a/src/mongo/db/nonce.cpp
+++ b/src/mongo/db/nonce.cpp
@@ -62,8 +62,8 @@ namespace mongo {
massert(10355 , "devrandom failed", !_devrandom->fail());
#elif defined(_WIN32)
unsigned a=0, b=0;
- assert( rand_s(&a) == 0 );
- assert( rand_s(&b) == 0 );
+ verify( rand_s(&a) == 0 );
+ verify( rand_s(&b) == 0 );
n = (((unsigned long long)a)<<32) | b;
#else
n = (((unsigned long long)random())<<32) | random();
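On Windows the nonce comes from two 32-bit rand_s() draws stitched into one 64-bit value; the verify() calls check rand_s's errno_t result, which is 0 on success. The same composition in isolation (rand_s requires _CRT_RAND_S to be defined before including <stdlib.h>):

    #define _CRT_RAND_S
    #include <stdlib.h>

    unsigned long long makeNonce() {
        unsigned int a = 0, b = 0;
        if (rand_s(&a) != 0 || rand_s(&b) != 0)
            abort();                              // 0 means success
        return (((unsigned long long)a) << 32) | b;
    }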
diff --git a/src/mongo/db/oplog.cpp b/src/mongo/db/oplog.cpp
index 9f42ab86fc0..458dc2e177f 100644
--- a/src/mongo/db/oplog.cpp
+++ b/src/mongo/db/oplog.cpp
@@ -66,7 +66,7 @@ namespace mongo {
if ( rsOplogDetails == 0 ) {
Client::Context ctx( logns , dbpath, false);
localDB = ctx.db();
- assert( localDB );
+ verify( localDB );
rsOplogDetails = nsdetails(logns);
massert(13389, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
}
@@ -143,7 +143,7 @@ namespace mongo {
}
else {
// must be initiation
- assert( *ns == 0 );
+ verify( *ns == 0 );
hashNew = 0;
}
@@ -168,13 +168,13 @@ namespace mongo {
int len = posz + obj.objsize() + 1 + 2 /*o:*/;
Record *r;
- DEV assert( logNS == 0 );
+ DEV verify( logNS == 0 );
{
const char *logns = rsoplog;
if ( rsOplogDetails == 0 ) {
Client::Context ctx( logns , dbpath, false);
localDB = ctx.db();
- assert( localDB );
+ verify( localDB );
rsOplogDetails = nsdetails(logns);
massert(13347, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
}
@@ -264,16 +264,16 @@ namespace mongo {
if ( localOplogMainDetails == 0 ) {
Client::Context ctx( logNS , dbpath, false);
localDB = ctx.db();
- assert( localDB );
+ verify( localDB );
localOplogMainDetails = nsdetails(logNS);
- assert( localOplogMainDetails );
+ verify( localOplogMainDetails );
}
Client::Context ctx( logNS , localDB, false );
r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, logNS, len);
}
else {
Client::Context ctx( logNS, dbpath, false );
- assert( nsdetails( logNS ) );
+ verify( nsdetails( logNS ) );
// first we allocate the space, then we fill it below.
r = theDataFileMgr.fast_oplog_insert( nsdetails( logNS ), logNS, len);
}
@@ -570,19 +570,19 @@ namespace mongo {
OpTime t;
for ( int i = 0; i < 10; i++ ) {
OpTime s = OpTime::_now();
- assert( s != t );
+ verify( s != t );
t = s;
}
OpTime q = t;
- assert( q == t );
- assert( !(q != t) );
+ verify( q == t );
+ verify( !(q != t) );
}
} testoptime;
int _dummy_z;
void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
- DEV assert( !d.dbMutex.isWriteLocked() );
+ DEV verify( !d.dbMutex.isWriteLocked() );
Client *c = currentClient.get();
if( c == 0 ) {
@@ -823,7 +823,7 @@ namespace mongo {
if ( opType[1] == 0 )
deleteObjects(ns, o, /*justOne*/ fields[3].booleanSafe());
else
- assert( opType[1] == 'b' ); // "db" advertisement
+ verify( opType[1] == 'b' ); // "db" advertisement
}
else if ( *opType == 'c' ) {
opCounters->gotCommand();
diff --git a/src/mongo/db/oplog.h b/src/mongo/db/oplog.h
index 6d39265310a..6a7a64d530b 100644
--- a/src/mongo/db/oplog.h
+++ b/src/mongo/db/oplog.h
@@ -79,7 +79,7 @@ namespace mongo {
bool done() const { return !_findingStart; }
/** @return cursor pointing to the first matching op, if done(). */
- shared_ptr<Cursor> cursor() { assert( done() ); return _c; }
+ shared_ptr<Cursor> cursor() { verify( done() ); return _c; }
/** Iterate the cursor, to continue trying to find matching op. */
void next();
diff --git a/src/mongo/db/oplogreader.h b/src/mongo/db/oplogreader.h
index 72eef31a718..fb8d607b01c 100644
--- a/src/mongo/db/oplogreader.h
+++ b/src/mongo/db/oplogreader.h
@@ -50,7 +50,7 @@ namespace mongo {
is needed; if not fine, but if so, need to change.
*//*
void query(const char *ns, const BSONObj& query) {
- assert( !haveCursor() );
+ verify( !haveCursor() );
cursor.reset( _conn->query(ns, query, 0, 0, 0, QueryOption_SlaveOk).release() );
}*/
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index 4f711caf5b8..02ce88f010e 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -104,7 +104,7 @@ namespace mongo {
if ( ! match )
continue;
- assert( !dup ); // can't be a dup, we deleted it!
+ verify( !dup ); // can't be a dup, we deleted it!
if ( !justOne ) {
/* NOTE: this is SLOW. this is not good; noteLocation() was designed to be called across getMore
diff --git a/src/mongo/db/ops/query.cpp b/src/mongo/db/ops/query.cpp
index ee2b29bfbbf..f04267395f9 100644
--- a/src/mongo/db/ops/query.cpp
+++ b/src/mongo/db/ops/query.cpp
@@ -45,7 +45,7 @@ namespace mongo {
throw;
}
catch ( AssertionException& e ) {
- assert( e.getCode() != SendStaleConfigCode && e.getCode() != RecvStaleConfigCode );
+ verify( e.getCode() != SendStaleConfigCode && e.getCode() != RecvStaleConfigCode );
e.getInfo().append( anObjBuilder , "assertion" , "assertionCode" );
curop.debug().exceptionInfo = e.getInfo();
@@ -138,7 +138,7 @@ namespace mongo {
}
p.release();
bool ok = ClientCursor::erase(cursorid);
- assert(ok);
+ verify(ok);
cursorid = 0;
cc = 0;
break;
@@ -188,7 +188,7 @@ namespace mongo {
if ( cc ) {
if ( c->supportYields() ) {
ClientCursor::YieldData data;
- assert( cc->prepareToYield( data ) );
+ verify( cc->prepareToYield( data ) );
}
else {
cc->c()->noteLocation();
@@ -227,7 +227,7 @@ namespace mongo {
}
shared_ptr<ExplainQueryInfo> NoExplainStrategy::_doneQueryInfo() {
- assert( false );
+ verify( false );
return shared_ptr<ExplainQueryInfo>();
}
@@ -317,7 +317,7 @@ namespace mongo {
}
}
BSONObj ret = _cursor->current();
- assert( ret.isValid() );
+ verify( ret.isValid() );
return ret;
}
@@ -391,17 +391,17 @@ namespace mongo {
ScanAndOrder *
ReorderBuildStrategy::newScanAndOrder( const QueryPlan::Summary &queryPlan ) const {
- assert( !_parsedQuery.getOrder().isEmpty() );
- assert( _cursor->ok() );
+ verify( !_parsedQuery.getOrder().isEmpty() );
+ verify( _cursor->ok() );
const FieldRangeSet *fieldRangeSet = 0;
if ( queryPlan.valid() ) {
fieldRangeSet = queryPlan._fieldRangeSetMulti.get();
}
else {
- assert( _queryOptimizerCursor );
+ verify( _queryOptimizerCursor );
fieldRangeSet = _queryOptimizerCursor->initialFieldRangeSet();
}
- assert( fieldRangeSet );
+ verify( fieldRangeSet );
return new ScanAndOrder( _parsedQuery.getSkip(),
_parsedQuery.getNumToReturn(),
_parsedQuery.getOrder(),
@@ -631,7 +631,7 @@ namespace mongo {
NamespaceDetailsTransient::getCursor( ns, query, order, QueryPlanSelectionPolicy::any(),
0, &pq, &queryPlan );
}
- assert( cursor );
+ verify( cursor );
QueryResponseBuilder queryResponseBuilder( pq, cursor, queryPlan, oldPlan );
bool saveClientCursor = false;
@@ -928,10 +928,10 @@ namespace mongo {
return queryWithQueryOptimizer( m, queryOptions, ns, jsobj, curop, query, order,
pq_shared, oldPlan, shardingVersionAtStart, result );
} catch ( const QueryRetryException & ) {
- assert( retry == 0 );
+ verify( retry == 0 );
}
}
- assert( false );
+ verify( false );
return 0;
}
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 3cbf5fcc085..fa5121cbb70 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -279,7 +279,7 @@ namespace mongo {
}
ms.pushStartSize = n;
- assert( ms.pushStartSize == in.embeddedObject().nFields() );
+ verify( ms.pushStartSize == in.embeddedObject().nFields() );
bb.done();
break;
}
@@ -300,14 +300,14 @@ namespace mongo {
switch( in.type() ) {
case NumberInt: x = x&e.numberInt(); break;
case NumberLong: y = y&e.numberLong(); break;
- default: assert( 0 );
+ default: verify( 0 );
}
}
else if ( str::equals(e.fieldName(), "or") ) {
switch( in.type() ) {
case NumberInt: x = x|e.numberInt(); break;
case NumberLong: y = y|e.numberLong(); break;
- default: assert( 0 );
+ default: verify( 0 );
}
}
else {
@@ -318,7 +318,7 @@ namespace mongo {
switch( in.type() ) {
case NumberInt: b.append( shortFieldName , x ); break;
case NumberLong: b.append( shortFieldName , y ); break;
- default: assert( 0 );
+ default: verify( 0 );
}
break;
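The three switches above implement the $bit update modifier: "and"/"or" masks accumulate in x for NumberInt fields and in y for NumberLong fields, and the result is written back with the field's original type. A worked example of the semantics, assuming a stored NumberInt field n = 5:

    // { $bit: { n: { and: 12 } } }  ->  n = 5 & 12 = 4   (0101 & 1100 = 0100)
    // { $bit: { n: { or:  2 } } }   ->  n = 5 | 2  = 7   (0101 | 0010 = 0111)
    // A NumberLong field takes the 64-bit path through y instead of x.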
@@ -552,7 +552,7 @@ namespace mongo {
newObjBuilder.appendAs( newVal , shortFieldName );
BSONObjBuilder b;
b.appendAs( newVal, shortFieldName );
- assert( _objData.isEmpty() );
+ verify( _objData.isEmpty() );
_objData = b.obj();
newVal = _objData.firstElement();
}
@@ -560,7 +560,7 @@ namespace mongo {
void ModSetState::applyModsInPlace( bool isOnDisk ) {
// TODO I think this assertion means that we can get rid of the isOnDisk param
// and just use isOwned as the determination
- DEV assert( isOnDisk == ! _obj.isOwned() );
+ DEV verify( isOnDisk == ! _obj.isOwned() );
for ( ModStateHolder::iterator i = _mods.begin(); i != _mods.end(); ++i ) {
ModState& m = *i->second;
@@ -581,7 +581,7 @@ namespace mongo {
// this should have been handled by prepare
break;
case Mod::POP:
- assert( m.old.eoo() || ( m.old.isABSONObj() && m.old.Obj().isEmpty() ) );
+ verify( m.old.eoo() || ( m.old.isABSONObj() && m.old.Obj().isEmpty() ) );
break;
// [dm] the BSONElementManipulator statements below are for replication (correct?)
case Mod::INC:
@@ -973,12 +973,12 @@ namespace mongo {
else {
BSONObj newObj = mss->createNewFromMods();
checkTooLarge(newObj);
- assert(nsdt);
+ verify(nsdt);
theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
}
if ( logop ) {
- DEV assert( mods->size() );
+ DEV verify( mods->size() );
BSONObj pattern = patternOrig;
if ( mss->haveArrayDepMod() ) {
@@ -1003,7 +1003,7 @@ namespace mongo {
// regular update
BSONElementManipulator::lookForTimestamps( updateobj );
checkNoMods( updateobj );
- assert(nsdt);
+ verify(nsdt);
theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug );
if ( logop ) {
logOp("u", ns, updateobj, &patternOrig, 0, fromMigrate );
@@ -1120,7 +1120,7 @@ namespace mongo {
// The Cursor must have a Matcher to record an elemMatchKey. But currently
// a modifier on a dynamic array field may be applied even if there is no
// elemMatchKey, so a matcher cannot be required.
- //assert( c->matcher() );
+ //verify( c->matcher() );
details.requestElemMatchKey();
}
@@ -1236,7 +1236,7 @@ namespace mongo {
}
if ( logop ) {
- DEV assert( mods->size() );
+ DEV verify( mods->size() );
if ( mss->haveArrayDepMod() ) {
BSONObjBuilder patternBuilder;
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index a4934302e7b..6a37bba1baa 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -136,7 +136,7 @@ namespace mongo {
manip.setInt( elt.numberInt() + in.numberInt() );
break;
default:
- assert(0);
+ verify(0);
}
}
void IncrementMe( BSONElement& in ) const {
@@ -152,7 +152,7 @@ namespace mongo {
manip.SetInt( elt.numberInt() + in.numberInt() );
break;
default:
- assert(0);
+ verify(0);
}
}
@@ -298,7 +298,7 @@ namespace mongo {
bool _hasDynamicArray;
static Mod::Op opFromStr( const char *fn ) {
- assert( fn[0] == '$' );
+ verify( fn[0] == '$' );
switch( fn[1] ) {
case 'i': {
if ( fn[2] == 'n' && fn[3] == 'c' && fn[4] == 0 )
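opFromStr() dispatches on the characters after the leading '$' instead of building a string: the fragment above recognizes "$inc" by testing fn[2] == 'n', fn[3] == 'c', fn[4] == 0. A minimal sketch of the same style of dispatch (hypothetical enum, not the full operator table):

    #include <cstring>

    enum Op { INC, SET, UNKNOWN };

    Op opFromStrSketch(const char* fn) {
        if (fn[0] != '$') return UNKNOWN;   // the real code verify()s this
        if (fn[1] == 'i' && fn[2] == 'n' && fn[3] == 'c' && fn[4] == 0)
            return INC;                     // "$inc"
        if (fn[1] == 's' && std::strcmp(fn + 2, "et") == 0)
            return SET;                     // "$set"
        return UNKNOWN;
    }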
@@ -513,7 +513,7 @@ namespace mongo {
case NumberInt:
b.append( n , incint ); break;
default:
- assert(0);
+ verify(0);
}
}
diff --git a/src/mongo/db/pagefault.cpp b/src/mongo/db/pagefault.cpp
index 713c60c7c50..a782921d4ba 100644
--- a/src/mongo/db/pagefault.cpp
+++ b/src/mongo/db/pagefault.cpp
@@ -11,7 +11,7 @@ namespace mongo {
PageFaultException::PageFaultException(Record *_r)
{
- assert( cc().allowedToThrowPageFaultException() );
+ verify( cc().allowedToThrowPageFaultException() );
cc().getPageFaultRetryableSection()->didLap();
r = _r;
era = LockMongoFilesShared::getEra();
@@ -19,7 +19,7 @@ namespace mongo {
}
void PageFaultException::touch() {
- assert( !d.dbMutex.atLeastReadLocked() );
+ verify( !d.dbMutex.atLeastReadLocked() );
LockMongoFilesShared lk;
if( LockMongoFilesShared::getEra() != era ) {
// files opened and closed. we don't try to handle this case, we just bail out; much simpler
@@ -35,7 +35,7 @@ namespace mongo {
}
PageFaultRetryableSection::PageFaultRetryableSection() {
_laps = 0;
- assert( cc()._pageFaultRetryableSection == 0 );
+ verify( cc()._pageFaultRetryableSection == 0 );
if( d.dbMutex.atLeastReadLocked() ) {
cc()._pageFaultRetryableSection = 0;
if( debug || logLevel > 2 ) {
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 877a2e92981..e7eb94dfd0e 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -82,7 +82,7 @@ namespace mongo {
bool inDBRepair = false;
struct doingRepair {
doingRepair() {
- assert( ! inDBRepair );
+ verify( ! inDBRepair );
inDBRepair = true;
}
~doingRepair() {
@@ -117,7 +117,7 @@ namespace mongo {
BackgroundOperation::BackgroundOperation(const char *ns) : _ns(ns) {
SimpleMutex::scoped_lock lk(m);
dbsInProg[_ns.db]++;
- assert( nsInProg.count(_ns.ns()) == 0 );
+ verify( nsInProg.count(_ns.ns()) == 0 );
nsInProg.insert(_ns.ns());
}
@@ -221,7 +221,7 @@ namespace mongo {
if ( sz > 1000000000 )
sz = 1000000000;
int z = ((int)sz) & 0xffffff00;
- assert( z > len );
+ verify( z > len );
return z;
}
@@ -274,7 +274,7 @@ namespace mongo {
while( i.more() ) {
BSONElement e = i.next();
int size = int( e.number() );
- assert( size <= 0x7fffffff );
+ verify( size <= 0x7fffffff );
// $nExtents is just for testing - always allocate new extents
// rather than reuse existing extents so we have some predictability
// in the extent size used by our tests
@@ -284,9 +284,9 @@ namespace mongo {
else if ( int( e.number() ) > 0 ) {
// We create '$nExtents' extents, each of size 'size'.
int nExtents = int( e.number() );
- assert( size <= 0x7fffffff );
+ verify( size <= 0x7fffffff );
for ( int i = 0; i < nExtents; ++i ) {
- assert( size <= 0x7fffffff );
+ verify( size <= 0x7fffffff );
// $nExtents is just for testing - always allocate new extents
// rather than reuse existing extents so we have some predictability
// in the extent size used by our tests
@@ -308,7 +308,7 @@ namespace mongo {
}
NamespaceDetails *d = nsdetails(ns);
- assert(d);
+ verify(d);
bool ensure = false;
if ( options.getField( "autoIndexId" ).type() ) {
@@ -408,24 +408,24 @@ namespace mongo {
/** @return true if found and opened. if uninitialized (prealloc only) does not open. */
bool MongoDataFile::openExisting( const char *filename ) {
- assert( _mb == 0 );
+ verify( _mb == 0 );
if( !boost::filesystem::exists(filename) )
return false;
if( !mmf.open(filename,false) ) {
dlog(2) << "info couldn't open " << filename << " probably end of datafile list" << endl;
return false;
}
- _mb = mmf.getView(); assert(_mb);
+ _mb = mmf.getView(); verify(_mb);
unsigned long long sz = mmf.length();
- assert( sz <= 0x7fffffff );
- assert( sz % 4096 == 0 );
+ verify( sz <= 0x7fffffff );
+ verify( sz % 4096 == 0 );
if( sz < 64*1024*1024 && !cmdLine.smallfiles ) {
if( sz >= 16*1024*1024 && sz % (1024*1024) == 0 ) {
log() << "info openExisting file size " << sz << " but cmdLine.smallfiles=false" << endl;
}
else {
log() << "openExisting size " << sz << " less then minimum file size expectation " << filename << endl;
- assert(false);
+ verify(false);
}
}
check(_mb);
@@ -447,8 +447,8 @@ namespace mongo {
if ( size > maxSize() )
size = maxSize();
- assert( size >= 64*1024*1024 || cmdLine.smallfiles );
- assert( size % 4096 == 0 );
+ verify( size >= 64*1024*1024 || cmdLine.smallfiles );
+ verify( size % 4096 == 0 );
if ( preallocateOnly ) {
if ( cmdLine.prealloc ) {
@@ -458,11 +458,11 @@ namespace mongo {
}
{
- assert( _mb == 0 );
+ verify( _mb == 0 );
unsigned long long sz = size;
if( mmf.create(filename, sz, false) )
_mb = mmf.getView();
- assert( sz <= 0x7fffffff );
+ verify( sz <= 0x7fffffff );
size = (int) sz;
}
check(_mb);
@@ -477,11 +477,11 @@ namespace mongo {
NamespaceIndex *ni = nsindex(ns);
NamespaceDetails *details = ni->details(ns);
if ( details ) {
- assert( !details->lastExtent.isNull() );
- assert( !details->firstExtent.isNull() );
+ verify( !details->lastExtent.isNull() );
+ verify( !details->firstExtent.isNull() );
getDur().writingDiskLoc(e->xprev) = details->lastExtent;
getDur().writingDiskLoc(details->lastExtent.ext()->xnext) = eloc;
- assert( !eloc.isNull() );
+ verify( !eloc.isNull() );
getDur().writingDiskLoc(details->lastExtent) = eloc;
}
else {
@@ -500,7 +500,7 @@ namespace mongo {
{
// make sizes align with VM page size
int newSize = (approxSize + 0xfff) & 0xfffff000;
- assert( newSize >= 0 );
+ verify( newSize >= 0 );
if( newSize < Extent::maxSize() )
approxSize = newSize;
}
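The masking above rounds an extent size up to the 4KB VM page boundary by adding 0xfff and clearing the low 12 bits; verify( newSize >= 0 ) guards against the addition overflowing into the sign bit. Worked example:

    int roundToPage(int approxSize) {
        return (approxSize + 0xfff) & 0xfffff000;   // next multiple of 4096
    }
    // roundToPage(5000) == 8192   (5000 + 4095 = 9095, low 12 bits cleared)
    // roundToPage(4096) == 4096   (already aligned)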
@@ -513,7 +513,7 @@ namespace mongo {
/* note there could be a lot of looping here if the db just started and
no files are open yet. we might want to do something about that. */
if ( loops > 8 ) {
- assert( loops < 10000 );
+ verify( loops < 10000 );
out() << "warning: loops=" << loops << " fileno:" << fileNo << ' ' << ns << '\n';
}
log() << "newExtent: " << ns << " file " << fileNo << " full, adding a new file\n";
@@ -694,7 +694,7 @@ namespace mongo {
if( firstEmptyRegion.isNull() )
return 0;
- assert(len > 0);
+ verify(len > 0);
int newRecSize = len + Record::HeaderSize;
DiskLoc newRecordLoc = firstEmptyRegion;
Record *r = getRecord(newRecordLoc);
@@ -709,13 +709,13 @@ namespace mongo {
r->lengthWithHeaders = newRecSize;
r->next.markAsFirstOrLastInExtent(this); // we're now last in the extent
if( !lastRecord.isNull() ) {
- assert(getRecord(lastRecord)->next.lastInExtent()); // it was the last one
+ verify(getRecord(lastRecord)->next.lastInExtent()); // it was the last one
getRecord(lastRecord)->next.set(newRecordLoc); // until now
r->prev.set(lastRecord);
}
else {
r->prev.markAsFirstOrLastInExtent(this); // we are the first in the extent
- assert( firstRecord.isNull() );
+ verify( firstRecord.isNull() );
firstRecord = newRecordLoc;
}
lastRecord = newRecordLoc;
@@ -843,13 +843,13 @@ namespace mongo {
*/
void freeExtents(DiskLoc firstExt, DiskLoc lastExt) {
{
- assert( !firstExt.isNull() && !lastExt.isNull() );
+ verify( !firstExt.isNull() && !lastExt.isNull() );
Extent *f = firstExt.ext();
Extent *l = lastExt.ext();
- assert( f->xprev.isNull() );
- assert( l->xnext.isNull() );
- assert( f==l || !f->xnext.isNull() );
- assert( f==l || !l->xprev.isNull() );
+ verify( f->xprev.isNull() );
+ verify( l->xnext.isNull() );
+ verify( f==l || !f->xnext.isNull() );
+ verify( f==l || !l->xprev.isNull() );
}
string s = cc().database()->name + FREELIST_NS;
@@ -866,7 +866,7 @@ namespace mongo {
}
else {
DiskLoc a = freeExtents->firstExtent;
- assert( a.ext()->xprev.isNull() );
+ verify( a.ext()->xprev.isNull() );
getDur().writingDiskLoc( a.ext()->xprev ) = lastExt;
getDur().writingDiskLoc( lastExt.ext()->xnext ) = a;
getDur().writingDiskLoc( freeExtents->firstExtent ) = firstExt;
@@ -883,7 +883,7 @@ namespace mongo {
BackgroundOperation::assertNoBgOpInProgForNs(nsToDrop.c_str());
NamespaceString s(nsToDrop);
- assert( s.db == cc().database()->name );
+ verify( s.db == cc().database()->name );
if( s.isSystem() ) {
if( s.coll == "system.profile" )
uassert( 10087 , "turn off profiling before dropping system.profile collection", cc().database()->profile == 0 );
@@ -920,7 +920,7 @@ namespace mongo {
if ( d->nIndexes != 0 ) {
try {
- assert( dropIndexes(d, name.c_str(), "*", errmsg, result, true) );
+ verify( dropIndexes(d, name.c_str(), "*", errmsg, result, true) );
}
catch( DBException& e ) {
stringstream ss;
@@ -928,7 +928,7 @@ namespace mongo {
ss << " cause: " << e.what();
uasserted(12503,ss.str());
}
- assert( d->nIndexes == 0 );
+ verify( d->nIndexes == 0 );
}
log(1) << "\t dropIndexes done" << endl;
result.append("ns", name.c_str());
@@ -1078,8 +1078,8 @@ namespace mongo {
BSONObj objOld(toupdate);
BSONObj objNew(_buf);
- DEV assert( objNew.objsize() == _len );
- DEV assert( objNew.objdata() == _buf );
+ DEV verify( objNew.objsize() == _len );
+ DEV verify( objNew.objdata() == _buf );
if( !objNew.hasElement("_id") && objOld.hasElement("_id") ) {
/* add back the old _id value if the update removes it. Note this implementation is slow
@@ -1088,7 +1088,7 @@ namespace mongo {
*/
BSONObjBuilder b;
BSONElement e;
- assert( objOld.getObjectID(e) );
+ verify( objOld.getObjectID(e) );
b.append(e); // put _id first, for best performance
b.appendElements(objNew);
objNew = b.obj();
@@ -1138,7 +1138,7 @@ namespace mongo {
problem() << " caught assertion update unindex " << idx.indexNamespace() << endl;
}
}
- assert( !dl.isNull() );
+ verify( !dl.isNull() );
BSONObj idxKey = idx.info.obj().getObjectField("key");
Ordering ordering = Ordering::make(idxKey);
keyUpdates += changes[x].added.size();
@@ -1166,7 +1166,7 @@ namespace mongo {
}
int Extent::followupSize(int len, int lastExtentLen) {
- assert( len < Extent::maxSize() );
+ verify( len < Extent::maxSize() );
int x = initialSize(len);
// changed from 1.20 to 1.35 in v2.1.x to get to larger extent size faster
int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.35);
@@ -1182,7 +1182,7 @@ namespace mongo {
}
sz = ((int)sz) & 0xffffff00;
- assert( sz > len );
+ verify( sz > len );
return sz;
}
@@ -1199,7 +1199,7 @@ namespace mongo {
IndexInterface& ii = idx.idxInterface();
Ordering ordering = Ordering::make(order);
- assert( !recordLoc.isNull() );
+ verify( !recordLoc.isNull() );
try {
// we can't do the two step method with multi keys as insertion of one key changes the indexes
@@ -1321,7 +1321,7 @@ namespace mongo {
if( ++n == 2 ) {
d->setIndexIsMultikey(idxNo);
}
- assert( !recordLoc.isNull() );
+ verify( !recordLoc.isNull() );
try {
ii.bt_insert(idx.head, recordLoc, *i, ordering, dupsAllowed, idx);
}
@@ -1376,7 +1376,7 @@ namespace mongo {
BtreeBuilder<V> btBuilder(dupsAllowed, idx);
BSONObj keyLast;
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
- assert( pm == op->setMessage( "index: (2/3) btree bottom up" , phase1->nkeys , 10 ) );
+ verify( pm == op->setMessage( "index: (2/3) btree bottom up" , phase1->nkeys , 10 ) );
while( i->more() ) {
RARELY killCurrentOp.checkForInterrupt();
BSONObjExternalSorter::Data d = i->next();
@@ -1479,7 +1479,7 @@ namespace mongo {
else if( idx.version() == 1 )
buildBottomUpPhases2And3<V1>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
else
- assert(false);
+ verify(false);
if( dropDups )
log() << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;
@@ -1591,14 +1591,14 @@ namespace mongo {
unsigned long long n = 0;
prep(ns.c_str(), d);
- assert( idxNo == d->nIndexes );
+ verify( idxNo == d->nIndexes );
try {
idx.head.writing() = idx.idxInterface().addBucket(idx);
n = addExistingToIndex(ns.c_str(), d, idx, idxNo);
}
catch(...) {
if( cc().database() && nsdetails(ns.c_str()) == d ) {
- assert( idxNo == d->nIndexes );
+ verify( idxNo == d->nIndexes );
done(ns.c_str(), d);
}
else {
@@ -1606,7 +1606,7 @@ namespace mongo {
}
throw;
}
- assert( idxNo == d->nIndexes );
+ verify( idxNo == d->nIndexes );
done(ns.c_str(), d);
return n;
}
@@ -1641,9 +1641,9 @@ namespace mongo {
Timer t;
unsigned long long n;
- assert( !BackgroundOperation::inProgForNs(ns.c_str()) ); // should have been checked earlier, better not be...
- assert( d->indexBuildInProgress == 0 );
- assert( Lock::isWriteLocked(ns) );
+ verify( !BackgroundOperation::inProgForNs(ns.c_str()) ); // should have been checked earlier, better not be...
+ verify( d->indexBuildInProgress == 0 );
+ verify( Lock::isWriteLocked(ns) );
RecoverableIndexState recoverable( d );
// Build index spec here in case the collection is empty and the index details are invalid
@@ -1651,7 +1651,7 @@ namespace mongo {
if( inDBRepair || !background ) {
n = fastBuildIndex(ns.c_str(), d, idx, idxNo);
- assert( !idx.head.isNull() );
+ verify( !idx.head.isNull() );
}
else {
BackgroundIndexBuildJob j(ns.c_str());
@@ -1725,7 +1725,7 @@ namespace mongo {
IDToInsert_() {
type = (char) jstOID;
strcpy(_id, "_id");
- assert( sizeof(IDToInsert_) == 17 );
+ verify( sizeof(IDToInsert_) == 17 );
}
} idToInsert_;
struct IDToInsert : public BSONElement {
@@ -1906,7 +1906,7 @@ namespace mongo {
log() << "failed to drop index after a unique key error building it: " << errmsg << ' ' << tabletoidxns << ' ' << name << endl;
}
- assert( le && !saveerrmsg.empty() );
+ verify( le && !saveerrmsg.empty() );
raiseError(savecode,saveerrmsg.c_str());
throw;
}
@@ -1941,7 +1941,7 @@ namespace mongo {
string tabletoidxns;
BSONObj fixedIndexObject;
if ( addIndex ) {
- assert( obuf );
+ verify( obuf );
BSONObj io((const char *) obuf);
if( !prepareToBuildIndex(io, god, tabletoidxns, tableToIndex, fixedIndexObject ) ) {
// prepare creates _id itself, or this indicates to fail the build silently (such
@@ -1978,7 +1978,7 @@ namespace mongo {
lenWHdr = (int) (lenWHdr * d->paddingFactor);
if ( lenWHdr == 0 ) {
// old datafiles, backward compatible here.
- assert( d->paddingFactor == 0 );
+ verify( d->paddingFactor == 0 );
*getDur().writing(&d->paddingFactor) = 1.0;
lenWHdr = len + Record::HeaderSize;
}
@@ -2006,14 +2006,14 @@ namespace mongo {
}
if ( loc.isNull() ) {
log() << "insert: couldn't alloc space for object ns:" << ns << " capped:" << d->capped << endl;
- assert(d->capped);
+ verify(d->capped);
return DiskLoc();
}
if( earlyIndex ) {
// add record to indexes using two step method so we can do the reading outside a write lock
if ( d->nIndexes ) {
- assert( obuf );
+ verify( obuf );
BSONObj obj((const char *) obuf);
try {
indexRecordUsingTwoSteps(d, obj, loc, true);
@@ -2027,12 +2027,12 @@ namespace mongo {
}
// really allocate now
DiskLoc real = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
- assert( real == loc );
+ verify( real == loc );
}
Record *r = loc.rec();
{
- assert( r->lengthWithHeaders >= lenWHdr );
+ verify( r->lengthWithHeaders >= lenWHdr );
r = (Record*) getDur().writingPtr(r, lenWHdr);
if( addID ) {
/* a little effort was made here to avoid a double copy when we add an ID */
@@ -2099,17 +2099,17 @@ namespace mongo {
assumes ns is capped and has no indexes
*/
Record* DataFileMgr::fast_oplog_insert(NamespaceDetails *d, const char *ns, int len) {
- assert( d );
- RARELY assert( d == nsdetails(ns) );
- DEV assert( d == nsdetails(ns) );
+ verify( d );
+ RARELY verify( d == nsdetails(ns) );
+ DEV verify( d == nsdetails(ns) );
DiskLoc extentLoc;
int lenWHdr = len + Record::HeaderSize;
DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
- assert( !loc.isNull() );
+ verify( !loc.isNull() );
Record *r = loc.rec();
- assert( r->lengthWithHeaders >= lenWHdr );
+ verify( r->lengthWithHeaders >= lenWHdr );
Extent *e = r->myExtent(loc);
if ( e->lastRecord.isNull() ) {
@@ -2163,8 +2163,8 @@ namespace mongo {
log(1) << "dropDatabase " << db << endl;
Lock::assertWriteLocked(db);
Database *d = cc().database();
- assert( d );
- assert( d->name == db );
+ verify( d );
+ verify( d->name == db );
BackgroundOperation::assertNoBgOpInProgForDb(d->name.c_str());
@@ -2293,8 +2293,8 @@ namespace mongo {
string localhost = ss.str();
problem() << "repairDatabase " << dbName << endl;
- assert( cc().database()->name == dbName );
- assert( cc().database()->path == dbpath );
+ verify( cc().database()->name == dbName );
+ verify( cc().database()->path == dbpath );
BackgroundOperation::assertNoBgOpInProgForDb(dbName);
@@ -2321,7 +2321,7 @@ namespace mongo {
{
// clone to temp location, which effectively does repair
Client::Context ctx( dbName, reservedPathString );
- assert( ctx.justCreated() );
+ verify( ctx.justCreated() );
res = cloneFrom(localhost.c_str(), errmsg, dbName,
/*logForReplication=*/false, /*slaveOk*/false, /*replauth*/false,
@@ -2381,7 +2381,7 @@ namespace mongo {
int i = 0;
int extra = 10; // should not be necessary, this is defensive in case there are missing files
while ( 1 ) {
- assert( i <= DiskLoc::MaxFiles );
+ verify( i <= DiskLoc::MaxFiles );
stringstream ss;
ss << c << i;
q = p / ss.str();
diff --git a/src/mongo/db/pdfile.h b/src/mongo/db/pdfile.h
index 908159ebd5b..d9ebb3f3a6e 100644
--- a/src/mongo/db/pdfile.h
+++ b/src/mongo/db/pdfile.h
@@ -215,7 +215,7 @@ namespace mongo {
DiskLoc nextInExtent(const DiskLoc& myLoc) {
if ( nextOfs == DiskLoc::NullOfs )
return DiskLoc();
- assert( nextOfs );
+ verify( nextOfs );
return DiskLoc(myLoc.a(), nextOfs);
}
@@ -302,15 +302,15 @@ namespace mongo {
DiskLoc reuse(const char *nsname, bool newUseIsAsCapped);
bool isOk() const { return magic == 0x41424344; }
- void assertOk() const { assert(isOk()); }
+ void assertOk() const { verify(isOk()); }
Record* newRecord(int len);
Record* getRecord(DiskLoc dl) {
- assert( !dl.isNull() );
- assert( dl.sameFile(myLoc) );
+ verify( !dl.isNull() );
+ verify( dl.sameFile(myLoc) );
int x = dl.getOfs() - myLoc.getOfs();
- assert( x > 0 );
+ verify( x > 0 );
return (Record *) (((char *) this) + x);
}
@@ -398,13 +398,13 @@ namespace mongo {
}
getDur().createdFile(filename, filelength);
- assert( HeaderSize == 8192 );
+ verify( HeaderSize == 8192 );
DataFileHeader *h = getDur().writing(this);
h->fileLength = filelength;
h->version = PDFILE_VERSION;
h->versionMinor = PDFILE_VERSION_MINOR;
h->unused.set( fileno, HeaderSize );
- assert( (data-(char*)this) == HeaderSize );
+ verify( (data-(char*)this) == HeaderSize );
h->unusedLength = fileLength - HeaderSize - 16;
}
}
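The two verify() calls in this initialization path pin the on-disk layout: the header must be exactly 8192 bytes and the data member must start immediately after it. With C++11 the same layout contract can be expressed at compile time; a sketch with a hypothetical struct (not the real DataFileHeader):

    #include <cstddef>

    struct HeaderSketch {
        int  fileLength;
        int  version;
        int  versionMinor;
        char reserved[8192 - 3 * sizeof(int)];   // pad to 8KB
        char data[4];                            // first record bytes
    };
    static_assert(offsetof(HeaderSketch, data) == 8192,
                  "data must start right after the 8KB header");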
@@ -481,7 +481,7 @@ namespace mongo {
return BSONObj(rec()->accessed());
}
inline DeletedRecord* DiskLoc::drec() const {
- assert( _a != -1 );
+ verify( _a != -1 );
DeletedRecord* dr = (DeletedRecord*) rec();
memconcept::is(dr, memconcept::concept::deletedrecord);
return dr;
@@ -493,7 +493,7 @@ namespace mongo {
template< class V >
inline
const BtreeBucket<V> * DiskLoc::btree() const {
- assert( _a != -1 );
+ verify( _a != -1 );
Record *r = rec();
memconcept::is(r, memconcept::concept::btreebucket, "", 8192);
return (const BtreeBucket<V> *) r->data;
@@ -510,7 +510,7 @@ namespace mongo {
inline NamespaceIndex* nsindex(const char *ns) {
Database *database = cc().database();
- assert( database );
+ verify( database );
memconcept::is(database, memconcept::concept::database, ns, sizeof(Database));
DEV {
char buf[256];
@@ -519,7 +519,7 @@ namespace mongo {
out() << "ERROR: attempt to write to wrong database\n";
out() << " ns:" << ns << '\n';
out() << " database->name:" << database->name << endl;
- assert( database->name == buf );
+ verify( database->name == buf );
}
}
return &database->namespaceIndex;
@@ -535,12 +535,12 @@ namespace mongo {
}
inline Extent* DataFileMgr::getExtent(const DiskLoc& dl) {
- assert( dl.a() != -1 );
+ verify( dl.a() != -1 );
return cc().database()->getFile(dl.a())->getExtent(dl);
}
inline Record* DataFileMgr::getRecord(const DiskLoc& dl) {
- assert( dl.a() != -1 );
+ verify( dl.a() != -1 );
Record* r = cc().database()->getFile(dl.a())->recordAt(dl);
return r;
}
@@ -548,7 +548,7 @@ namespace mongo {
BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
inline DeletedRecord* DataFileMgr::makeDeletedRecord(const DiskLoc& dl, int len) {
- assert( dl.a() != -1 );
+ verify( dl.a() != -1 );
return (DeletedRecord*) cc().database()->getFile(dl.a())->makeRecord(dl, sizeof(DeletedRecord));
}
diff --git a/src/mongo/db/pipeline/accumulator.cpp b/src/mongo/db/pipeline/accumulator.cpp
index 84e61ca82a0..84b4bf25a7c 100755
--- a/src/mongo/db/pipeline/accumulator.cpp
+++ b/src/mongo/db/pipeline/accumulator.cpp
@@ -39,7 +39,7 @@ namespace mongo {
void Accumulator::opToBson(
BSONObjBuilder *pBuilder, string opName,
string fieldName, unsigned depth) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
BSONObjBuilder builder;
vpOperand[0]->addToBsonObj(&builder, opName, depth);
pBuilder->append(fieldName, builder.done());
@@ -52,7 +52,7 @@ namespace mongo {
void Accumulator::addToBsonArray(
BSONArrayBuilder *pBuilder, unsigned depth) const {
- assert(false); // these can't appear in arrays
+ verify(false); // these can't appear in arrays
}
void agg_framework_reservedErrors() {
diff --git a/src/mongo/db/pipeline/accumulator_add_to_set.cpp b/src/mongo/db/pipeline/accumulator_add_to_set.cpp
index 2e006caf55d..61a0ca5a39b 100755
--- a/src/mongo/db/pipeline/accumulator_add_to_set.cpp
+++ b/src/mongo/db/pipeline/accumulator_add_to_set.cpp
@@ -23,7 +23,7 @@
namespace mongo {
intrusive_ptr<const Value> AccumulatorAddToSet::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
if (prhs->getType() == Undefined)
@@ -37,7 +37,7 @@ namespace mongo {
If we didn't, then we'd get an array of arrays, with one array
from each shard that responds.
*/
- assert(prhs->getType() == Array);
+ verify(prhs->getType() == Array);
intrusive_ptr<ValueIterator> pvi(prhs->getArray());
while(pvi->more()) {
diff --git a/src/mongo/db/pipeline/accumulator_avg.cpp b/src/mongo/db/pipeline/accumulator_avg.cpp
index d9df112f52b..f166c185f9a 100755
--- a/src/mongo/db/pipeline/accumulator_avg.cpp
+++ b/src/mongo/db/pipeline/accumulator_avg.cpp
@@ -40,12 +40,12 @@ namespace mongo {
*/
intrusive_ptr<const Value> prhs(
vpOperand[0]->evaluate(pDocument));
- assert(prhs->getType() == Object);
+ verify(prhs->getType() == Object);
intrusive_ptr<Document> pShardDoc(prhs->getDocument());
intrusive_ptr<const Value> pSubTotal(
pShardDoc->getValue(subTotalName));
- assert(pSubTotal.get());
+ verify(pSubTotal.get());
BSONType subTotalType = pSubTotal->getType();
if ((totalType == NumberLong) || (subTotalType == NumberLong))
totalType = NumberLong;
diff --git a/src/mongo/db/pipeline/accumulator_first.cpp b/src/mongo/db/pipeline/accumulator_first.cpp
index 9c45e409237..937b260f136 100755
--- a/src/mongo/db/pipeline/accumulator_first.cpp
+++ b/src/mongo/db/pipeline/accumulator_first.cpp
@@ -23,7 +23,7 @@ namespace mongo {
intrusive_ptr<const Value> AccumulatorFirst::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
/* only remember the first value seen */
if (!pValue.get())
diff --git a/src/mongo/db/pipeline/accumulator_last.cpp b/src/mongo/db/pipeline/accumulator_last.cpp
index 3d929fc57c5..820907a1151 100755
--- a/src/mongo/db/pipeline/accumulator_last.cpp
+++ b/src/mongo/db/pipeline/accumulator_last.cpp
@@ -23,7 +23,7 @@ namespace mongo {
intrusive_ptr<const Value> AccumulatorLast::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
/* always remember the last value seen */
pValue = vpOperand[0]->evaluate(pDocument);
diff --git a/src/mongo/db/pipeline/accumulator_min_max.cpp b/src/mongo/db/pipeline/accumulator_min_max.cpp
index ce0151847d4..902f910dcb8 100755
--- a/src/mongo/db/pipeline/accumulator_min_max.cpp
+++ b/src/mongo/db/pipeline/accumulator_min_max.cpp
@@ -23,7 +23,7 @@ namespace mongo {
intrusive_ptr<const Value> AccumulatorMinMax::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
/* if this is the first value, just use it */
@@ -42,7 +42,7 @@ namespace mongo {
AccumulatorMinMax::AccumulatorMinMax(int theSense):
AccumulatorSingleValue(),
sense(theSense) {
- assert((sense == 1) || (sense == -1));
+ verify((sense == 1) || (sense == -1));
}
intrusive_ptr<Accumulator> AccumulatorMinMax::createMin(
diff --git a/src/mongo/db/pipeline/accumulator_push.cpp b/src/mongo/db/pipeline/accumulator_push.cpp
index b0c11dcf70f..932ca6361cd 100755
--- a/src/mongo/db/pipeline/accumulator_push.cpp
+++ b/src/mongo/db/pipeline/accumulator_push.cpp
@@ -23,7 +23,7 @@
namespace mongo {
intrusive_ptr<const Value> AccumulatorPush::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
if (prhs->getType() == Undefined)
@@ -37,7 +37,7 @@ namespace mongo {
If we didn't, then we'd get an array of arrays, with one array
from each shard that responds.
*/
- assert(prhs->getType() == Array);
+ verify(prhs->getType() == Array);
intrusive_ptr<ValueIterator> pvi(prhs->getArray());
while(pvi->more()) {
diff --git a/src/mongo/db/pipeline/accumulator_sum.cpp b/src/mongo/db/pipeline/accumulator_sum.cpp
index 7f268efcb32..26258c2f19a 100755
--- a/src/mongo/db/pipeline/accumulator_sum.cpp
+++ b/src/mongo/db/pipeline/accumulator_sum.cpp
@@ -23,7 +23,7 @@ namespace mongo {
intrusive_ptr<const Value> AccumulatorSum::evaluate(
const intrusive_ptr<Document> &pDocument) const {
- assert(vpOperand.size() == 1);
+ verify(vpOperand.size() == 1);
intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
/* upgrade to the widest type required to hold the result */
diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp
index f7a85459a99..a224f56eacb 100755
--- a/src/mongo/db/pipeline/document.cpp
+++ b/src/mongo/db/pipeline/document.cpp
@@ -16,8 +16,6 @@
#include "pch.h"
#include <boost/functional/hash.hpp>
-#undef assert
-#define assert MONGO_assert
#include "db/jsobj.h"
#include "db/pipeline/dependency_tracker.h"
#include "db/pipeline/document.h"
@@ -204,7 +202,7 @@ namespace mongo {
}
/* NOTREACHED */
- assert(false);
+ verify(false);
return 0;
}
@@ -220,7 +218,7 @@ namespace mongo {
}
pair<string, intrusive_ptr<const Value> > FieldIterator::next() {
- assert(more());
+ verify(more());
pair<string, intrusive_ptr<const Value> > result(
pDocument->vFieldName[index], pDocument->vpValue[index]);
++index;
diff --git a/src/mongo/db/pipeline/document.h b/src/mongo/db/pipeline/document.h
index 533f5b2fc30..0069c985ea9 100755
--- a/src/mongo/db/pipeline/document.h
+++ b/src/mongo/db/pipeline/document.h
@@ -245,7 +245,7 @@ namespace mongo {
}
inline Document::FieldPair Document::getField(size_t index) const {
- assert( index < vFieldName.size() );
+ verify( index < vFieldName.size() );
return FieldPair(vFieldName[index], vpValue[index]);
}
diff --git a/src/mongo/db/pipeline/document_source.cpp b/src/mongo/db/pipeline/document_source.cpp
index 25cc6c6dbd6..51224286402 100755
--- a/src/mongo/db/pipeline/document_source.cpp
+++ b/src/mongo/db/pipeline/document_source.cpp
@@ -37,7 +37,7 @@ namespace mongo {
}
void DocumentSource::setSource(DocumentSource *pTheSource) {
- assert(!pSource);
+ verify(!pSource);
pSource = pTheSource;
}
@@ -52,7 +52,7 @@ namespace mongo {
void DocumentSource::manageDependencies(
const intrusive_ptr<DependencyTracker> &pTracker) {
#ifdef MONGO_LATER_SERVER_4644
- assert(false); // identify any sources that need this but don't have it
+ verify(false); // identify any sources that need this but don't have it
#endif /* MONGO_LATER_SERVER_4644 */
}
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index e16843b1316..6d22426b89b 100755
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -188,8 +188,8 @@ namespace mongo {
from. This is a convenience for them.
The default implementation of setSource() sets this; if you don't
- need a source, override that to assert(). The default is to
- assert() if this has already been set.
+ need a source, override that to verify(). The default is to
+ verify() if this has already been set.
*/
DocumentSource *pSource;
diff --git a/src/mongo/db/pipeline/document_source_bson_array.cpp b/src/mongo/db/pipeline/document_source_bson_array.cpp
index 46ac2eb82ac..fe90acc0dd4 100755
--- a/src/mongo/db/pipeline/document_source_bson_array.cpp
+++ b/src/mongo/db/pipeline/document_source_bson_array.cpp
@@ -45,7 +45,7 @@ namespace mongo {
}
intrusive_ptr<Document> DocumentSourceBsonArray::getCurrent() {
- assert(haveCurrent);
+ verify(haveCurrent);
BSONObj documentObj(currentElement.Obj());
intrusive_ptr<Document> pDocument(
Document::createFromBsonObj(&documentObj));
@@ -54,7 +54,7 @@ namespace mongo {
void DocumentSourceBsonArray::setSource(DocumentSource *pSource) {
/* this doesn't take a source */
- assert(false);
+ verify(false);
}
DocumentSourceBsonArray::DocumentSourceBsonArray(
@@ -74,7 +74,7 @@ namespace mongo {
BSONElement *pBsonElement,
const intrusive_ptr<ExpressionContext> &pExpCtx) {
- assert(pBsonElement->type() == Array);
+ verify(pBsonElement->type() == Array);
intrusive_ptr<DocumentSourceBsonArray> pSource(
new DocumentSourceBsonArray(pBsonElement, pExpCtx));
@@ -82,6 +82,6 @@ namespace mongo {
}
void DocumentSourceBsonArray::sourceToBson(BSONObjBuilder *pBuilder) const {
- assert(false); // this has no analog in the BSON world
+ verify(false); // this has no analog in the BSON world
}
}
diff --git a/src/mongo/db/pipeline/document_source_command_futures.cpp b/src/mongo/db/pipeline/document_source_command_futures.cpp
index bcb31d588b0..991dabaf781 100755
--- a/src/mongo/db/pipeline/document_source_command_futures.cpp
+++ b/src/mongo/db/pipeline/document_source_command_futures.cpp
@@ -44,19 +44,19 @@ namespace mongo {
}
intrusive_ptr<Document> DocumentSourceCommandFutures::getCurrent() {
- assert(!eof());
+ verify(!eof());
return pCurrent;
}
void DocumentSourceCommandFutures::setSource(DocumentSource *pSource) {
/* this doesn't take a source */
- assert(false);
+ verify(false);
}
void DocumentSourceCommandFutures::sourceToBson(
BSONObjBuilder *pBuilder) const {
/* this has no BSON equivalent */
- assert(false);
+ verify(false);
}
DocumentSourceCommandFutures::DocumentSourceCommandFutures(
diff --git a/src/mongo/db/pipeline/document_source_filter_base.cpp b/src/mongo/db/pipeline/document_source_filter_base.cpp
index 9b4cd64a54a..c04ff7a9f29 100755
--- a/src/mongo/db/pipeline/document_source_filter_base.cpp
+++ b/src/mongo/db/pipeline/document_source_filter_base.cpp
@@ -75,7 +75,7 @@ namespace mongo {
if (unstarted)
findNext();
- assert(pCurrent.get() != NULL);
+ verify(pCurrent.get() != NULL);
return pCurrent;
}
diff --git a/src/mongo/db/pipeline/document_source_group.cpp b/src/mongo/db/pipeline/document_source_group.cpp
index c47b3b373a4..54f77f69a9f 100755
--- a/src/mongo/db/pipeline/document_source_group.cpp
+++ b/src/mongo/db/pipeline/document_source_group.cpp
@@ -48,7 +48,7 @@ namespace mongo {
if (!populated)
populate();
- assert(groupsIterator != groups.end());
+ verify(groupsIterator != groups.end());
++groupsIterator;
if (groupsIterator == groups.end()) {
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 5a293ceebe6..bd3e32bef48 100755
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -87,7 +87,7 @@ namespace mongo {
void DocumentSourceMatch::manageDependencies(
const intrusive_ptr<DependencyTracker> &pTracker) {
#ifdef MONGO_LATER_SERVER_4644
- assert(false); // $$$ implement dependencies on Matcher
+ verify(false); // $$$ implement dependencies on Matcher
#endif /* MONGO_LATER_SERVER_4644 */
}
}
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 6ddc65fe2d2..45381de555d 100755
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -48,7 +48,7 @@ namespace mongo {
BSONElement *pBsonElement,
const intrusive_ptr<ExpressionContext> &pExpCtx):
DocumentSource(pExpCtx) {
- assert(false && "unimplemented");
+ verify(false && "unimplemented");
}
intrusive_ptr<DocumentSourceOut> DocumentSourceOut::createFromBson(
@@ -61,6 +61,6 @@ namespace mongo {
}
void DocumentSourceOut::sourceToBson(BSONObjBuilder *pBuilder) const {
- assert(false); // CW TODO
+ verify(false); // CW TODO
}
}
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
index 1ccb4377f66..63d231aa283 100755
--- a/src/mongo/db/pipeline/document_source_sort.cpp
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -50,7 +50,7 @@ namespace mongo {
if (!populated)
populate();
- assert(listIterator != documents.end());
+ verify(listIterator != documents.end());
++listIterator;
if (listIterator == documents.end()) {
@@ -149,7 +149,7 @@ namespace mongo {
void DocumentSourceSort::populate() {
/* make sure we've got a sort key */
- assert(vSortKey.size());
+ verify(vSortKey.size());
/* track and warn about how much physical memory has been used */
DocMemMonitor dmm(this);
@@ -215,7 +215,7 @@ namespace mongo {
bool DocumentSourceSort::Carrier::lessThan(
const Carrier &rL, const Carrier &rR) {
/* make sure these aren't from different lists */
- assert(rL.pSort == rR.pSort);
+ verify(rL.pSort == rR.pSort);
/* compare the documents according to the sort key */
return (rL.pSort->compare(rL.pDocument, rR.pDocument) < 0);
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index 496557c548f..b250b0ab253 100755
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -143,7 +143,7 @@ namespace mongo {
/* get the iterator we'll use to unwind the array */
pUnwinder = pUnwindArray->getArray();
- assert(pUnwinder->more()); // we just checked above...
+ verify(pUnwinder->more()); // we just checked above...
pUnwindValue = pUnwinder->next();
}
}
@@ -169,12 +169,12 @@ namespace mongo {
For this to be valid, we must already have pNoUnwindDocument set,
and have set up the vector of indices for that document in fieldIndex.
*/
- assert(pNoUnwindDocument.get());
+ verify(pNoUnwindDocument.get());
intrusive_ptr<Document> pClone(pNoUnwindDocument->clone());
intrusive_ptr<Document> pCurrent(pClone);
const size_t n = fieldIndex.size();
- assert(n);
+ verify(n);
for(size_t i = 0; i < n; ++i) {
const size_t fi = fieldIndex[i];
Document::FieldPair fp(pCurrent->getField(fi));
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index 76e39a8bd05..674090d89e7 100755
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -33,7 +33,7 @@ namespace mongo {
void Expression::toMatcherBson(
BSONObjBuilder *pBuilder, unsigned depth) const {
- assert(false && "Expression::toMatcherBson()");
+ verify(false && "Expression::toMatcherBson()");
}
Expression::ObjectCtx::ObjectCtx(int theOptions):
@@ -42,9 +42,9 @@ namespace mongo {
}
void Expression::ObjectCtx::unwind(string fieldName) {
- assert(unwindOk());
- assert(!unwindUsed());
- assert(fieldName.size());
+ verify(unwindOk());
+ verify(!unwindUsed());
+ verify(fieldName.size());
unwindField = fieldName;
}
@@ -105,7 +105,7 @@ namespace mongo {
/* if it's our first time, create the document expression */
if (!pExpression.get()) {
- assert(pCtx->documentOk());
+ verify(pCtx->documentOk());
// CW TODO error: document not allowed in this context
pExpressionObject = ExpressionObject::create();
@@ -343,7 +343,7 @@ namespace mongo {
} // switch(type)
/* NOTREACHED */
- assert(false);
+ verify(false);
return intrusive_ptr<Expression>();
}
@@ -589,7 +589,7 @@ namespace mongo {
expressions. Direct equality is a degenerate range expression;
range expressions can be open-ended.
*/
- assert(false && "unimplemented");
+ verify(false && "unimplemented");
}
intrusive_ptr<ExpressionNary> (*ExpressionAnd::getFactory() const)() {
@@ -648,12 +648,12 @@ namespace mongo {
void ExpressionCoerceToBool::addToBsonObj(
BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const {
- assert(false && "not possible"); // no equivalent of this
+ verify(false && "not possible"); // no equivalent of this
}
void ExpressionCoerceToBool::addToBsonArray(
BSONArrayBuilder *pBuilder, unsigned depth) const {
- assert(false && "not possible"); // no equivalent of this
+ verify(false && "not possible"); // no equivalent of this
}
/* ----------------------- ExpressionCompare --------------------------- */
@@ -855,7 +855,7 @@ namespace mongo {
return Value::getOne();
default:
- assert(false); // CW TODO internal error
+ verify(false); // CW TODO internal error
return Value::getNull();
}
}
@@ -982,7 +982,7 @@ namespace mongo {
}
const char *ExpressionConstant::getOpName() const {
- assert(false); // this has no name
+ verify(false); // this has no name
return NULL;
}
@@ -1222,7 +1222,7 @@ namespace mongo {
ExpressionObject *pChild =
dynamic_cast<ExpressionObject *>(pE);
- assert(pChild);
+ verify(pChild);
/*
Check on the type of the result object. If it's an
@@ -1322,7 +1322,7 @@ namespace mongo {
void ExpressionObject::addField(const string &fieldName,
const intrusive_ptr<Expression> &pExpression) {
/* must have an expression */
- assert(pExpression.get());
+ verify(pExpression.get());
/* parse the field path */
FieldPath fieldPath(fieldName);
@@ -1397,7 +1397,7 @@ namespace mongo {
if (i < n) {
/* the intermediate child already exists */
pChild = dynamic_cast<ExpressionObject *>(vpExpression[i].get());
- assert(pChild);
+ verify(pChild);
}
else {
/*
@@ -1496,7 +1496,7 @@ namespace mongo {
*/
Expression *pE = vpExpression[iField].get();
ExpressionObject *pEO = dynamic_cast<ExpressionObject *>(pE);
- assert(pEO);
+ verify(pEO);
/*
Add the current field name to the path being built up,
@@ -1783,7 +1783,7 @@ namespace mongo {
void ExpressionFieldRange::toMatcherBson(
BSONObjBuilder *pBuilder, unsigned depth) const {
- assert(pRange.get()); // otherwise, we can't do anything
+ verify(pRange.get()); // otherwise, we can't do anything
/* if there are no endpoints, then every value is accepted */
if (!pRange->pBottom.get() && !pRange->pTop.get())
@@ -1873,7 +1873,7 @@ namespace mongo {
break;
case CMP:
- assert(false); // not allowed
+ verify(false); // not allowed
break;
}
}
@@ -2566,7 +2566,7 @@ namespace mongo {
void ExpressionNary::toBson(
BSONObjBuilder *pBuilder, const char *pOpName, unsigned depth) const {
const size_t nOperand = vpOperand.size();
- assert(nOperand > 0);
+ verify(nOperand > 0);
if (nOperand == 1) {
vpOperand[0]->addToBsonObj(pBuilder, pOpName, depth + 1);
return;
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index c4a03a1d560..fb6ab9adaa8 100755
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -274,17 +274,17 @@ namespace mongo {
if (type == NumberLong)
return static_cast< double >( simple.longValue );
- assert(type == NumberDouble);
+ verify(type == NumberDouble);
return simple.doubleValue;
}
string Value::getString() const {
- assert(getType() == String);
+ verify(getType() == String);
return stringValue;
}
intrusive_ptr<Document> Value::getDocument() const {
- assert(getType() == Object);
+ verify(getType() == Object);
return pDocumentValue;
}
@@ -299,7 +299,7 @@ namespace mongo {
}
intrusive_ptr<const Value> Value::vi::next() {
- assert(more());
+ verify(more());
return (*pvpValue)[nextIndex++];
}
@@ -311,44 +311,44 @@ namespace mongo {
}
intrusive_ptr<ValueIterator> Value::getArray() const {
- assert(getType() == Array);
+ verify(getType() == Array);
intrusive_ptr<ValueIterator> pVI(
new vi(intrusive_ptr<const Value>(this), &vpValue));
return pVI;
}
OID Value::getOid() const {
- assert(getType() == jstOID);
+ verify(getType() == jstOID);
return oidValue;
}
bool Value::getBool() const {
- assert(getType() == Bool);
+ verify(getType() == Bool);
return simple.boolValue;
}
Date_t Value::getDate() const {
- assert(getType() == Date);
+ verify(getType() == Date);
return dateValue;
}
string Value::getRegex() const {
- assert(getType() == RegEx);
+ verify(getType() == RegEx);
return stringValue;
}
string Value::getSymbol() const {
- assert(getType() == Symbol);
+ verify(getType() == Symbol);
return stringValue;
}
int Value::getInt() const {
- assert(getType() == NumberInt);
+ verify(getType() == NumberInt);
return simple.intValue;
}
unsigned long long Value::getTimestamp() const {
- assert(getType() == Timestamp);
+ verify(getType() == Timestamp);
return dateValue;
}
@@ -357,7 +357,7 @@ namespace mongo {
if (type == NumberInt)
return simple.intValue;
- assert(type == NumberLong);
+ verify(type == NumberLong);
return simple.longValue;
}
@@ -393,7 +393,7 @@ namespace mongo {
case BinData:
// pBuilder->appendBinData(fieldName, ...);
- assert(false); // CW TODO unimplemented
+ verify(false); // CW TODO unimplemented
break;
case jstOID:
@@ -417,7 +417,7 @@ namespace mongo {
break;
case CodeWScope:
- assert(false); // CW TODO unimplemented
+ verify(false); // CW TODO unimplemented
break;
case NumberInt:
@@ -443,7 +443,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false); // CW TODO better message
+ verify(false); // CW TODO better message
break;
}
}
@@ -483,7 +483,7 @@ namespace mongo {
break;
case CodeWScope:
- assert(false); // CW TODO unimplemented
+ verify(false); // CW TODO unimplemented
break;
case NumberInt:
@@ -507,7 +507,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false); // CW TODO better message
+ verify(false); // CW TODO better message
break;
}
@@ -618,7 +618,7 @@ namespace mongo {
false);
} // switch(type)
- assert(false); // CW TODO no conversion available
+ verify(false); // CW TODO no conversion available
return jstNULL;
}
@@ -746,7 +746,7 @@ namespace mongo {
case NumberInt:
case NumberLong:
/* these types were handled above */
- assert(false);
+ verify(false);
case String:
return rL->stringValue.compare(rR->stringValue);
@@ -780,7 +780,7 @@ namespace mongo {
}
/* NOTREACHED */
- assert(false);
+ verify(false);
break;
}
@@ -834,7 +834,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false);
+ verify(false);
break;
} // switch(lType)
@@ -920,7 +920,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false); // CW TODO better message
+ verify(false); // CW TODO better message
break;
} // switch(type)
}
@@ -1029,7 +1029,7 @@ namespace mongo {
case DBRef:
case Code:
case MaxKey:
- assert(false); // CW TODO better message
+ verify(false); // CW TODO better message
return sizeof(Value);
}
@@ -1040,7 +1040,7 @@ namespace mongo {
default. However, not all the compilers seem to do that. Therefore,
this final catch-all is here.
*/
- assert(false);
+ verify(false);
return sizeof(Value);
}
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index a638bd012a3..ddfcade02d4 100755
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -411,7 +411,7 @@ namespace mongo {
}
inline size_t Value::getArrayLength() const {
- assert(getType() == Array);
+ verify(getType() == Array);
return vpValue.size();
}
diff --git a/src/mongo/db/projection.cpp b/src/mongo/db/projection.cpp
index d07e56527af..a2ec4af325d 100644
--- a/src/mongo/db/projection.cpp
+++ b/src/mongo/db/projection.cpp
@@ -281,14 +281,14 @@ namespace mongo {
}
BSONObj Projection::KeyOnly::hydrate( const BSONObj& key ) const {
- assert( _include.size() == _names.size() );
+ verify( _include.size() == _names.size() );
BSONObjBuilder b( key.objsize() + _stringSize + 16 );
BSONObjIterator i(key);
unsigned n=0;
while ( i.more() ) {
- assert( n < _include.size() );
+ verify( n < _include.size() );
BSONElement e = i.next();
if ( _include[n] ) {
b.appendAs( e , _names[n] );
diff --git a/src/mongo/db/queryoptimizer.cpp b/src/mongo/db/queryoptimizer.cpp
index 7a9429c8868..40ca5926d57 100644
--- a/src/mongo/db/queryoptimizer.cpp
+++ b/src/mongo/db/queryoptimizer.cpp
@@ -636,7 +636,7 @@ doneCheckOrder:
}
QueryPlanSet::QueryPlanPtr QueryPlanSet::getBestGuess() const {
- assert( _plans.size() );
+ verify( _plans.size() );
if ( _plans[ 0 ]->scanAndOrderRequired() ) {
for ( unsigned i=1; i<_plans.size(); i++ ) {
if ( ! _plans[i]->scanAndOrderRequired() )
@@ -729,7 +729,7 @@ doneCheckOrder:
_currentQps->prepareToRetryQuery() ) {
// Avoid an infinite loop here - this should never occur.
- assert( !retried );
+ verify( !retried );
_runner.reset();
return iterateRunner( originalOp, true );
}
@@ -804,7 +804,7 @@ doneCheckOrder:
}
shared_ptr<QueryOp> QueryPlanSet::Runner::next() {
- assert( !done() );
+ verify( !done() );
if ( _ops.empty() ) {
shared_ptr<QueryOp> initialRet = init();
@@ -827,7 +827,7 @@ doneCheckOrder:
}
shared_ptr<QueryOp> QueryPlanSet::Runner::_next() {
- assert( !_queue.empty() );
+ verify( !_queue.empty() );
OpHolder holder = _queue.pop();
QueryOp &op = *holder._op;
nextOp( op );
@@ -996,7 +996,7 @@ doneCheckOrder:
}
shared_ptr<QueryOp> MultiPlanScanner::nextOp() {
- assert( !doneOps() );
+ verify( !doneOps() );
shared_ptr<QueryOp> ret = _or ? nextOpOr() : nextOpSimple();
if ( ret->error() || ret->complete() ) {
_doneOps = true;
@@ -1038,7 +1038,7 @@ doneCheckOrder:
}
handleBeginningOfClause();
shared_ptr<QueryPlan> bestGuess = _currentQps->getBestGuess();
- assert( bestGuess );
+ verify( bestGuess );
return bestGuess.get();
}
@@ -1154,7 +1154,7 @@ doneCheckOrder:
_matcher = newMatcher;
_c = _queryPlan->newCursor();
// All sub cursors must support yields.
- assert( _c->supportYields() );
+ verify( _c->supportYields() );
if ( _explainPlanInfo ) {
_explainPlanInfo.reset( new ExplainPlanInfo() );
_explainPlanInfo->notePlan( *_c, _queryPlan->scanAndOrderRequired(),
@@ -1211,7 +1211,7 @@ doneCheckOrder:
b.appendMinKey( e.fieldName() );
break;
default:
- assert( false );
+ verify( false );
}
}
return b.obj();
diff --git a/src/mongo/db/queryoptimizer.h b/src/mongo/db/queryoptimizer.h
index f8af8971862..fa9e00bb810 100644
--- a/src/mongo/db/queryoptimizer.h
+++ b/src/mongo/db/queryoptimizer.h
@@ -201,7 +201,7 @@ namespace mongo {
/** To be called by QueryPlanSet::Runner only. */
QueryOp *createChild();
- void setQueryPlan( const QueryPlan *qp ) { _qp = qp; assert( _qp != NULL ); }
+ void setQueryPlan( const QueryPlan *qp ) { _qp = qp; verify( _qp != NULL ); }
void init();
void setException( const DBException &e ) {
_error = true;
@@ -443,7 +443,7 @@ namespace mongo {
/** Add explain information for a new clause. */
void addClauseInfo( const shared_ptr<ExplainClauseInfo> &clauseInfo ) {
- assert( _explainQueryInfo );
+ verify( _explainQueryInfo );
_explainQueryInfo->addClauseInfo( clauseInfo );
}
@@ -588,12 +588,12 @@ namespace mongo {
void noteYield();
const QueryPlan &queryPlan() const {
- assert( _c->ok() && _queryPlan );
+ verify( _c->ok() && _queryPlan );
return *_queryPlan;
}
const Projection::KeyOnly *keyFieldsOnly() const {
- assert( _c->ok() && _queryPlan );
+ verify( _c->ok() && _queryPlan );
return _queryPlan->keyFieldsOnly().get();
}
private:
diff --git a/src/mongo/db/queryoptimizercursorimpl.cpp b/src/mongo/db/queryoptimizercursorimpl.cpp
index 735245fa4fa..87746bf302c 100644
--- a/src/mongo/db/queryoptimizercursorimpl.cpp
+++ b/src/mongo/db/queryoptimizercursorimpl.cpp
@@ -89,7 +89,7 @@ namespace mongo {
// All candidate cursors must support yields for QueryOptimizerCursorImpl's
// prepareToYield() and prepareToTouchEarlierIterate() to work.
- assert( _c->supportYields() );
+ verify( _c->supportYields() );
_capped = _c->capped();
// TODO This violates the current Cursor interface abstraction, but for now it's simpler to keep our own set of
@@ -448,7 +448,7 @@ namespace mongo {
if ( _currOp->error() || !ok() ) {
// Advance to a non error op if one of the ops errored out.
// Advance to a following $or clause if the $or clause returned all results.
- assert( !_mps->doneOps() );
+ verify( !_mps->doneOps() );
_advance( true );
}
}
@@ -760,7 +760,7 @@ namespace mongo {
}
if ( _planPolicy.permitOptimalIdPlan() && isSimpleIdQuery( _query ) ) {
Database *database = cc().database();
- assert( database );
+ verify( database );
NamespaceDetails *d = database->namespaceIndex.details( _ns );
if ( d ) {
int idxNo = d->findIdIndex();
diff --git a/src/mongo/db/querypattern.cpp b/src/mongo/db/querypattern.cpp
index 57cc9a7737d..e431c9be3c9 100644
--- a/src/mongo/db/querypattern.cpp
+++ b/src/mongo/db/querypattern.cpp
@@ -51,7 +51,7 @@ namespace mongo {
bool QueryPattern::operator==( const QueryPattern &other ) const {
bool less = operator<( other );
bool more = other.operator<( *this );
- assert( !( less && more ) );
+ verify( !( less && more ) );
return !( less || more );
}
diff --git a/src/mongo/db/queryutil.cpp b/src/mongo/db/queryutil.cpp
index 61571ab880a..7fd3e28ae64 100644
--- a/src/mongo/db/queryutil.cpp
+++ b/src/mongo/db/queryutil.cpp
@@ -155,7 +155,7 @@ namespace mongo {
BSONObj o = e.embeddedObject();
return simpleRegex(o["$regex"].valuestrsafe(), o["$options"].valuestrsafe());
}
- default: assert(false); return ""; //return squashes compiler warning
+ default: verify(false); return ""; //return squashes compiler warning
}
}
@@ -675,7 +675,7 @@ namespace mongo {
}
void FieldRange::reverse( FieldRange &ret ) const {
- assert( _special.empty() );
+ verify( _special.empty() );
ret._intervals.clear();
ret._objData = _objData;
for( vector<FieldInterval>::const_reverse_iterator i = _intervals.rbegin(); i != _intervals.rend(); ++i ) {
@@ -939,14 +939,14 @@ namespace mongo {
FieldRangeVector::FieldRangeVector( const FieldRangeSet &frs, const IndexSpec &indexSpec,
int direction )
:_indexSpec( indexSpec ), _direction( direction >= 0 ? 1 : -1 ) {
- assert( frs.matchPossibleForIndex( _indexSpec.keyPattern ) );
+ verify( frs.matchPossibleForIndex( _indexSpec.keyPattern ) );
_queries = frs._queries;
BSONObjIterator i( _indexSpec.keyPattern );
set< string > baseObjectNonUniversalPrefixes;
while( i.more() ) {
BSONElement e = i.next();
const FieldRange *range = &frs.range( e.fieldName() );
- assert( !range->empty() );
+ verify( !range->empty() );
if ( !frs.singleKey() ) {
string prefix = str::before( e.fieldName(), '.' );
if ( baseObjectNonUniversalPrefixes.count( prefix ) > 0 ) {
@@ -969,7 +969,7 @@ namespace mongo {
true ) );
range->reverse( _ranges.back() );
}
- assert( !_ranges.back().empty() );
+ verify( !_ranges.back().empty() );
}
uassert( 13385, "combinatorial limit of $in partitioning of result set exceeded",
size() < maxCombinations );
@@ -1032,7 +1032,7 @@ namespace mongo {
BSONElement e = i.next();
const char *name = e.fieldName();
const FieldRange &eRange = range( name );
- assert( !eRange.empty() );
+ verify( !eRange.empty() );
if ( eRange.equality() )
b.appendAs( eRange.min(), name );
else if ( !eRange.universal() ) {
@@ -1236,7 +1236,7 @@ namespace mongo {
return ret;
}
}
- assert( l + 1 == h );
+ verify( l + 1 == h );
return l;
}
@@ -1278,7 +1278,7 @@ namespace mongo {
BSONObj FieldRangeVector::firstMatch( const BSONObj &obj ) const {
// NOTE Only works in forward direction.
- assert( _direction >= 0 );
+ verify( _direction >= 0 );
BSONObjSet keys( BSONObjCmp( _indexSpec.keyPattern ) );
_indexSpec.getKeys( obj, keys );
for( BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
@@ -1550,64 +1550,64 @@ namespace mongo {
BSONObjBuilder b;
b.appendRegex("r", "^foo");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "foo" );
+ verify( simpleRegex(o.firstElement()) == "foo" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "^f?oo");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "" );
+ verify( simpleRegex(o.firstElement()) == "" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "^fz?oo");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "f" );
+ verify( simpleRegex(o.firstElement()) == "f" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "^f", "");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "f" );
+ verify( simpleRegex(o.firstElement()) == "f" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "\\Af", "");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "f" );
+ verify( simpleRegex(o.firstElement()) == "f" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "^f", "m");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "" );
+ verify( simpleRegex(o.firstElement()) == "" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "\\Af", "m");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "f" );
+ verify( simpleRegex(o.firstElement()) == "f" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "\\Af", "mi");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "" );
+ verify( simpleRegex(o.firstElement()) == "" );
}
{
BSONObjBuilder b;
b.appendRegex("r", "\\Af \t\vo\n\ro \\ \\# #comment", "mx");
BSONObj o = b.done();
- assert( simpleRegex(o.firstElement()) == "foo #" );
+ verify( simpleRegex(o.firstElement()) == "foo #" );
}
{
- assert( simpleRegex("^\\Qasdf\\E", "", NULL) == "asdf" );
- assert( simpleRegex("^\\Qasdf\\E.*", "", NULL) == "asdf" );
- assert( simpleRegex("^\\Qasdf", "", NULL) == "asdf" ); // PCRE supports this
- assert( simpleRegex("^\\Qasdf\\\\E", "", NULL) == "asdf\\" );
- assert( simpleRegex("^\\Qas.*df\\E", "", NULL) == "as.*df" );
- assert( simpleRegex("^\\Qas\\Q[df\\E", "", NULL) == "as\\Q[df" );
- assert( simpleRegex("^\\Qas\\E\\\\E\\Q$df\\E", "", NULL) == "as\\E$df" ); // quoted string containing \E
+ verify( simpleRegex("^\\Qasdf\\E", "", NULL) == "asdf" );
+ verify( simpleRegex("^\\Qasdf\\E.*", "", NULL) == "asdf" );
+ verify( simpleRegex("^\\Qasdf", "", NULL) == "asdf" ); // PCRE supports this
+ verify( simpleRegex("^\\Qasdf\\\\E", "", NULL) == "asdf\\" );
+ verify( simpleRegex("^\\Qas.*df\\E", "", NULL) == "as.*df" );
+ verify( simpleRegex("^\\Qas\\Q[df\\E", "", NULL) == "as\\Q[df" );
+ verify( simpleRegex("^\\Qas\\E\\\\E\\Q$df\\E", "", NULL) == "as\\E$df" ); // quoted string containing \E
}
}
diff --git a/src/mongo/db/queryutil.h b/src/mongo/db/queryutil.h
index 40553c91a94..14862cd33c4 100644
--- a/src/mongo/db/queryutil.h
+++ b/src/mongo/db/queryutil.h
@@ -305,10 +305,10 @@ namespace mongo {
* be extracted.
*/
- BSONElement min() const { assert( !empty() ); return _intervals[ 0 ]._lower._bound; }
- BSONElement max() const { assert( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._bound; }
- bool minInclusive() const { assert( !empty() ); return _intervals[ 0 ]._lower._inclusive; }
- bool maxInclusive() const { assert( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._inclusive; }
+ BSONElement min() const { verify( !empty() ); return _intervals[ 0 ]._lower._bound; }
+ BSONElement max() const { verify( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._bound; }
+ bool minInclusive() const { verify( !empty() ); return _intervals[ 0 ]._lower._inclusive; }
+ bool maxInclusive() const { verify( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._inclusive; }
/** @return true iff this range expresses a single equality interval. */
bool equality() const;
diff --git a/src/mongo/db/record.cpp b/src/mongo/db/record.cpp
index 891d2d71f1c..e98852f0d10 100644
--- a/src/mongo/db/record.cpp
+++ b/src/mongo/db/record.cpp
@@ -56,7 +56,7 @@ namespace mongo {
}
State get( int regionHash , size_t region , short offset ) {
- DEV assert( hash( region ) == regionHash );
+ DEV verify( hash( region ) == regionHash );
Entry * e = _get( regionHash , region , false );
if ( ! e )
@@ -69,7 +69,7 @@ namespace mongo {
* @return true if added, false if full
*/
bool in( int regionHash , size_t region , short offset ) {
- DEV assert( hash( region ) == regionHash );
+ DEV verify( hash( region ) == regionHash );
Entry * e = _get( regionHash , region , true );
if ( ! e )
diff --git a/src/mongo/db/repl.cpp b/src/mongo/db/repl.cpp
index 8f5a65338ca..c805ef8bff2 100644
--- a/src/mongo/db/repl.cpp
+++ b/src/mongo/db/repl.cpp
@@ -322,7 +322,7 @@ namespace mongo {
void ReplSource::save() {
BSONObjBuilder b;
- assert( !hostName.empty() );
+ verify( !hostName.empty() );
b.append("host", hostName);
// todo: finish allowing multiple source configs.
// this line doesn't work right when source is null, if that is allowed as it is now:
@@ -336,8 +336,8 @@ namespace mongo {
OpDebug debug;
Client::Context ctx("local.sources");
UpdateResult res = updateObjects("local.sources", o, pattern, true/*upsert for pair feature*/, false,false,debug);
- assert( ! res.mod );
- assert( res.num == 1 );
+ verify( ! res.mod );
+ verify( res.num == 1 );
}
}
@@ -672,7 +672,7 @@ namespace mongo {
if( cmdLine.pretouch > 1 ) {
/* note: this is bad - should be put in ReplSource. but this is first test... */
static int countdown;
- assert( countdown >= 0 );
+ verify( countdown >= 0 );
if( countdown > 0 ) {
countdown--; // was pretouched on a prev pass
}
@@ -910,7 +910,7 @@ namespace mongo {
log() << "repl ASSERTION failed : syncedTo < nextOpTime" << endl;
log() << "repl syncTo: " << syncedTo.toStringLong() << endl;
log() << "repl nextOpTime: " << nextOpTime.toStringLong() << endl;
- assert(false);
+ verify(false);
}
oplogReader.putBack( op ); // op will be processed in the loop below
nextOpTime = OpTime(); // will reread the op below
@@ -928,7 +928,7 @@ namespace mongo {
log() << "repl: tailing: " << tailing << '\n';
log() << "repl: data too stale, halting replication" << endl;
replInfo = replAllDead = "data too stale halted replication";
- assert( syncedTo < nextOpTime );
+ verify( syncedTo < nextOpTime );
throw SyncException();
}
else {
@@ -1006,7 +1006,7 @@ namespace mongo {
uassert( 10123 , "replication error last applied optime at slave >= nextOpTime from master", false);
}
if ( replSettings.slavedelay && ( unsigned( time( 0 ) ) < nextOpTime.getSecs() + replSettings.slavedelay ) ) {
- assert( justOne );
+ verify( justOne );
oplogReader.putBack( op );
_sleepAdviceTime = nextOpTime.getSecs() + replSettings.slavedelay + 1;
Lock::GlobalWrite lk;
@@ -1183,7 +1183,7 @@ namespace mongo {
}
void OplogReader::tailingQuery(const char *ns, const BSONObj& query, const BSONObj* fields ) {
- assert( !haveCursor() );
+ verify( !haveCursor() );
LOG(2) << "repl: " << ns << ".find(" << query.toString() << ')' << endl;
cursor.reset( _conn->query( ns, query, 0, 0, fields, _tailingQueryOptions ).release() );
}
@@ -1334,7 +1334,7 @@ namespace mongo {
break;
}
}
- assert( syncing == 0 ); // i.e., there is only one sync thread running. we will want to change/fix this.
+ verify( syncing == 0 ); // i.e., there is only one sync thread running. we will want to change/fix this.
syncing++;
}
try {
@@ -1355,7 +1355,7 @@ namespace mongo {
}
{
Lock::GlobalWrite lk;
- assert( syncing == 1 );
+ verify( syncing == 1 );
syncing--;
}
@@ -1484,7 +1484,7 @@ namespace mongo {
}
if ( replSettings.slave ) {
- assert( replSettings.slave == SimpleSlave );
+ verify( replSettings.slave == SimpleSlave );
log(1) << "slave=true" << endl;
boost::thread repl_thread(replSlaveThread);
}
diff --git a/src/mongo/db/repl/consensus.cpp b/src/mongo/db/repl/consensus.cpp
index 3995373f5ef..6c6107b34c7 100644
--- a/src/mongo/db/repl/consensus.cpp
+++ b/src/mongo/db/repl/consensus.cpp
@@ -168,7 +168,7 @@ namespace mongo {
void Consensus::electionFailed(unsigned meid) {
SimpleMutex::scoped_lock lk(lyMutex);
LastYea &L = ly.ref(lk);
- DEV assert( L.who == meid ); // this may not always always hold, so be aware, but adding for now as a quick sanity test
+ DEV verify( L.who == meid ); // this may not always hold, so be aware, but adding for now as a quick sanity test
if( L.who == meid )
L.when = 0;
}
@@ -261,7 +261,7 @@ namespace mongo {
bool Consensus::weAreFreshest(bool& allUp, int& nTies) {
const OpTime ord = theReplSet->lastOpTimeWritten;
nTies = 0;
- assert( !ord.isNull() );
+ verify( !ord.isNull() );
BSONObj cmd = BSON(
"replSetFresh" << 1 <<
"set" << rs.name() <<
@@ -291,7 +291,7 @@ namespace mongo {
OpTime remoteOrd( i->result["opTime"].Date() );
if( remoteOrd == ord )
nTies++;
- assert( remoteOrd <= ord );
+ verify( remoteOrd <= ord );
if( i->result["veto"].trueValue() ) {
BSONElement msg = i->result["errmsg"];
@@ -311,14 +311,14 @@ namespace mongo {
}
}
LOG(1) << "replSet dev we are freshest of up nodes, nok:" << nok << " nTies:" << nTies << rsLog;
- assert( ord <= theReplSet->lastOpTimeWritten ); // <= as this may change while we are working...
+ verify( ord <= theReplSet->lastOpTimeWritten ); // <= as this may change while we are working...
return true;
}
extern time_t started;
void Consensus::multiCommand(BSONObj cmd, list<Target>& L) {
- assert( !rs.lockedByMe() );
+ verify( !rs.lockedByMe() );
mongo::multiCommand(cmd, L);
}
@@ -361,7 +361,7 @@ namespace mongo {
// todo: biggest / highest priority nodes should be the ones that get to not sleep
}
else {
- assert( !rs.lockedByMe() ); // bad to go to sleep locked
+ verify( !rs.lockedByMe() ); // bad to go to sleep locked
unsigned ms = ((unsigned) rand()) % 1000 + 50;
DEV log() << "replSet tie " << nTies << " sleeping a little " << ms << "ms" << rsLog;
sleptLast = true;
@@ -426,9 +426,9 @@ namespace mongo {
}
void Consensus::electSelf() {
- assert( !rs.lockedByMe() );
- assert( !rs.myConfig().arbiterOnly );
- assert( rs.myConfig().slaveDelay == 0 );
+ verify( !rs.lockedByMe() );
+ verify( !rs.myConfig().arbiterOnly );
+ verify( rs.myConfig().slaveDelay == 0 );
try {
_electSelf();
}
diff --git a/src/mongo/db/repl/health.cpp b/src/mongo/db/repl/health.cpp
index 5ad360cf45b..49332005881 100644
--- a/src/mongo/db/repl/health.cpp
+++ b/src/mongo/db/repl/health.cpp
@@ -301,7 +301,7 @@ namespace mongo {
}
const Member *_self = this->_self;
- assert(_self);
+ verify(_self);
{
stringstream s;
/* self row */
@@ -368,7 +368,7 @@ namespace mongo {
vector<BSONObj> v;
const Member *_self = this->_self;
- assert( _self );
+ verify( _self );
MemberState myState = box.getState();
@@ -451,8 +451,8 @@ namespace mongo {
static struct Test : public UnitTest {
void run() {
HealthOptions a,b;
- assert( a == b );
- assert( a.isDefault() );
+ verify( a == b );
+ verify( a.isDefault() );
}
} test;
diff --git a/src/mongo/db/repl/manager.cpp b/src/mongo/db/repl/manager.cpp
index ec970f4b34c..542485d1e03 100644
--- a/src/mongo/db/repl/manager.cpp
+++ b/src/mongo/db/repl/manager.cpp
@@ -35,7 +35,7 @@ namespace mongo {
Member *m = rs->head();
Member *p = 0;
while( m ) {
- DEV assert( m != rs->_self );
+ DEV verify( m != rs->_self );
if( m->state().primary() && m->hbinfo().up() ) {
if( p ) {
two = true;
diff --git a/src/mongo/db/repl/rs.cpp b/src/mongo/db/repl/rs.cpp
index 6c30331431a..6fed4c7ce31 100644
--- a/src/mongo/db/repl/rs.cpp
+++ b/src/mongo/db/repl/rs.cpp
@@ -70,7 +70,7 @@ namespace mongo {
void ReplSetImpl::assumePrimary() {
LOG(2) << "replSet assuming primary" << endl;
- assert( iAmPotentiallyHot() );
+ verify( iAmPotentiallyHot() );
// so we are synchronized with _logOp(). perhaps locking local db only would suffice, but until proven
// will take this route, and this is very rare so it doesn't matter anyway
Lock::GlobalWrite lk;
@@ -215,7 +215,7 @@ namespace mongo {
}
void ReplSetImpl::_fillIsMasterHost(const Member *m, vector<string>& hosts, vector<string>& passives, vector<string>& arbiters) {
- assert( m );
+ verify( m );
if( m->config().hidden )
return;
@@ -248,7 +248,7 @@ namespace mongo {
_fillIsMasterHost(_self, hosts, passives, arbiters);
for( Member *m = _members.head(); m; m = m->next() ) {
- assert( m );
+ verify( m );
_fillIsMasterHost(m, hosts, passives, arbiters);
}
@@ -449,7 +449,7 @@ namespace mongo {
const Member *old = findById(m._id);
if( old ) {
nfound++;
- assert( (int) old->id() == m._id );
+ verify( (int) old->id() == m._id );
if( old->config() != m ) {
additive = false;
}
@@ -493,10 +493,10 @@ namespace mongo {
_cfg = new ReplSetConfig(c);
dassert( &config() == _cfg ); // config() is same thing but const, so we use that when we can for clarity below
- assert( config().ok() );
- assert( _name.empty() || _name == config()._id );
+ verify( config().ok() );
+ verify( _name.empty() || _name == config()._id );
_name = config()._id;
- assert( !_name.empty() );
+ verify( !_name.empty() );
// this is a shortcut for simple changes
if( additive ) {
@@ -546,7 +546,7 @@ namespace mongo {
Member *mi;
members += ( members == "" ? "" : ", " ) + m.h.toString();
if( m.h.isSelf() ) {
- assert( me++ == 0 );
+ verify( me++ == 0 );
mi = new Member(m.h, m._id, &m, true);
if (!reconf) {
log() << "replSet I am " << m.h.toString() << rsLog;
@@ -592,7 +592,7 @@ namespace mongo {
v = cfg.version;
}
}
- assert( highest );
+ verify( highest );
if( !initFromConfig(*highest) )
return false;
@@ -727,7 +727,7 @@ namespace mongo {
if( e.getCode() == 13497 /* removed from set */ ) {
cc().shutdown();
dbexit( EXIT_CLEAN , "removed from replica set" ); // never returns
- assert(0);
+ verify(0);
}
log() << "replSet error unexpected exception in haveNewConfig() : " << e.toString() << rsLog;
_fatal();
@@ -757,9 +757,9 @@ namespace mongo {
void startReplSets(ReplSetCmdline *replSetCmdline) {
Client::initThread("rsStart");
try {
- assert( theReplSet == 0 );
+ verify( theReplSet == 0 );
if( replSetCmdline == 0 ) {
- assert(!replSet);
+ verify(!replSet);
return;
}
replLocalAuth();
diff --git a/src/mongo/db/repl/rs.h b/src/mongo/db/repl/rs.h
index 21ae0a2e5e5..26e1b3dd24b 100644
--- a/src/mongo/db/repl/rs.h
+++ b/src/mongo/db/repl/rs.h
@@ -215,7 +215,7 @@ namespace mongo {
class RSBase : boost::noncopyable {
public:
const unsigned magic;
- void assertValid() { assert( magic == 0x12345677 ); }
+ void assertValid() { verify( magic == 0x12345677 ); }
private:
mongo::mutex m;
int _locked;
@@ -237,14 +237,14 @@ namespace mongo {
return; // recursive is ok...
sl.reset( new scoped_lock(rsbase.m) );
- DEV assert(rsbase._locked == 0);
+ DEV verify(rsbase._locked == 0);
rsbase._locked++;
rsbase._lockedByMe.set(true);
}
~lock() {
if( sl.get() ) {
- assert( rsbase._lockedByMe.get() );
- DEV assert(rsbase._locked == 1);
+ verify( rsbase._lockedByMe.get() );
+ DEV verify(rsbase._locked == 1);
rsbase._lockedByMe.set(false);
rsbase._locked--;
}
@@ -305,7 +305,7 @@ namespace mongo {
void setSelfPrimary(const Member *self) { change(MemberState::RS_PRIMARY, self); }
void setOtherPrimary(const Member *mem) {
rwlock lk(m, true);
- assert( !sp.state.primary() );
+ verify( !sp.state.primary() );
sp.primary = mem;
}
void noteRemoteIsPrimary(const Member *remote) {
@@ -555,7 +555,7 @@ namespace mongo {
bool freeze(int secs) { return _freeze(secs); }
string selfFullName() {
- assert( _self );
+ verify( _self );
return _self->fullName();
}
@@ -661,7 +661,7 @@ namespace mongo {
inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig::MemberCfg *c, bool self) :
_config(*c), _h(h), _hbinfo(ord) {
- assert(c);
+ verify(c);
if( self )
_hbinfo.health = 1.0;
}
diff --git a/src/mongo/db/repl/rs_config.cpp b/src/mongo/db/repl/rs_config.cpp
index 2e7b1048e2c..a7e483cec2c 100644
--- a/src/mongo/db/repl/rs_config.cpp
+++ b/src/mongo/db/repl/rs_config.cpp
@@ -234,7 +234,7 @@ namespace mongo {
*/
/*static*/
bool ReplSetConfig::legalChange(const ReplSetConfig& o, const ReplSetConfig& n, string& errmsg) {
- assert( theReplSet );
+ verify( theReplSet );
if( o._id != n._id ) {
errmsg = "set name may not change";
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 29b7ce9f23c..2a751b8f3cc 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -252,8 +252,8 @@ namespace mongo {
isyncassert( "getLastOp is empty ", !minValid.isEmpty() );
OpTime mvoptime = minValid["ts"]._opTime();
- assert( !mvoptime.isNull() );
- assert( mvoptime >= startingTS );
+ verify( !mvoptime.isNull() );
+ verify( mvoptime >= startingTS );
// apply startingTS..mvoptime portion of the oplog
{
@@ -283,7 +283,7 @@ namespace mongo {
sethbmsg("initial sync finishing up",0);
- assert( !box.getState().primary() ); // wouldn't make sense if we were.
+ verify( !box.getState().primary() ); // wouldn't make sense if we were.
{
writelock lk("local.");
diff --git a/src/mongo/db/repl/rs_initiate.cpp b/src/mongo/db/repl/rs_initiate.cpp
index 77bc6c03938..53dd94b347a 100644
--- a/src/mongo/db/repl/rs_initiate.cpp
+++ b/src/mongo/db/repl/rs_initiate.cpp
@@ -116,7 +116,7 @@ namespace mongo {
if( !initial && failures <= allowableFailures ) {
const Member* m = theReplSet->findById( i->_id );
if( m ) {
- assert( m->h().toString() == i->h.toString() );
+ verify( m->h().toString() == i->h.toString() );
}
// it's okay if the down member isn't part of the config,
// we might be adding a new member that isn't up yet
diff --git a/src/mongo/db/repl/rs_optime.h b/src/mongo/db/repl/rs_optime.h
index f0ca56927ad..792e4997372 100644
--- a/src/mongo/db/repl/rs_optime.h
+++ b/src/mongo/db/repl/rs_optime.h
@@ -38,7 +38,7 @@ namespace mongo {
bool initiated() const { return ord > 0; }
void initiate() {
- assert( !initiated() );
+ verify( !initiated() );
ord = 1000000;
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 203afe7901e..1e640e4cc2a 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -200,10 +200,10 @@ namespace mongo {
}
last = time(0);
- assert( d.dbMutex.atLeastReadLocked() );
+ verify( d.dbMutex.atLeastReadLocked() );
Client::Context c(rsoplog);
NamespaceDetails *nsd = nsdetails(rsoplog);
- assert(nsd);
+ verify(nsd);
ReverseCappedCursor u(nsd);
if( !u.ok() )
throw "our oplog empty or unreadable";
@@ -336,7 +336,7 @@ namespace mongo {
for( set<DocID>::iterator i = h.toRefetch.begin(); i != h.toRefetch.end(); i++ ) {
d = *i;
- assert( !d._id.eoo() );
+ verify( !d._id.eoo() );
{
/* TODO : slow. lots of round trips. */
@@ -375,7 +375,7 @@ namespace mongo {
bool warn = false;
- assert( !h.commonPointOurDiskloc.isNull() );
+ verify( !h.commonPointOurDiskloc.isNull() );
mongo::d.dbMutex.assertWriteLocked();
@@ -463,7 +463,7 @@ namespace mongo {
const DocID& d = i->first;
bo pattern = d._id.wrap(); // { _id : ... }
try {
- assert( d.ns && *d.ns );
+ verify( d.ns && *d.ns );
if( h.collectionsToResync.count(d.ns) ) {
/* we just synced this entire collection */
continue;
@@ -592,8 +592,8 @@ namespace mongo {
}
unsigned ReplSetImpl::_syncRollback(OplogReader&r) {
- assert( !lockedByMe() );
- assert( !d.dbMutex.atLeastReadLocked() );
+ verify( !lockedByMe() );
+ verify( !d.dbMutex.atLeastReadLocked() );
sethbmsg("rollback 0");
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index 122bb7539d9..40818feef6a 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -286,7 +286,7 @@ namespace mongo {
Member *target = 0, *stale = 0;
BSONObj oldest;
- assert(r.conn() == 0);
+ verify(r.conn() == 0);
while ((target = getMemberToSyncTo()) != 0) {
string current = target->fullName();
@@ -402,7 +402,7 @@ namespace mongo {
}
while( 1 ) {
- assert( !Lock::isLocked() );
+ verify( !Lock::isLocked() );
{
Timer timeInWriteLock;
scoped_ptr<writelock> lk;
@@ -488,14 +488,14 @@ namespace mongo {
if( str::contains(ns, ".$cmd") ) {
// a command may need a global write lock. so we will conservatively go ahead and grab one here. suboptimal. :-(
lk.reset();
- assert( !Lock::isLocked() );
+ verify( !Lock::isLocked() );
lk.reset( new writelock() );
}
else if( !Lock::isWriteLocked(ns) || Lock::isW() ) {
// we don't relock on every single op to try to be faster. however if switching collections, we have to.
// note here we must reset to 0 first to assure the old object is destructed before our new operator invocation.
lk.reset();
- assert( !Lock::isLocked() );
+ verify( !Lock::isLocked() );
lk.reset( new writelock(ns) );
}
}
@@ -666,7 +666,7 @@ namespace mongo {
static int n;
if( n != 0 ) {
log() << "replSet ERROR : more than one sync thread?" << rsLog;
- assert( n == 0 );
+ verify( n == 0 );
}
n++;
@@ -751,7 +751,7 @@ namespace mongo {
}
}
- assert(slave->slave);
+ verify(slave->slave);
const Member *target = rs->_currentSyncTarget;
if (!target || rs->box.getState().primary()
diff --git a/src/mongo/db/repl_block.cpp b/src/mongo/db/repl_block.cpp
index 4087e2e8bb4..3be01c7d3db 100644
--- a/src/mongo/db/repl_block.cpp
+++ b/src/mongo/db/repl_block.cpp
@@ -188,10 +188,10 @@ namespace mongo {
if ( lastOp.isNull() )
return;
- assert( str::startsWith(ns, "local.oplog.") );
+ verify( str::startsWith(ns, "local.oplog.") );
Client * c = curop.getClient();
- assert(c);
+ verify(c);
BSONObj rid = c->getRemoteID();
if ( rid.isEmpty() )
return;
diff --git a/src/mongo/db/replutil.h b/src/mongo/db/replutil.h
index 0ebcaea3dc5..aa8c25ad5f5 100644
--- a/src/mongo/db/replutil.h
+++ b/src/mongo/db/replutil.h
@@ -66,7 +66,7 @@ namespace mongo {
return true;
if ( ! dbname ) {
Database *database = cc().database();
- assert( database );
+ verify( database );
dbname = database->name.c_str();
}
return strcmp( dbname , "local" ) == 0;
@@ -74,7 +74,7 @@ namespace mongo {
inline bool isMasterNs( const char *ns ) {
if ( _isMaster() )
return true;
- assert( ns );
+ verify( ns );
if ( ! str::startsWith( ns , "local" ) )
return false;
return ns[5] == 0 || ns[5] == '.';
diff --git a/src/mongo/db/restapi.cpp b/src/mongo/db/restapi.cpp
index 578763632fd..560dbce97c5 100644
--- a/src/mongo/db/restapi.cpp
+++ b/src/mongo/db/restapi.cpp
@@ -243,7 +243,7 @@ namespace mongo {
}
writelocktry wl(10000);
- assert( wl.got() );
+ verify( wl.got() );
Client::Context cx( "admin.system.users", dbpath, false );
}
diff --git a/src/mongo/db/scanandorder.cpp b/src/mongo/db/scanandorder.cpp
index fa6734e5292..b93b632f035 100644
--- a/src/mongo/db/scanandorder.cpp
+++ b/src/mongo/db/scanandorder.cpp
@@ -26,7 +26,7 @@ namespace mongo {
const unsigned ScanAndOrder::MaxScanAndOrderBytes = 32 * 1024 * 1024;
void ScanAndOrder::add(const BSONObj& o, const DiskLoc* loc) {
- assert( o.isValid() );
+ verify( o.isValid() );
BSONObj k;
try {
k = _order.getKeyFromObject(o);
@@ -48,7 +48,7 @@ namespace mongo {
return;
}
BestMap::iterator i;
- assert( _best.end() != _best.begin() );
+ verify( _best.end() != _best.begin() );
i = _best.end();
i--;
_addIfBetter(k, o, i, loc);
@@ -98,7 +98,7 @@ namespace mongo {
void ScanAndOrder::_validateAndUpdateApproxSize( const int approxSizeDelta ) {
// note : adjust when bson return limit adjusts. note this limit should be a bit higher.
int newApproxSize = _approxSize + approxSizeDelta;
- assert( newApproxSize >= 0 );
+ verify( newApproxSize >= 0 );
uassert( ScanAndOrderMemoryLimitExceededAssertionCode,
"too much data for sort() with no index. add an index or specify a smaller limit",
(unsigned)newApproxSize < MaxScanAndOrderBytes );
diff --git a/src/mongo/db/scanandorder.h b/src/mongo/db/scanandorder.h
index 54775d770d5..acb19d21ad0 100644
--- a/src/mongo/db/scanandorder.h
+++ b/src/mongo/db/scanandorder.h
@@ -34,7 +34,7 @@ namespace mongo {
FieldRangeVector _keyCutter;
public:
KeyType(const BSONObj &pattern, const FieldRangeSet &frs):
- _spec((assert(!pattern.isEmpty()),pattern)),
+ _spec((verify(!pattern.isEmpty()),pattern)),
_keyCutter(frs, _spec, 1) {
}
diff --git a/src/mongo/db/security_common.cpp b/src/mongo/db/security_common.cpp
index a480919c27e..fa0e2931e58 100644
--- a/src/mongo/db/security_common.cpp
+++ b/src/mongo/db/security_common.cpp
@@ -109,7 +109,7 @@ namespace mongo {
void CmdAuthenticate::authenticate(const string& dbname, const string& user, const bool readOnly) {
ClientBasic* c = ClientBasic::getCurrent();
- assert(c);
+ verify(c);
AuthenticationInfo *ai = c->getAuthenticationInfo();
if ( readOnly ) {
diff --git a/src/mongo/db/stats/snapshots.cpp b/src/mongo/db/stats/snapshots.cpp
index 900cc4ff1ad..38bd348d5fb 100644
--- a/src/mongo/db/stats/snapshots.cpp
+++ b/src/mongo/db/stats/snapshots.cpp
@@ -37,7 +37,7 @@ namespace mongo {
SnapshotDelta::SnapshotDelta( const SnapshotData& older , const SnapshotData& newer )
: _older( older ) , _newer( newer ) {
- assert( _newer._created > _older._created );
+ verify( _newer._created > _older._created );
_elapsed = _newer._created - _older._created;
}
@@ -45,7 +45,7 @@ namespace mongo {
return Top::CollectionData( _older._globalUsage , _newer._globalUsage );
}
Top::UsageMap SnapshotDelta::collectionUsageDiff() {
- assert( _newer._created > _older._created );
+ verify( _newer._created > _older._created );
Top::UsageMap u;
for ( Top::UsageMap::const_iterator i=_newer._usage.begin(); i != _newer._usage.end(); i++ ) {
diff --git a/src/mongo/db/stats/top.h b/src/mongo/db/stats/top.h
index dc21927b7d3..22470cb5d90 100644
--- a/src/mongo/db/stats/top.h
+++ b/src/mongo/db/stats/top.h
@@ -18,8 +18,6 @@
#pragma once
#include <boost/date_time/posix_time/posix_time.hpp>
-#undef assert
-#define assert MONGO_assert
namespace mongo {
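The two lines deleted above were a workaround this rename retires: each include of <cassert> (pulled in here via the boost headers) first #undefs and then re-defines assert, silently putting the standard macro back in force over a project-wide MONGO_assert. With the project macro renamed to verify, the re-pointing dance can simply be dropped, here and in framework.cpp further down. A compilable sketch of the fragility, with my_project_assert as a hypothetical stand-in:

    #include <cassert>

    #undef assert
    #define assert my_project_assert  // what the old headers did (MONGO_assert)

    #include <cassert>                // any later include, e.g. via boost:
                                      // assert is the standard macro again

    int main() {
        assert(2 + 2 == 4);           // expands to the standard assert, not
                                      // my_project_assert, despite the #define
        return 0;
    }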
diff --git a/src/mongo/db/taskqueue.h b/src/mongo/db/taskqueue.h
index 1107d479448..20fad90db0b 100644
--- a/src/mongo/db/taskqueue.h
+++ b/src/mongo/db/taskqueue.h
@@ -84,7 +84,7 @@ namespace mongo {
}
_drain( _queues[toDrain] );
- assert( _queues[toDrain].empty() );
+ verify( _queues[toDrain].empty() );
}
private:
@@ -102,7 +102,7 @@ namespace mongo {
MT::go(v);
}
queue.clear();
- DEV assert( queue.capacity() == oldCap ); // just checking that clear() doesn't deallocate, we don't want that
+ DEV verify( queue.capacity() == oldCap ); // just checking that clear() doesn't deallocate, we don't want that
}
};
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
index a1bac3bff70..587aae53767 100644
--- a/src/mongo/dbtests/basictests.cpp
+++ b/src/mongo/dbtests/basictests.cpp
@@ -471,7 +471,7 @@ namespace BasicTests {
// if that changes, should put this on the stack
{
Database * db = new Database( "dbtests_basictests_ownsns" , isNew );
- assert( isNew );
+ verify( isNew );
ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x" ) );
ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x.y" ) );
@@ -657,12 +657,12 @@ namespace BasicTests {
const char * c = "this is a test";
std::string s;
size_t len = compress(c, strlen(c)+1, &s);
- assert( len > 0 );
+ verify( len > 0 );
std::string out;
bool ok = uncompress(s.c_str(), s.size(), &out);
- assert(ok);
- assert( strcmp(out.c_str(), c) == 0 );
+ verify(ok);
+ verify( strcmp(out.c_str(), c) == 0 );
}
} ctest1;
diff --git a/src/mongo/dbtests/btreetests.inl b/src/mongo/dbtests/btreetests.inl
index 724287263b6..cde943d51ad 100644
--- a/src/mongo/dbtests/btreetests.inl
+++ b/src/mongo/dbtests/btreetests.inl
@@ -30,8 +30,8 @@
_context( ns() ) {
{
bool f = false;
- assert( f = true );
- massert( 10402 , "assert is misdefined", f);
+ verify( f = true );
+ massert( 10402 , "verify is misdefined", f);
}
}
virtual ~Base() {}
@@ -53,7 +53,7 @@
}
IndexDetails& id() {
NamespaceDetails *nsd = nsdetails( ns() );
- assert( nsd );
+ verify( nsd );
return nsd->idx( 1 );
}
void checkValid( int nKeys ) {
@@ -112,7 +112,7 @@
return id().keyPattern();
}
const BtreeBucket *child( const BtreeBucket *b, int i ) {
- assert( i <= b->nKeys() );
+ verify( i <= b->nKeys() );
DiskLoc d;
if ( i == b->nKeys() ) {
d = b->getNextChild();
@@ -120,7 +120,7 @@
else {
d = b->keyNode( i ).prevChildBucket;
}
- assert( !d.isNull() );
+ verify( !d.isNull() );
return d.btree();
}
void checkKey( char i ) {
@@ -621,7 +621,7 @@
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "bb" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
@@ -639,7 +639,7 @@
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "bb" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
@@ -657,7 +657,7 @@
ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "c" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
@@ -675,7 +675,7 @@
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "bb" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
// child does not currently replace parent in this case
@@ -694,7 +694,7 @@
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "ff" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
// child does not currently replace parent in this case
@@ -713,7 +713,7 @@
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "bb" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
@@ -731,7 +731,7 @@
ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "g" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
@@ -749,7 +749,7 @@
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "ee" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
@@ -767,7 +767,7 @@
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "ee" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
@@ -785,7 +785,7 @@
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "ee" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
@@ -803,7 +803,7 @@
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "c" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
@@ -822,7 +822,7 @@
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "c" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
// dump();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
@@ -838,7 +838,7 @@
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "c" );
- assert( unindex( k ) );
+ verify( unindex( k ) );
long long keyCount = bt()->fullValidate( dl(), order(), 0, true );
ASSERT_EQUALS( 7, keyCount );
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
@@ -926,7 +926,7 @@
while( size < targetSize ) {
int space = targetSize - size;
int nextSize = space - sizeof( _KeyNode );
- assert( nextSize > 0 );
+ verify( nextSize > 0 );
BSONObj newKey = key( startKey++, nextSize );
t->push( newKey, DiskLoc() );
size += BtreeBucket::KeyOwned(newKey).dataSize() + sizeof( _KeyNode );
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index 860eb7e7e5c..2add3c5cd6b 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -61,10 +61,10 @@ namespace DirectClientTests {
//cout << cmd.toString() << endl;
bool ok = client().runCommand("a", cmd, info);
//cout << info.toString() << endl;
- assert(ok);
+ verify(ok);
}
- assert( client().dropCollection(ns) );
+ verify( client().dropCollection(ns) );
}
}
};
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index 6b9c3d8b546..67cd770e231 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -20,10 +20,6 @@
#include "../util/version.h"
#include <boost/program_options.hpp>
#include <boost/filesystem/operations.hpp>
-
-#undef assert
-#define assert MONGO_assert
-
#include "framework.h"
#include "../util/file_allocator.h"
#include "../db/dur.h"
@@ -365,7 +361,7 @@ namespace mongo {
for ( list<string>::iterator i=torun.begin(); i!=torun.end(); i++ ) {
string name = *i;
Suite * s = (*_suites)[name];
- assert( s );
+ verify( s );
log() << "going to run suite: " << name << endl;
results.push_back( s->run( filter ) );
@@ -424,7 +420,7 @@ namespace mongo {
}
void fail( const char * exp , const char * file , unsigned line ) {
- assert(0);
+ verify(0);
}
MyAssertionException * MyAsserts::getBase() {
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index 97cbe33fa91..a39c749fd53 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -1414,11 +1414,11 @@ namespace JsobjTests {
public:
void run() {
BSONObj x = BSON( "a" << 10 << "b" << 11 );
- assert( BSON( "a" << 10 ).woCompare( x.extractFields( BSON( "a" << 1 ) ) ) == 0 );
- assert( BSON( "b" << 11 ).woCompare( x.extractFields( BSON( "b" << 1 ) ) ) == 0 );
- assert( x.woCompare( x.extractFields( BSON( "a" << 1 << "b" << 1 ) ) ) == 0 );
+ verify( BSON( "a" << 10 ).woCompare( x.extractFields( BSON( "a" << 1 ) ) ) == 0 );
+ verify( BSON( "b" << 11 ).woCompare( x.extractFields( BSON( "b" << 1 ) ) ) == 0 );
+ verify( x.woCompare( x.extractFields( BSON( "a" << 1 << "b" << 1 ) ) ) == 0 );
- assert( (string)"a" == x.extractFields( BSON( "a" << 1 << "c" << 1 ) ).firstElementFieldName() );
+ verify( (string)"a" == x.extractFields( BSON( "a" << 1 << "c" << 1 ) ).firstElementFieldName() );
}
};
@@ -1488,12 +1488,12 @@ namespace JsobjTests {
while ( i->more() ) {
pair<BSONObj,DiskLoc> p = i->next();
if ( num == 0 )
- assert( p.first["x"].number() == 2 );
+ verify( p.first["x"].number() == 2 );
else if ( num <= 2 ) {
- assert( p.first["x"].number() == 5 );
+ verify( p.first["x"].number() == 5 );
}
else if ( num == 3 )
- assert( p.first["x"].number() == 10 );
+ verify( p.first["x"].number() == 10 );
else
ASSERT( 0 );
num++;
@@ -1520,13 +1520,13 @@ namespace JsobjTests {
while ( i->more() ) {
pair<BSONObj,DiskLoc> p = i->next();
if ( num == 0 ) {
- assert( p.first["x"].number() == 2 );
+ verify( p.first["x"].number() == 2 );
ASSERT_EQUALS( p.second.toString() , "3:1" );
}
else if ( num <= 2 )
- assert( p.first["x"].number() == 5 );
+ verify( p.first["x"].number() == 5 );
else if ( num == 3 ) {
- assert( p.first["x"].number() == 10 );
+ verify( p.first["x"].number() == 10 );
ASSERT_EQUALS( p.second.toString() , "5:b" );
}
else
@@ -1544,7 +1544,7 @@ namespace JsobjTests {
sorter.sort();
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
- assert( ! i->more() );
+ verify( ! i->more() );
}
};
@@ -1567,12 +1567,12 @@ namespace JsobjTests {
while ( i->more() ) {
pair<BSONObj,DiskLoc> p = i->next();
if ( num == 0 )
- assert( p.first["x"].number() == 2 );
+ verify( p.first["x"].number() == 2 );
else if ( num <= 3 ) {
- assert( p.first["x"].number() == 5 );
+ verify( p.first["x"].number() == 5 );
}
else if ( num == 4 )
- assert( p.first["x"].number() == 10 );
+ verify( p.first["x"].number() == 10 );
else
ASSERT( 0 );
ASSERT_EQUALS( num , p.second.getOfs() );
@@ -1601,10 +1601,10 @@ namespace JsobjTests {
pair<BSONObj,DiskLoc> p = i->next();
num++;
double cur = p.first["x"].number();
- assert( cur >= prev );
+ verify( cur >= prev );
prev = cur;
}
- assert( num == 10000 );
+ verify( num == 10000 );
}
};
@@ -1626,10 +1626,10 @@ namespace JsobjTests {
pair<BSONObj,DiskLoc> p = i->next();
num++;
double cur = p.first["x"].number();
- assert( cur >= prev );
+ verify( cur >= prev );
prev = cur;
}
- assert( num == total );
+ verify( num == total );
ASSERT( sorter.numFiles() > 2 );
}
};
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 9782eedaacb..dd728132216 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -687,7 +687,7 @@ namespace JSTests {
BSONObjBuilder result;
string errmsg;
dbEval( "test", cmd, result, errmsg);
- assert(0);
+ verify(0);
}
DBDirectClient client;
@@ -814,7 +814,7 @@ namespace JSTests {
{
BSONObj fromA = client.findOne( _a , BSONObj() );
- assert( fromA.valid() );
+ verify( fromA.valid() );
//cout << "Froma : " << fromA << endl;
BSONObjBuilder b;
b.append( "b" , 18 );
diff --git a/src/mongo/dbtests/macrotests.cpp b/src/mongo/dbtests/macrotests.cpp
index f547c851677..27515296830 100644
--- a/src/mongo/dbtests/macrotests.cpp
+++ b/src/mongo/dbtests/macrotests.cpp
@@ -22,26 +22,26 @@
# error malloc defined 0
#endif
-#ifdef assert
-# error assert defined 1
+#ifdef verify
+# error verify defined 1
#endif
-#include "../client/parallel.h" //uses assert
+#include "../client/parallel.h" //uses verify
-#ifdef assert
-# error assert defined 2
+#ifdef verify
+# error verify defined 2
#endif
#include "../client/redef_macros.h"
-#ifndef assert
-# error assert not defined 3
+#ifndef verify
+# error verify not defined 3
#endif
#include "../client/undef_macros.h"
-#ifdef assert
-# error assert defined 3
+#ifdef verify
+# error verify defined 3
#endif
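macrotests.cpp now tracks the renamed macro through the whole include round trip. A compilable sketch of the contract it checks, assuming only that client/redef_macros.h re-establishes verify as MONGO_verify and client/undef_macros.h removes it -- the macro body below is illustrative, not the real one:

    #include <cstdio>
    #include <cstdlib>

    #define MONGO_verify(expr) \
        ((expr) ? (void)0 : (std::fprintf(stderr, "verify failed: %s\n", #expr), std::abort()))

    #define verify MONGO_verify   // the effect of including redef_macros.h

    int main() {
        verify(1 + 1 == 2);       // project macro in force

    #undef verify                 // the effect of including undef_macros.h
    #ifdef verify
    #   error verify should not leak past undef_macros.h
    #endif
        return 0;
    }

Keeping the undef/redef pair balanced around third-party includes is exactly what the #ifdef/#error probes in the test above enforce.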
diff --git a/src/mongo/dbtests/mmaptests.cpp b/src/mongo/dbtests/mmaptests.cpp
index 50da7391884..c9409c59138 100644
--- a/src/mongo/dbtests/mmaptests.cpp
+++ b/src/mongo/dbtests/mmaptests.cpp
@@ -49,10 +49,10 @@ namespace MMapTests {
{
MongoMMF f;
unsigned long long len = 256 * 1024 * 1024;
- assert( f.create(fn, len, /*sequential*/false) );
+ verify( f.create(fn, len, /*sequential*/false) );
{
char *p = (char *) f.getView();
- assert(p);
+ verify(p);
// write something to the private view as a test
if( cmdLine.dur )
MemoryMappedFile::makeWritable(p, 6);
@@ -81,10 +81,10 @@ namespace MMapTests {
Timer t;
for( int i = 0; i < N; i++ ) {
MongoMMF f;
- assert( f.open(fn, i%4==1) );
+ verify( f.open(fn, i%4==1) );
{
char *p = (char *) f.getView();
- assert(p);
+ verify(p);
if( cmdLine.dur )
MemoryMappedFile::makeWritable(p, 4);
strcpy(p, "zzz");
@@ -93,7 +93,7 @@ namespace MMapTests {
char *w = (char *) f.view_write();
if( i % 2 == 0 )
++(*w);
- assert( w[6] == 'w' );
+ verify( w[6] == 'w' );
}
}
if( t.millis() > 10000 ) {
@@ -122,7 +122,7 @@ namespace MMapTests {
MemoryMappedFile f;
char *p = (char *) f.create(fn, 1024 * 1024 * 1024, true);
- assert(p);
+ verify(p);
strcpy(p, "hello");
{
@@ -141,7 +141,7 @@ namespace MMapTests {
char *q;
for( int i = 0; i < 1000; i++ ) {
q = (char *) f.testGetCopyOnWriteView();
- assert( q );
+ verify( q );
if( i == 999 ) {
strcpy(q+2, "there");
}
@@ -169,7 +169,7 @@ namespace MMapTests {
Timer t;
char *q = (char *) f.testGetCopyOnWriteView();
for( int i = 0; i < 10; i++ ) {
- assert( q );
+ verify( q );
memset(q+100, 'c', 200 * 1024 * 1024);
}
f.testCloseCopyOnWriteView(q);
@@ -182,7 +182,7 @@ namespace MMapTests {
Timer t;
for( int i = 0; i < 10; i++ ) {
char *q = (char *) f.testGetCopyOnWriteView();
- assert( q );
+ verify( q );
memset(q+100, 'c', 200 * 1024 * 1024);
f.testCloseCopyOnWriteView(q);
}
@@ -195,7 +195,7 @@ namespace MMapTests {
Timer t;
for( int i = 0; i < 100; i++ ) {
char *q = (char *) f.testGetCopyOnWriteView();
- assert( q );
+ verify( q );
memset(q+100, 'c', 20 * 1024 * 1024);
f.testCloseCopyOnWriteView(q);
}
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index 4d2f01cb609..fb83742beef 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -348,7 +348,7 @@ namespace PdfileTests {
bool isnew;
Database * d = dbHolderW().getOrCreate( dbname , dbpath , isnew );
- assert( d );
+ verify( d );
int big = 10 * 1024;
//int small = 1024;
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
index 0b8212d0b5d..fb47dd5de7a 100644
--- a/src/mongo/dbtests/perftests.cpp
+++ b/src/mongo/dbtests/perftests.cpp
@@ -106,7 +106,7 @@ namespace PerfTests {
d.defer(v);
}
d.invoke();
- assert( x == tot );
+ verify( x == tot );
}
};
int TaskQueueTest::tot;
@@ -155,7 +155,7 @@ namespace PerfTests {
if( c->connect("perfdb.10gen.cc", err) ) {
if( !c->auth("perf", "perf", pwd, err) ) {
cout << "info: authentication with stats db failed: " << err << endl;
- assert(false);
+ verify(false);
}
conn = c;
@@ -469,8 +469,8 @@ namespace PerfTests {
{}
virtual bool showDurStats() { return false; }
void timed() {
- assert( a.woEqual(b) );
- assert( !a.woEqual(c) );
+ verify( a.woEqual(b) );
+ verify( !a.woEqual(c) );
}
};
@@ -907,7 +907,7 @@ namespace PerfTests {
client().insert( ns(), o );
}
void post() {
- assert( client().count(ns()) == 1 );
+ verify( client().count(ns()) == 1 );
}
};
@@ -938,7 +938,7 @@ namespace PerfTests {
}
void post() {
#if !defined(_DEBUG)
- assert( client().count(ns()) > 50 );
+ verify( client().count(ns()) > 50 );
#endif
}
};
@@ -1026,10 +1026,10 @@ namespace PerfTests {
string fn = "/tmp/t1";
MongoMMF f;
unsigned long long len = 1 * 1024 * 1024;
- assert( f.create(fn, len, /*sequential*/rand()%2==0) );
+ verify( f.create(fn, len, /*sequential*/rand()%2==0) );
{
char *p = (char *) f.getView();
- assert(p);
+ verify(p);
// write something to the private view as a test
strcpy(p, "hello");
}
diff --git a/src/mongo/dbtests/queryoptimizertests.cpp b/src/mongo/dbtests/queryoptimizertests.cpp
index f9a168b0ab2..8e836bac79b 100644
--- a/src/mongo/dbtests/queryoptimizertests.cpp
+++ b/src/mongo/dbtests/queryoptimizertests.cpp
@@ -88,7 +88,7 @@ namespace QueryOptimizerTests {
if ( d->idx(i).keyPattern() == key /*indexName() == name*/ || ( d->idx(i).isIdIndex() && IndexDetails::isIdIndexPattern( key ) ) )
return &d->idx(i);
}
- assert( false );
+ verify( false );
return 0;
}
int indexno( const BSONObj &key ) {
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index c7dcbb76b05..b6d327f1805 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -917,7 +917,7 @@ namespace QueryTests {
auto_ptr< DBClientCursor > cursor = client().query( ns, Query().sort( "7" ) );
while ( cursor->more() ) {
BSONObj o = cursor->next();
- assert( o.valid() );
+ verify( o.valid() );
//cout << " foo " << o << endl;
}
diff --git a/src/mongo/dbtests/queryutiltests.cpp b/src/mongo/dbtests/queryutiltests.cpp
index 20723cb3ac2..60eb292630e 100644
--- a/src/mongo/dbtests/queryutiltests.cpp
+++ b/src/mongo/dbtests/queryutiltests.cpp
@@ -1108,7 +1108,7 @@ namespace QueryUtilTests {
if ( d->idx(i).keyPattern() == key /*indexName() == name*/ || ( d->idx(i).isIdIndex() && IndexDetails::isIdIndexPattern( key ) ) )
return &d->idx(i);
}
- assert( false );
+ verify( false );
return 0;
}
int indexno( const BSONObj &key ) {
diff --git a/src/mongo/dbtests/replsettests.cpp b/src/mongo/dbtests/replsettests.cpp
index 186355acc8d..8891198e9a2 100644
--- a/src/mongo/dbtests/replsettests.cpp
+++ b/src/mongo/dbtests/replsettests.cpp
@@ -164,7 +164,7 @@ namespace ReplSetTests {
sync.applyOp(obj, o1);
BSONObj fin = findOne();
- assert(fin["x"].Number() == 456);
+ verify(fin["x"].Number() == 456);
}
};
@@ -203,7 +203,7 @@ namespace ReplSetTests {
b.append("ns", _ns);
BSONObj o = b.obj();
- assert(!apply(o));
+ verify(!apply(o));
return o;
}
public:
@@ -232,7 +232,7 @@ namespace ReplSetTests {
BSONObj op = updateFail();
Sync s("");
- assert(!s.shouldRetry(op));
+ verify(!s.shouldRetry(op));
}
};
@@ -250,14 +250,14 @@ namespace ReplSetTests {
b.append("o2", BSON("x" << 456));
b.append("ns", cappedNs());
- assert(apply(b.obj()));
+ verify(apply(b.obj()));
}
void insert() {
Client::Context ctx( cappedNs() );
BSONObj o = BSON("x" << 456);
DiskLoc loc = theDataFileMgr.insert( cappedNs().c_str(), o.objdata(), o.objsize(), false );
- assert(!loc.isNull());
+ verify(!loc.isNull());
}
public:
virtual ~CappedUpdate() {}
@@ -270,12 +270,12 @@ namespace ReplSetTests {
DBDirectClient client;
int count = (int) client.count(cappedNs(), BSONObj());
- assert(count > 1);
+ verify(count > 1);
// Just to be sure, no _id index, right?
Client::Context ctx(cappedNs());
NamespaceDetails *nsd = nsdetails(cappedNs().c_str());
- assert(nsd->findIdIndex() == -1);
+ verify(nsd->findIdIndex() == -1);
}
};
@@ -289,7 +289,7 @@ namespace ReplSetTests {
b.append("op", "i");
b.append("o", BSON("_id" << 123 << "x" << 456));
b.append("ns", cappedNs());
- assert(apply(b.obj()));
+ verify(apply(b.obj()));
}
public:
virtual ~CappedInsert() {}
@@ -302,7 +302,7 @@ namespace ReplSetTests {
// Just to be sure, no _id index, right?
Client::Context ctx(cappedNs());
NamespaceDetails *nsd = nsdetails(cappedNs().c_str());
- assert(nsd->findIdIndex() == -1);
+ verify(nsd->findIdIndex() == -1);
}
};
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 6ac5305cd00..58956729eac 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -75,7 +75,7 @@ namespace ReplTests {
}
void checkAll( const BSONObj &o ) const {
auto_ptr< DBClientCursor > c = client()->query( ns(), o );
- assert( c->more() );
+ verify( c->more() );
while( c->more() ) {
check( o, c->next() );
}
@@ -1123,17 +1123,17 @@ namespace ReplTests {
public:
void run() {
ReplSetConfig::MemberCfg m1, m2;
- assert(m1 == m2);
+ verify(m1 == m2);
m1.tags["x"] = "foo";
- assert(m1 != m2);
+ verify(m1 != m2);
m2.tags["y"] = "bar";
- assert(m1 != m2);
+ verify(m1 != m2);
m1.tags["y"] = "bar";
- assert(m1 != m2);
+ verify(m1 != m2);
m2.tags["x"] = "foo";
- assert(m1 == m2);
+ verify(m1 == m2);
m1.tags.clear();
- assert(m1 != m2);
+ verify(m1 != m2);
}
};
@@ -1165,16 +1165,16 @@ namespace ReplTests {
catch (DBException&) {
threw = true;
}
- assert(threw);
+ verify(threw);
// now this should succeed
SyncTest t;
- assert(t.shouldRetry(o));
- assert(!client()->findOne(ns(), BSON("_id" << "on remote")).isEmpty());
+ verify(t.shouldRetry(o));
+ verify(!client()->findOne(ns(), BSON("_id" << "on remote")).isEmpty());
// force it not to find an obj
t.returnEmpty = true;
- assert(!t.shouldRetry(o));
+ verify(!t.shouldRetry(o));
}
};
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 9b38e12badf..a1db20bee58 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -381,20 +381,20 @@ namespace ThreadedTests {
cout << "A : " << &x1 << endl;
boost::thread t1( boost::bind( worker1 , &lk , &x1 ) );
while ( ! x1 );
- assert( x1 == 1 );
+ verify( x1 == 1 );
sleepmillis( 500 );
- assert( x1 == 1 );
+ verify( x1 == 1 );
AtomicUInt x2 = 0;
boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
t2.join();
- assert( x2 == 1 );
+ verify( x2 == 1 );
a.reset();
for ( int i=0; i<2000; i++ ) {
if ( x1 == 2 )
break;
sleepmillis(1);
}
- assert( x1 == 2 );
+ verify( x1 == 2 );
t1.join();
}
};
@@ -402,7 +402,7 @@ namespace ThreadedTests {
class RWLockTest3 {
public:
static void worker2( RWLockRecursiveNongreedy * lk , AtomicUInt * x ) {
- assert( ! lk->__lock_try(0) );
+ verify( ! lk->__lock_try(0) );
RWLockRecursiveNongreedy::Shared c( *lk );
(*x)++;
}
@@ -420,7 +420,7 @@ namespace ThreadedTests {
boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
t2.join();
- assert( x2 == 1 );
+ verify( x2 == 1 );
a.reset();
}
@@ -460,23 +460,23 @@ namespace ThreadedTests {
// create
pthread_rwlock_t lk;
- assert( pthread_rwlock_init( &lk , 0 ) == 0 );
+ verify( pthread_rwlock_init( &lk , 0 ) == 0 );
// read lock
- assert( pthread_rwlock_rdlock( &lk ) == 0 );
+ verify( pthread_rwlock_rdlock( &lk ) == 0 );
AtomicUInt x1 = 0;
boost::thread t1( boost::bind( worker1 , &lk , &x1 ) );
while ( ! x1 );
- assert( x1 == 1 );
+ verify( x1 == 1 );
sleepmillis( 500 );
- assert( x1 == 1 );
+ verify( x1 == 1 );
AtomicUInt x2 = 0;
boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
t2.join();
- assert( x2 == 1 );
+ verify( x2 == 1 );
pthread_rwlock_unlock( &lk );
@@ -486,7 +486,7 @@ namespace ThreadedTests {
sleepmillis(1);
}
- assert( x1 == 2 );
+ verify( x1 == 2 );
t1.join();
#endif
}
@@ -761,7 +761,7 @@ namespace ThreadedTests {
Timer t;
while( 1 ) {
n.waitToBeNotified();
- assert( k == 0 );
+ verify( k == 0 );
k = 1;
// not very long, we'd like to simulate about 100K locks per second
sleepalittle();
@@ -850,7 +850,7 @@ namespace ThreadedTests {
Timer t;
log(Z) << mongo::curTimeMillis64() % 10000 << " 3 lock_r()..." << endl;
m.lock_r();
- assert( gotW );
+ verify( gotW );
log(Z) << mongo::curTimeMillis64() % 10000 << " 3 got" << gotW << endl;
log(Z) << t.millis() << endl;
m.unlock_r();
@@ -879,14 +879,14 @@ namespace ThreadedTests {
void checkIn(){
scoped_lock lk( _frontDesk );
_checkedIn++;
- assert( _checkedIn <= _nRooms );
+ verify( _checkedIn <= _nRooms );
if( _checkedIn > _maxRooms ) _maxRooms = _checkedIn;
}
void checkOut(){
scoped_lock lk( _frontDesk );
_checkedIn--;
- assert( _checkedIn >= 0 );
+ verify( _checkedIn >= 0 );
}
mongo::mutex _frontDesk;
@@ -928,7 +928,7 @@ namespace ThreadedTests {
// This should always be true, assuming that it takes < 1 sec for the hardware to process a check-out/check-in
// Time for test is then ~ #threads / _nRooms * 2 seconds
- assert( _hotel._maxRooms == _hotel._nRooms );
+ verify( _hotel._maxRooms == _hotel._nRooms );
}
diff --git a/src/mongo/pch.h b/src/mongo/pch.h
index 9e5be0fbcf9..f2c82e9eec5 100644
--- a/src/mongo/pch.h
+++ b/src/mongo/pch.h
@@ -89,8 +89,6 @@
#include <boost/thread/condition.hpp>
#include <boost/thread/recursive_mutex.hpp>
#include <boost/thread/xtime.hpp>
-#undef assert
-#define assert MONGO_assert
namespace mongo {
@@ -145,7 +143,7 @@ namespace mongo {
// TODO: Rework the headers so we don't need this craziness
#include "bson/inline_decls.h"
-#define MONGO_assert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
+#define MONGO_verify(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
#include "util/debug_util.h"
#include "util/goodies.h"
diff --git a/src/mongo/s/balance.cpp b/src/mongo/s/balance.cpp
index 2d89dfd459b..a97636ae958 100644
--- a/src/mongo/s/balance.cpp
+++ b/src/mongo/s/balance.cpp
@@ -46,17 +46,17 @@ namespace mongo {
const CandidateChunk& chunkInfo = *it->get();
DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns );
- assert( cfg );
+ verify( cfg );
ChunkManagerPtr cm = cfg->getChunkManager( chunkInfo.ns );
- assert( cm );
+ verify( cm );
const BSONObj& chunkToMove = chunkInfo.chunk;
ChunkPtr c = cm->findChunk( chunkToMove["min"].Obj() );
if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
// likely a split happened somewhere
cm = cfg->getChunkManager( chunkInfo.ns , true /* reload */);
- assert( cm );
+ verify( cm );
c = cm->findChunk( chunkToMove["min"].Obj() );
if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
@@ -79,7 +79,7 @@ namespace mongo {
if ( res["chunkTooBig"].trueValue() ) {
// reload just to be safe
cm = cfg->getChunkManager( chunkInfo.ns );
- assert( cm );
+ verify( cm );
c = cm->findChunk( chunkToMove["min"].Obj() );
log() << "forcing a split because migrate failed for size reasons" << endl;
@@ -143,7 +143,7 @@ namespace mongo {
}
void Balancer::_doBalanceRound( DBClientBase& conn, vector<CandidateChunkPtr>* candidateChunks ) {
- assert( candidateChunks );
+ verify( candidateChunks );
//
// 1. Check whether there is any sharded collection to be balanced by querying
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 6a034694d38..c68630be64d 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -57,7 +57,7 @@ namespace mongo {
_shard.reset( from.getStringField( "shard" ) );
_lastmod = from["lastmod"];
- assert( _lastmod > 0 );
+ verify( _lastmod > 0 );
_min = from.getObjectField( "min" ).getOwned();
_max = from.getObjectField( "max" ).getOwned();
@@ -83,7 +83,7 @@ namespace mongo {
}
string Chunk::getns() const {
- assert( _manager );
+ verify( _manager );
return _manager->getns();
}
@@ -477,11 +477,11 @@ namespace mongo {
to.appendTimestamp( "lastmod" , myLastMod );
}
else if ( _lastmod.isSet() ) {
- assert( _lastmod > 0 && _lastmod < 1000 );
+ verify( _lastmod > 0 && _lastmod < 1000 );
to.appendTimestamp( "lastmod" , _lastmod );
}
else {
- assert(0);
+ verify(0);
}
to << "ns" << _manager->getns();
@@ -614,7 +614,7 @@ namespace mongo {
// TODO really need the sort?
auto_ptr<DBClientCursor> cursor = conn->query( Chunk::chunkMetadataNS, QUERY("ns" << _ns).sort("lastmod",-1), 0, 0, 0, 0,
(DEBUG_BUILD ? 2 : 1000000)); // batch size. Try to induce potential race conditions in debug builds
- assert( cursor.get() );
+ verify( cursor.get() );
while ( cursor->more() ) {
BSONObj d = cursor->next();
if ( d["isMaxMarker"].trueValue() ) {
@@ -678,7 +678,7 @@ namespace mongo {
void ChunkManager::createFirstChunks( const Shard& primary , vector<BSONObj>* initPoints , vector<Shard>* initShards ) const {
// TODO distlock?
- assert( _chunkMap.size() == 0 );
+ verify( _chunkMap.size() == 0 );
vector<BSONObj> splitPoints;
vector<Shard> shards;
@@ -996,22 +996,22 @@ namespace mongo {
try {
// No Nulls
for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it) {
- assert(it->second);
+ verify(it->second);
}
// Check endpoints
- assert(allOfType(MinKey, _ranges.begin()->second->getMin()));
- assert(allOfType(MaxKey, boost::prior(_ranges.end())->second->getMax()));
+ verify(allOfType(MinKey, _ranges.begin()->second->getMin()));
+ verify(allOfType(MaxKey, boost::prior(_ranges.end())->second->getMax()));
// Make sure there are no gaps or overlaps
for (ChunkRangeMap::const_iterator it=boost::next(_ranges.begin()), end=_ranges.end(); it != end; ++it) {
ChunkRangeMap::const_iterator last = boost::prior(it);
- assert(it->second->getMin() == last->second->getMax());
+ verify(it->second->getMin() == last->second->getMax());
}
// Check Map keys
for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it) {
- assert(it->first == it->second->getMax());
+ verify(it->first == it->second->getMax());
}
// Make sure we match the original chunks
@@ -1022,12 +1022,12 @@ namespace mongo {
ChunkRangeMap::const_iterator min = _ranges.upper_bound(chunk->getMin());
ChunkRangeMap::const_iterator max = _ranges.lower_bound(chunk->getMax());
- assert(min != _ranges.end());
- assert(max != _ranges.end());
- assert(min == max);
- assert(min->second->getShard() == chunk->getShard());
- assert(min->second->contains( chunk->getMin() ));
- assert(min->second->contains( chunk->getMax() ) || (min->second->getMax() == chunk->getMax()));
+ verify(min != _ranges.end());
+ verify(max != _ranges.end());
+ verify(min == max);
+ verify(min->second->getShard() == chunk->getShard());
+ verify(min->second->contains( chunk->getMin() ));
+ verify(min->second->contains( chunk->getMax() ) || (min->second->getMax() == chunk->getMax()));
}
}
@@ -1105,7 +1105,7 @@ namespace mongo {
for ( unsigned i=0; i<all.size(); i++ ) {
for ( unsigned j=i+1; j<all.size(); j++ ) {
- assert( all[i] < all[j] );
+ verify( all[i] < all[j] );
}
}
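
The checkRanges-style block above encodes the ChunkRangeMap invariants: the map is keyed by each range's max, the endpoints sit at MinKey and MaxKey, and each range's min equals the previous range's max, so there are no gaps and no overlaps. A hedged sketch of that walk, with ints standing in for BSON keys:

    // Sketch of the contiguity invariant verified above: ranges keyed by
    // their max must tile the key space with no gaps and no overlaps.
    #include <cassert>
    #include <map>

    struct Range { int min; int max; };

    static void checkContiguous(const std::map<int, Range>& ranges, int minKey) {
        int prevMax = minKey;  // stand-in for MinKey
        for (const auto& kv : ranges) {
            assert(kv.first == kv.second.max);  // map key is the range's max
            assert(kv.second.min == prevMax);   // no gap, no overlap
            prevMax = kv.second.max;
        }
    }

    int main() {
        std::map<int, Range> r;
        r[10] = {0, 10};
        r[25] = {10, 25};
        checkContiguous(r, 0);  // passes: the ranges tile [0,25)
    }
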
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
index 8fd3f7f8eb4..122c42f8825 100644
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
@@ -235,11 +235,11 @@ namespace mongo {
, _shard(begin->second->getShard())
, _min(begin->second->getMin())
, _max(boost::prior(end)->second->getMax()) {
- assert( begin != end );
+ verify( begin != end );
DEV while (begin != end) {
- assert(begin->second->getManager() == _manager);
- assert(begin->second->getShard() == _shard);
+ verify(begin->second->getManager() == _manager);
+ verify(begin->second->getShard() == _shard);
++begin;
}
}
@@ -250,9 +250,9 @@ namespace mongo {
, _shard(min.getShard())
, _min(min.getMin())
, _max(max.getMax()) {
- assert(min.getShard() == max.getShard());
- assert(min.getManager() == max.getManager());
- assert(min.getMax() == max.getMin());
+ verify(min.getShard() == max.getShard());
+ verify(min.getManager() == max.getManager());
+ verify(min.getMax() == max.getMin());
}
friend ostream& operator<<(ostream& out, const ChunkRange& cr) {
diff --git a/src/mongo/s/client.cpp b/src/mongo/s/client.cpp
index 9e88ff856be..2e7e32d430e 100644
--- a/src/mongo/s/client.cpp
+++ b/src/mongo/s/client.cpp
@@ -206,7 +206,7 @@ namespace mongo {
// all we're going to report is the first
// since that's the current write
// but we block for all
- assert( v.size() >= 1 );
+ verify( v.size() >= 1 );
result.appendElements( v[0] );
result.appendElementsUnique( res );
result.append( "writebackGLE" , v[0] );
diff --git a/src/mongo/s/commands_admin.cpp b/src/mongo/s/commands_admin.cpp
index 7d284e4eff3..bc75f54a0bc 100644
--- a/src/mongo/s/commands_admin.cpp
+++ b/src/mongo/s/commands_admin.cpp
@@ -181,7 +181,7 @@ namespace mongo {
{
RamLog* rl = RamLog::get( "warnings" );
- assert(rl);
+ verify(rl);
if (rl->lastWrite() >= time(0)-(10*60)){ // only show warnings from last 10 minutes
vector<const char*> lines;
@@ -633,7 +633,7 @@ namespace mongo {
ChunkPtr chunk = info->findChunk( find );
BSONObj middle = cmdObj.getObjectField( "middle" );
- assert( chunk.get() );
+ verify( chunk.get() );
log() << "splitting: " << ns << " shard: " << chunk << endl;
BSONObj res;
@@ -1048,7 +1048,7 @@ namespace mongo {
virtual bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
LastError *le = lastError.disableForCommand();
{
- assert( le );
+ verify( le );
if ( le->msg.size() && le->nPrev == 1 ) {
le->appendSelf( result );
return true;
diff --git a/src/mongo/s/commands_public.cpp b/src/mongo/s/commands_public.cpp
index eaef1767224..1733f41aca6 100644
--- a/src/mongo/s/commands_public.cpp
+++ b/src/mongo/s/commands_public.cpp
@@ -461,7 +461,7 @@ namespace mongo {
set<Shard> shards;
cm->getShardsForQuery( shards , filter );
- assert( shards.size() );
+ verify( shards.size() );
hadToBreak = false;
@@ -1110,7 +1110,7 @@ namespace mongo {
// modify command to run on shards with output to tmp collection
string badShardedField;
- assert( maxChunkSizeBytes < 0x7fffffff );
+ verify( maxChunkSizeBytes < 0x7fffffff );
BSONObj shardedCommand = fixForShards( cmdObj , shardResultCollection , badShardedField, static_cast<int>(maxChunkSizeBytes) );
if ( ! shardedInput && ! shardedOutput && ! customOutDB ) {
@@ -1317,7 +1317,7 @@ namespace mongo {
for (unsigned int i = 0; i < sizes.size(); i += 2) {
BSONObj key = sizes[i].Obj().getOwned();
long long size = sizes[i+1].numberLong();
- assert( size < 0x7fffffff );
+ verify( size < 0x7fffffff );
chunkSizes[key] = static_cast<int>(size);
}
}
@@ -1329,7 +1329,7 @@ namespace mongo {
for ( map<BSONObj, int>::iterator it = chunkSizes.begin() ; it != chunkSizes.end() ; ++it ) {
BSONObj key = it->first;
int size = it->second;
- assert( size < 0x7fffffff );
+ verify( size < 0x7fffffff );
// key reported should be the chunk's minimum
ChunkPtr c = cm->findChunk(key);
@@ -1484,7 +1484,7 @@ namespace mongo {
i != end; i++) {
boost::shared_ptr<ShardConnection> temp(
new ShardConnection(i->getConnString(), fullns));
- assert(temp->get());
+ verify(temp->get());
futures.push_back(
Future::spawnCommand(i->getConnString(), dbName,
shardedCommand , 0, temp->get()));
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 1e131db96ac..839e70c0334 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -135,7 +135,7 @@ namespace mongo {
if ( _shardingEnabled )
return;
- assert( _name != "config" );
+ verify( _name != "config" );
scoped_lock lk( _lock );
_shardingEnabled = true;
@@ -249,8 +249,8 @@ namespace mongo {
}
}
- assert( manager || primary );
- assert( ! manager || ! primary );
+ verify( manager || primary );
+ verify( ! manager || ! primary );
}
@@ -281,7 +281,7 @@ namespace mongo {
CollectionInfo& ci = _collections[ns];
uassert( 10181 , (string)"not sharded:" + ns , ci.isSharded() );
- assert( ! ci.key().isEmpty() );
+ verify( ! ci.key().isEmpty() );
if ( ! ( shouldReload || forceReload ) || earlyReload )
return ci.getCM();
@@ -292,7 +292,7 @@ namespace mongo {
oldVersion = ci.getCM()->getVersion();
}
- assert( ! key.isEmpty() );
+ verify( ! key.isEmpty() );
BSONObj newest;
if ( oldVersion > 0 && ! forceReload ) {
@@ -383,7 +383,7 @@ namespace mongo {
void DBConfig::unserialize(const BSONObj& from) {
LOG(1) << "DBConfig unserialize: " << _name << " " << from << endl;
- assert( _name == from["_id"].String() );
+ verify( _name == from["_id"].String() );
_shardingEnabled = from.getBoolField("partitioned");
_primary.reset( from.getStringField("primary") );
@@ -417,7 +417,7 @@ namespace mongo {
b.appendRegex( "_id" , (string)"^" + pcrecpp::RE::QuoteMeta( _name ) + "\\." );
auto_ptr<DBClientCursor> cursor = conn->query( ShardNS::collection, b.obj() );
- assert( cursor.get() );
+ verify( cursor.get() );
while ( cursor->more() ) {
BSONObj o = cursor->next();
if( o["dropped"].trueValue() ) _collections.erase( o["_id"].String() );
@@ -792,7 +792,7 @@ namespace mongo {
ScopedDbConnection conn( _primary, 30.0 );
auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
- assert( c.get() );
+ verify( c.get() );
while ( c->more() ) {
BSONObj o = c->next();
string name = o["_id"].valuestrsafe();
@@ -871,7 +871,7 @@ namespace mongo {
<< "time" << DATENOW << "what" << what << "ns" << ns << "details" << detail );
log() << "about to log metadata event: " << msg << endl;
- assert( _primary.ok() );
+ verify( _primary.ok() );
ScopedDbConnection conn( _primary, 30.0 );
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index d8cedcaaa04..9d47085eeb6 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -83,8 +83,8 @@ namespace mongo {
}
void resetCM( ChunkManager * cm ) {
- assert(cm);
- assert(_cm); // this has to be already sharded
+ verify(cm);
+ verify(_cm); // this has to be already sharded
_cm.reset( cm );
}
@@ -118,7 +118,7 @@ namespace mongo {
_shardingEnabled(false),
_lock("DBConfig") ,
_hitConfigServerLock( "DBConfig::_hitConfigServerLock" ) {
- assert( name.size() );
+ verify( name.size() );
}
virtual ~DBConfig() {}
diff --git a/src/mongo/s/config_migrate.cpp b/src/mongo/s/config_migrate.cpp
index fff023cfb5b..2a3b6a54831 100644
--- a/src/mongo/s/config_migrate.cpp
+++ b/src/mongo/s/config_migrate.cpp
@@ -39,7 +39,7 @@ namespace mongo {
ScopedDbConnection conn( _primary );
conn->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );
pool.flush();
- assert( VERSION == dbConfigVersion( conn.conn() ) );
+ verify( VERSION == dbConfigVersion( conn.conn() ) );
conn.done();
return 0;
}
@@ -47,7 +47,7 @@ namespace mongo {
if ( cur == 2 ) {
// need to upgrade
- assert( VERSION == 3 );
+ verify( VERSION == 3 );
if ( ! upgrade ) {
log() << "newer version of mongo meta data\n"
<< "need to --upgrade after shutting all mongos down"
@@ -94,8 +94,8 @@ namespace mongo {
n++;
}
- assert( n == hostToShard.size() );
- assert( n == shards.size() );
+ verify( n == hostToShard.size() );
+ verify( n == shards.size() );
conn->remove( ShardNS::shard , BSONObj() );
@@ -138,7 +138,7 @@ namespace mongo {
newDBs[old["name"].String()] = x;
}
- assert( n == newDBs.size() );
+ verify( n == newDBs.size() );
conn->remove( ShardNS::database , BSONObj() );
@@ -174,7 +174,7 @@ namespace mongo {
num++;
}
- assert( num == chunks.size() );
+ verify( num == chunks.size() );
conn->remove( ShardNS::chunk , BSONObj() );
for ( map<string,BSONObj>::iterator i=chunks.begin(); i!=chunks.end(); i++ ) {
diff --git a/src/mongo/s/cursors.cpp b/src/mongo/s/cursors.cpp
index 0c1890ceba0..49aaeceb98a 100644
--- a/src/mongo/s/cursors.cpp
+++ b/src/mongo/s/cursors.cpp
@@ -29,7 +29,7 @@ namespace mongo {
// -------- ShardedCursor -----------
ShardedClientCursor::ShardedClientCursor( QueryMessage& q , ClusteredCursor * cursor ) {
- assert( cursor );
+ verify( cursor );
_cursor = cursor;
_skip = q.ntoskip;
@@ -48,7 +48,7 @@ namespace mongo {
}
ShardedClientCursor::~ShardedClientCursor() {
- assert( _cursor );
+ verify( _cursor );
delete _cursor;
_cursor = 0;
}
@@ -56,7 +56,7 @@ namespace mongo {
long long ShardedClientCursor::getId() {
if ( _id <= 0 ) {
_id = cursorCache.genId();
- assert( _id >= 0 );
+ verify( _id >= 0 );
}
return _id;
}
@@ -174,26 +174,26 @@ namespace mongo {
void CursorCache::store( ShardedClientCursorPtr cursor ) {
LOG(_myLogLevel) << "CursorCache::store cursor " << " id: " << cursor->getId() << endl;
- assert( cursor->getId() );
+ verify( cursor->getId() );
scoped_lock lk( _mutex );
_cursors[cursor->getId()] = cursor;
_shardedTotal++;
}
void CursorCache::remove( long long id ) {
- assert( id );
+ verify( id );
scoped_lock lk( _mutex );
_cursors.erase( id );
}
void CursorCache::storeRef( const string& server , long long id ) {
LOG(_myLogLevel) << "CursorCache::storeRef server: " << server << " id: " << id << endl;
- assert( id );
+ verify( id );
scoped_lock lk( _mutex );
_refs[id] = server;
}
string CursorCache::getRef( long long id ) const {
- assert( id );
+ verify( id );
scoped_lock lk( _mutex );
MapNormal::const_iterator i = _refs.find( id );
@@ -270,7 +270,7 @@ namespace mongo {
LOG(_myLogLevel) << "CursorCache::found gotKillCursors id: " << id << " server: " << server << endl;
- assert( server.size() );
+ verify( server.size() );
ScopedDbConnection conn( server );
conn->killCursor( id );
conn.done();
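
Every CursorCache change above guards the same bookkeeping: ids must be nonzero and each map access takes the cache mutex. A minimal sketch of the ref-map half, with illustrative class and server names rather than the real interface:

    // Sketch of mutex-guarded cursor-ref bookkeeping as verified above.
    #include <cassert>
    #include <map>
    #include <mutex>
    #include <string>

    class CursorRefCache {
    public:
        void storeRef(const std::string& server, long long id) {
            assert(id);  // id 0 means "no cursor" and must never be stored
            std::lock_guard<std::mutex> lk(_mutex);
            _refs[id] = server;
        }
        std::string getRef(long long id) const {
            assert(id);
            std::lock_guard<std::mutex> lk(_mutex);
            auto i = _refs.find(id);
            return i == _refs.end() ? std::string() : i->second;
        }
        void remove(long long id) {
            assert(id);
            std::lock_guard<std::mutex> lk(_mutex);
            _refs.erase(id);
        }
    private:
        mutable std::mutex _mutex;
        std::map<long long, std::string> _refs;
    };

    int main() {
        CursorRefCache cache;
        cache.storeRef("shard-a:27018", 42);  // hypothetical server address
        assert(cache.getRef(42) == "shard-a:27018");
        cache.remove(42);
        assert(cache.getRef(42).empty());
    }
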
diff --git a/src/mongo/s/d_chunk_manager.cpp b/src/mongo/s/d_chunk_manager.cpp
index 8940853c1e4..2ecbd03f05a 100644
--- a/src/mongo/s/d_chunk_manager.cpp
+++ b/src/mongo/s/d_chunk_manager.cpp
@@ -85,7 +85,7 @@ namespace mongo {
}
void ShardChunkManager::_fillChunks( DBClientCursorInterface* cursor ) {
- assert( cursor );
+ verify( cursor );
ShardChunkVersion version;
while ( cursor->more() ) {
@@ -129,7 +129,7 @@ namespace mongo {
min = currMin;
max = currMax;
}
- assert( ! min.isEmpty() );
+ verify( ! min.isEmpty() );
_rangesMap.insert( make_pair( min , max ) );
}
@@ -139,7 +139,7 @@ namespace mongo {
}
bool ShardChunkManager::belongsToMe( ClientCursor* cc ) const {
- assert( cc );
+ verify( cc );
if ( _rangesMap.size() == 0 )
return false;
@@ -173,8 +173,8 @@ namespace mongo {
}
bool ShardChunkManager::getNextChunk( const BSONObj& lookupKey, BSONObj* foundMin , BSONObj* foundMax ) const {
- assert( foundMin );
- assert( foundMax );
+ verify( foundMin );
+ verify( foundMax );
*foundMin = BSONObj();
*foundMax = BSONObj();
diff --git a/src/mongo/s/d_logic.cpp b/src/mongo/s/d_logic.cpp
index bcaad6ce9a5..879901f8c9b 100644
--- a/src/mongo/s/d_logic.cpp
+++ b/src/mongo/s/d_logic.cpp
@@ -44,7 +44,7 @@ using namespace std;
namespace mongo {
bool _handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
- DEV assert( shardingState.enabled() );
+ DEV verify( shardingState.enabled() );
int op = m.operation();
if ( op < 2000
@@ -65,7 +65,7 @@ namespace mongo {
LOG(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
if ( doesOpGetAResponse( op ) ) {
- assert( dbresponse );
+ verify( dbresponse );
BufBuilder b( 32768 );
b.skip( sizeof( QueryResult ) );
{
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 52276ac72fb..8f85f899425 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -84,8 +84,8 @@ namespace mongo {
}
void done( int step ) {
- assert( step == ++_next );
- assert( step <= _total );
+ verify( step == ++_next );
+ verify( step <= _total );
stringstream ss;
ss << "step" << step;
@@ -228,20 +228,20 @@ namespace mongo {
scoped_lock ll(_workLock);
scoped_lock l(_m); // reads and writes _active
- assert( ! _active );
+ verify( ! _active );
- assert( ! min.isEmpty() );
- assert( ! max.isEmpty() );
- assert( ns.size() );
+ verify( ! min.isEmpty() );
+ verify( ! max.isEmpty() );
+ verify( ns.size() );
_ns = ns;
_min = min;
_max = max;
- assert( _cloneLocs.size() == 0 );
- assert( _deleted.size() == 0 );
- assert( _reload.size() == 0 );
- assert( _memoryUsed == 0 );
+ verify( _cloneLocs.size() == 0 );
+ verify( _deleted.size() == 0 );
+ verify( _reload.size() == 0 );
+ verify( _memoryUsed == 0 );
_active = true;
}
@@ -481,7 +481,7 @@ namespace mongo {
readlock l(_ns);
Client::Context ctx( _ns );
NamespaceDetails *d = nsdetails( _ns.c_str() );
- assert( d );
+ verify( d );
scoped_spinlock lk( _trackerLocks );
allocSize = std::min(BSONObjMaxUserSize, (int)((12 + d->averageObjectSize()) * _cloneLocs.size()));
}
@@ -532,7 +532,7 @@ namespace mongo {
}
void aboutToDelete( const Database* db , const DiskLoc& dl ) {
- assert(db);
+ verify(db);
Lock::assertWriteLocked(db->name);
if ( ! _getActive() )
@@ -621,7 +621,7 @@ namespace mongo {
int loops = 0;
Timer t;
while ( t.seconds() < 900 ) { // 15 minutes
- assert( !Lock::isLocked() );
+ verify( !Lock::isLocked() );
sleepmillis( 20 );
set<CursorId> now;
@@ -838,9 +838,9 @@ namespace mongo {
}
maxVersion = x["lastmod"];
- assert( currChunk["shard"].type() );
- assert( currChunk["min"].type() );
- assert( currChunk["max"].type() );
+ verify( currChunk["shard"].type() );
+ verify( currChunk["min"].type() );
+ verify( currChunk["max"].type() );
myOldShard = currChunk["shard"].String();
conn.done();
@@ -918,7 +918,7 @@ namespace mongo {
if ( ! ok ) {
errmsg = "moveChunk failed to engage TO-shard in the data transfer: ";
- assert( res["errmsg"].type() );
+ verify( res["errmsg"].type() );
errmsg += res["errmsg"].String();
result.append( "cause" , res );
return false;
@@ -929,7 +929,7 @@ namespace mongo {
// 4.
for ( int i=0; i<86400; i++ ) { // don't want a single chunk move to take more than a day
- assert( !Lock::isLocked() );
+ verify( !Lock::isLocked() );
sleepsecs( 1 );
ScopedDbConnection conn( toShard.getConnString() );
BSONObj res;
@@ -987,7 +987,7 @@ namespace mongo {
{
writelock lk( ns );
- assert( myVersion > shardingState.getVersion( ns ) );
+ verify( myVersion > shardingState.getVersion( ns ) );
// bump the chunks manager's version up and "forget" about the chunk being moved
// this is not the commit point, but in practice the state in this shard won't change until the commit is done
@@ -1259,7 +1259,7 @@ namespace mongo {
void prepare() {
scoped_lock l(m_active); // reading and writing 'active'
- assert( ! active );
+ verify( ! active );
state = READY;
errmsg = "";
@@ -1289,10 +1289,10 @@ namespace mongo {
}
void _go() {
- assert( getActive() );
- assert( state == READY );
- assert( ! min.isEmpty() );
- assert( ! max.isEmpty() );
+ verify( getActive() );
+ verify( state == READY );
+ verify( ! min.isEmpty() );
+ verify( ! max.isEmpty() );
slaveCount = ( getSlaveCount() / 2 ) + 1;
@@ -1593,7 +1593,7 @@ namespace mongo {
case FAIL: return "fail";
case ABORT: return "abort";
}
- assert(0);
+ verify(0);
return "";
}
@@ -1734,12 +1734,12 @@ namespace mongo {
BSONObj min = BSON( "x" << 1 );
BSONObj max = BSON( "x" << 5 );
- assert( ! isInRange( BSON( "x" << 0 ) , min , max ) );
- assert( isInRange( BSON( "x" << 1 ) , min , max ) );
- assert( isInRange( BSON( "x" << 3 ) , min , max ) );
- assert( isInRange( BSON( "x" << 4 ) , min , max ) );
- assert( ! isInRange( BSON( "x" << 5 ) , min , max ) );
- assert( ! isInRange( BSON( "x" << 6 ) , min , max ) );
+ verify( ! isInRange( BSON( "x" << 0 ) , min , max ) );
+ verify( isInRange( BSON( "x" << 1 ) , min , max ) );
+ verify( isInRange( BSON( "x" << 3 ) , min , max ) );
+ verify( isInRange( BSON( "x" << 4 ) , min , max ) );
+ verify( ! isInRange( BSON( "x" << 5 ) , min , max ) );
+ verify( ! isInRange( BSON( "x" << 6 ) , min , max ) );
LOG(1) << "isInRangeTest passed" << migrateLog;
}
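
The isInRangeTest above fixes the chunk containment contract as a half-open interval: min is inclusive, max is exclusive. The same checks with ints in place of BSON keys:

    // Sketch of the half-open [min, max) containment pinned down above.
    #include <cassert>

    static bool isInRange(int x, int min, int max) {
        return min <= x && x < max;
    }

    int main() {
        assert( !isInRange(0, 1, 5) );
        assert(  isInRange(1, 1, 5) );  // min itself is included
        assert(  isInRange(3, 1, 5) );
        assert(  isInRange(4, 1, 5) );
        assert( !isInRange(5, 1, 5) );  // max itself is excluded
        assert( !isInRange(6, 1, 5) );
    }
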
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 473cc44502b..11b7b42e8e9 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -397,7 +397,7 @@ namespace mongo {
currCount++;
BSONObj currKey = c->currKey();
- DEV assert( currKey.woCompare( max ) <= 0 );
+ DEV verify( currKey.woCompare( max ) <= 0 );
if ( currCount > keyCount ) {
// Do not use this split key if it is the same used in the previous split point.
@@ -462,7 +462,7 @@ namespace mongo {
// Remove the sentinel at the beginning before returning and add fieldnames.
splitKeys.erase( splitKeys.begin() );
- assert( c.get() );
+ verify( c.get() );
for ( vector<BSONObj>::iterator it = splitKeys.begin(); it != splitKeys.end() ; ++it ) {
*it = bc->prettyKey( *it );
}
@@ -633,9 +633,9 @@ namespace mongo {
maxVersion = x["lastmod"];
BSONObj currChunk = conn->findOne( ShardNS::chunk , shardId.wrap( "_id" ) ).getOwned();
- assert( currChunk["shard"].type() );
- assert( currChunk["min"].type() );
- assert( currChunk["max"].type() );
+ verify( currChunk["shard"].type() );
+ verify( currChunk["min"].type() );
+ verify( currChunk["max"].type() );
shard = currChunk["shard"].String();
conn.done();
@@ -774,7 +774,7 @@ namespace mongo {
stringstream ss;
ss << "saving chunks failed. cmd: " << cmd << " result: " << cmdResult;
error() << ss.str() << endl;
- msgasserted( 13593 , ss.str() ); // assert(13593)
+ msgasserted( 13593 , ss.str() );
}
// install a chunk manager with knowledge about newly split chunks in this shard's state
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 0919c128377..2f81f1aed09 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -51,11 +51,11 @@ namespace mongo {
void ShardingState::enable( const string& server ) {
_enabled = true;
- assert( server.size() );
+ verify( server.size() );
if ( _configServer.size() == 0 )
_configServer = server;
else {
- assert( server == _configServer );
+ verify( server == _configServer );
}
}
@@ -147,7 +147,7 @@ namespace mongo {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
- assert( it != _chunks.end() ) ;
+ verify( it != _chunks.end() ) ;
ShardChunkManagerPtr p = it->second;
// empty shards should have version 0
@@ -161,7 +161,7 @@ namespace mongo {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
- assert( it != _chunks.end() ) ;
+ verify( it != _chunks.end() ) ;
ShardChunkManagerPtr p( it->second->clonePlus( min , max , version ) );
_chunks[ns] = p;
}
@@ -171,7 +171,7 @@ namespace mongo {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
- assert( it != _chunks.end() ) ;
+ verify( it != _chunks.end() ) ;
ShardChunkManagerPtr p( it->second->cloneSplit( min , max , splitKeys , version ) );
_chunks[ns] = p;
}
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index 549d1e390a9..7731104e9f7 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -356,7 +356,7 @@ namespace mongo {
}
bool Grid::_getNewShardName( string* name ) const {
- DEV assert( name );
+ DEV verify( name );
bool ok = false;
int count = 0;
@@ -524,10 +524,10 @@ namespace mongo {
BSONObj w3 = BSON( "activeWindow" << BSON( "start" << T1 << "stop" << T2 ) ); // open now
BSONObj w4 = BSON( "activeWindow" << BSON( "start" << T3 << "stop" << T2 ) ); // open since last day
- assert( ! Grid::_inBalancingWindow( w1 , now ) );
- assert( ! Grid::_inBalancingWindow( w2 , now ) );
- assert( Grid::_inBalancingWindow( w3 , now ) );
- assert( Grid::_inBalancingWindow( w4 , now ) );
+ verify( ! Grid::_inBalancingWindow( w1 , now ) );
+ verify( ! Grid::_inBalancingWindow( w2 , now ) );
+ verify( Grid::_inBalancingWindow( w3 , now ) );
+ verify( Grid::_inBalancingWindow( w4 , now ) );
// bad input should not stop the balancer
@@ -537,11 +537,11 @@ namespace mongo {
BSONObj w8 = BSON( "wrongMarker" << 1 << "start" << 1 << "stop" << 1 ); // active window marker missing
BSONObj w9 = BSON( "activeWindow" << BSON( "start" << T3 << "stop" << E ) ); // garbage in window
- assert( Grid::_inBalancingWindow( w5 , now ) );
- assert( Grid::_inBalancingWindow( w6 , now ) );
- assert( Grid::_inBalancingWindow( w7 , now ) );
- assert( Grid::_inBalancingWindow( w8 , now ) );
- assert( Grid::_inBalancingWindow( w9 , now ) );
+ verify( Grid::_inBalancingWindow( w5 , now ) );
+ verify( Grid::_inBalancingWindow( w6 , now ) );
+ verify( Grid::_inBalancingWindow( w7 , now ) );
+ verify( Grid::_inBalancingWindow( w8 , now ) );
+ verify( Grid::_inBalancingWindow( w9 , now ) );
LOG(1) << "BalancingWidowObjTest passed" << endl;
}
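
The balancing-window tests above encode a fail-open policy: a well-formed activeWindow confines balancing to the start/stop interval, while missing or garbled input must not stop the balancer. A sketch under those assumptions, with hour-of-day ints standing in for the parsed times; the midnight wrap-around branch is an assumption suggested by the "open since last day" case:

    // Sketch of a fail-open balancing window check.
    #include <cassert>
    #include <optional>

    struct Window { int startHour; int stopHour; };

    static bool inBalancingWindow(const std::optional<Window>& w, int nowHour) {
        if (!w) return true;  // missing or unparseable window: stay active
        if (w->startHour <= w->stopHour)
            return w->startHour <= nowHour && nowHour < w->stopHour;
        return nowHour >= w->startHour || nowHour < w->stopHour;  // spans midnight
    }

    int main() {
        assert(  inBalancingWindow(std::nullopt, 12) );   // bad input: keep balancing
        assert(  inBalancingWindow(Window{9, 17}, 12) );  // open now
        assert( !inBalancingWindow(Window{9, 17}, 20) );  // closed
        assert(  inBalancingWindow(Window{22, 6}, 23) );  // open since last evening
    }
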
diff --git a/src/mongo/s/request.cpp b/src/mongo/s/request.cpp
index e8f2a36102d..58960170656 100644
--- a/src/mongo/s/request.cpp
+++ b/src/mongo/s/request.cpp
@@ -38,7 +38,7 @@ namespace mongo {
Request::Request( Message& m, AbstractMessagingPort* p ) :
_m(m) , _d( m ) , _p(p) , _didInit(false) {
- assert( _d.getns() );
+ verify( _d.getns() );
_id = _m.header()->id;
_clientInfo = ClientInfo::get();
@@ -93,7 +93,7 @@ namespace mongo {
// Deprecated, will move to the strategy itself
Shard Request::primaryShard() const {
- assert( _didInit );
+ verify( _didInit );
if ( _chunkManager ) {
if ( _chunkManager->numChunks() > 1 )
@@ -108,7 +108,7 @@ namespace mongo {
void Request::process( int attempt ) {
init();
int op = _m.operation();
- assert( op > dbMsg );
+ verify( op > dbMsg );
if ( op == dbKillCursors ) {
cursorCache.gotKillCursors( _m );
@@ -152,7 +152,7 @@ namespace mongo {
}
void Request::reply( Message & response , const string& fromServer ) {
- assert( _didInit );
+ verify( _didInit );
long long cursor =response.header()->getCursor();
if ( cursor ) {
if ( fromServer.size() ) {
@@ -161,7 +161,7 @@ namespace mongo {
else {
// probably a getMore
// make sure we have a ref for this
- assert( cursorCache.getRef( cursor ).size() );
+ verify( cursorCache.getRef( cursor ).size() );
}
}
_p->reply( _m , response , _id );
diff --git a/src/mongo/s/request.h b/src/mongo/s/request.h
index 70923f6737a..59aaf9843c3 100644
--- a/src/mongo/s/request.h
+++ b/src/mongo/s/request.h
@@ -53,16 +53,16 @@ namespace mongo {
}
DBConfigPtr getConfig() const {
- assert( _didInit );
+ verify( _didInit );
return _config;
}
bool isShardingEnabled() const {
- assert( _didInit );
+ verify( _didInit );
return _config->isShardingEnabled();
}
ChunkManagerPtr getChunkManager() const {
- assert( _didInit );
+ verify( _didInit );
return _chunkManager;
}
diff --git a/src/mongo/s/s_only.cpp b/src/mongo/s/s_only.cpp
index bd0d0c25cbf..85fc9c18e8b 100644
--- a/src/mongo/s/s_only.cpp
+++ b/src/mongo/s/s_only.cpp
@@ -49,7 +49,7 @@ namespace mongo {
Client& Client::initThread(const char *desc, AbstractMessagingPort *mp) {
DEV nThreads++; // never decremented. this is for casi class asserts
setThreadName(desc);
- assert( currentClient.get() == 0 );
+ verify( currentClient.get() == 0 );
Client *c = new Client(desc, mp);
currentClient.reset(c);
mongo::lastError.initThread();
@@ -68,7 +68,7 @@ namespace mongo {
const char *ns, BSONObj& cmdObj ,
BSONObjBuilder& result,
bool fromRepl ) {
- assert(c);
+ verify(c);
string dbname = nsToDatabase( ns );
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index b989ffc925c..c1919142446 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -71,7 +71,7 @@ namespace mongo {
}
bool haveLocalShardingInfo( const string& ns ) {
- assert( 0 );
+ verify( 0 );
return false;
}
@@ -90,10 +90,10 @@ namespace mongo {
}
virtual void process( Message& m , AbstractMessagingPort* p , LastError * le) {
- assert( p );
+ verify( p );
Request r( m , p );
- assert( le );
+ verify( le );
lastError.startRequest( m , le );
try {
diff --git a/src/mongo/s/shard.cpp b/src/mongo/s/shard.cpp
index 81b41c7fcbc..e40bb8de41b 100644
--- a/src/mongo/s/shard.cpp
+++ b/src/mongo/s/shard.cpp
@@ -274,7 +274,7 @@ namespace mongo {
}
void Shard::setAddress( const ConnectionString& cs) {
- assert( _name.size() );
+ verify( _name.size() );
_addr = cs.toString();
_cs = cs;
_rsInit();
diff --git a/src/mongo/s/shard.h b/src/mongo/s/shard.h
index 8bd3f1761b0..2fc0941a8e4 100644
--- a/src/mongo/s/shard.h
+++ b/src/mongo/s/shard.h
@@ -73,12 +73,12 @@ namespace mongo {
ConnectionString getAddress() const { return _cs; }
string getName() const {
- assert( _name.size() );
+ verify( _name.size() );
return _name;
}
string getConnString() const {
- assert( _addr.size() );
+ verify( _addr.size() );
return _addr;
}
@@ -102,7 +102,7 @@ namespace mongo {
bool n = _name == s._name;
bool a = _addr == s._addr;
- assert( n == a ); // names and address are 1 to 1
+ verify( n == a ); // names and address are 1 to 1
return n;
}
@@ -227,19 +227,19 @@ namespace mongo {
DBClientBase& conn() {
_finishInit();
- assert( _conn );
+ verify( _conn );
return *_conn;
}
DBClientBase* operator->() {
_finishInit();
- assert( _conn );
+ verify( _conn );
return _conn;
}
DBClientBase* get() {
_finishInit();
- assert( _conn );
+ verify( _conn );
return _conn;
}
diff --git a/src/mongo/s/shard_version.cpp b/src/mongo/s/shard_version.cpp
index 21593908085..5dc574e7bdb 100644
--- a/src/mongo/s/shard_version.cpp
+++ b/src/mongo/s/shard_version.cpp
@@ -91,7 +91,7 @@ namespace mongo {
return &( set->masterConn() );
}
- assert( false );
+ verify( false );
return NULL;
}
@@ -102,7 +102,7 @@ namespace mongo {
WriteBackListener::init( *conn_in );
DBClientBase* conn = getVersionable( conn_in );
- assert( conn ); // errors thrown above
+ verify( conn ); // errors thrown above
BSONObjBuilder cmdBuilder;
@@ -159,7 +159,7 @@ namespace mongo {
return false;
DBClientBase* conn = getVersionable( conn_in );
- assert(conn); // errors thrown above
+ verify(conn); // errors thrown above
unsigned long long officialSequenceNumber = 0;
diff --git a/src/mongo/s/shardconnection.cpp b/src/mongo/s/shardconnection.cpp
index 34098547f8f..c3decfec707 100644
--- a/src/mongo/s/shardconnection.cpp
+++ b/src/mongo/s/shardconnection.cpp
@@ -46,7 +46,7 @@ namespace mongo {
for ( HostMap::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ) {
string addr = i->first;
Status* ss = i->second;
- assert( ss );
+ verify( ss );
if ( ss->avail ) {
/* if we're shutting down, don't want to initiate release mechanism as it is slow,
and isn't needed since all connections will be closed anyway */
@@ -89,7 +89,7 @@ namespace mongo {
void done( const string& addr , DBClientBase* conn ) {
Status* s = _hosts[addr];
- assert( s );
+ verify( s );
if ( s->avail ) {
release( addr , conn );
return;
@@ -176,7 +176,7 @@ namespace mongo {
}
void ShardConnection::_init() {
- assert( _addr.size() );
+ verify( _addr.size() );
_conn = ClientConnections::threadInstance()->get( _addr , _ns );
_finishedInit = false;
}
@@ -188,12 +188,12 @@ namespace mongo {
if ( _ns.size() && versionManager.isVersionableCB( _conn ) ) {
// Make sure we specified a manager for the correct namespace
- if( _manager ) assert( _manager->getns() == _ns );
+ if( _manager ) verify( _manager->getns() == _ns );
_setVersion = versionManager.checkShardVersionCB( this , false , 1 );
}
else {
// Make sure we didn't specify a manager for an empty namespace
- assert( ! _manager );
+ verify( ! _manager );
_setVersion = false;
}
@@ -221,7 +221,7 @@ namespace mongo {
}
bool ShardConnection::runCommand( const string& db , const BSONObj& cmd , BSONObj& res ) {
- assert( _conn );
+ verify( _conn );
bool ok = _conn->runCommand( db , cmd , res );
if ( ! ok ) {
if ( res["code"].numberInt() == SendStaleConfigCode ) {
diff --git a/src/mongo/s/shardkey.cpp b/src/mongo/s/shardkey.cpp
index 41aa5224094..f926bba314b 100644
--- a/src/mongo/s/shardkey.cpp
+++ b/src/mongo/s/shardkey.cpp
@@ -156,44 +156,44 @@ namespace mongo {
void testIsPrefixOf() {
{
ShardKeyPattern k( BSON( "x" << 1 ) );
- assert( ! k.isPrefixOf( BSON( "a" << 1 ) ) );
- assert( k.isPrefixOf( BSON( "x" << 1 ) ) );
- assert( k.isPrefixOf( BSON( "x" << 1 << "a" << 1 ) ) );
- assert( ! k.isPrefixOf( BSON( "a" << 1 << "x" << 1 ) ) );
+ verify( ! k.isPrefixOf( BSON( "a" << 1 ) ) );
+ verify( k.isPrefixOf( BSON( "x" << 1 ) ) );
+ verify( k.isPrefixOf( BSON( "x" << 1 << "a" << 1 ) ) );
+ verify( ! k.isPrefixOf( BSON( "a" << 1 << "x" << 1 ) ) );
}
{
ShardKeyPattern k( BSON( "x" << 1 << "y" << 1 ) );
- assert( ! k.isPrefixOf( BSON( "x" << 1 ) ) );
- assert( ! k.isPrefixOf( BSON( "x" << 1 << "z" << 1 ) ) );
- assert( k.isPrefixOf( BSON( "x" << 1 << "y" << 1 ) ) );
- assert( k.isPrefixOf( BSON( "x" << 1 << "y" << 1 << "z" << 1 ) ) );
+ verify( ! k.isPrefixOf( BSON( "x" << 1 ) ) );
+ verify( ! k.isPrefixOf( BSON( "x" << 1 << "z" << 1 ) ) );
+ verify( k.isPrefixOf( BSON( "x" << 1 << "y" << 1 ) ) );
+ verify( k.isPrefixOf( BSON( "x" << 1 << "y" << 1 << "z" << 1 ) ) );
}
}
void hasshardkeytest() {
BSONObj x = fromjson("{ zid : \"abcdefg\", num: 1.0, name: \"eliot\" }");
ShardKeyPattern k( BSON( "num" << 1 ) );
- assert( k.hasShardKey(x) );
- assert( !k.hasShardKey( fromjson("{foo:'a'}") ) );
- assert( !k.hasShardKey( fromjson("{x: {$gt: 1}}") ) );
+ verify( k.hasShardKey(x) );
+ verify( !k.hasShardKey( fromjson("{foo:'a'}") ) );
+ verify( !k.hasShardKey( fromjson("{x: {$gt: 1}}") ) );
// try compound key
{
ShardKeyPattern k( fromjson("{a:1,b:-1,c:1}") );
- assert( k.hasShardKey( fromjson("{foo:'a',a:'b',c:'z',b:9,k:99}") ) );
- assert( !k.hasShardKey( fromjson("{foo:'a',a:'b',c:'z',bb:9,k:99}") ) );
- assert( !k.hasShardKey( fromjson("{k:99}") ) );
+ verify( k.hasShardKey( fromjson("{foo:'a',a:'b',c:'z',b:9,k:99}") ) );
+ verify( !k.hasShardKey( fromjson("{foo:'a',a:'b',c:'z',bb:9,k:99}") ) );
+ verify( !k.hasShardKey( fromjson("{k:99}") ) );
}
// try dotted key
{
ShardKeyPattern k( fromjson("{'a.b':1}") );
- assert( k.hasShardKey( fromjson("{a:{b:1,c:1},d:1}") ) );
- assert( k.hasShardKey( fromjson("{'a.b':1}") ) );
- assert( !k.hasShardKey( fromjson("{'a.c':1}") ) );
- assert( !k.hasShardKey( fromjson("{a:{c:1},d:1}") ) );
- assert( !k.hasShardKey( fromjson("{a:1}") ) );
- assert( !k.hasShardKey( fromjson("{b:1}") ) );
+ verify( k.hasShardKey( fromjson("{a:{b:1,c:1},d:1}") ) );
+ verify( k.hasShardKey( fromjson("{'a.b':1}") ) );
+ verify( !k.hasShardKey( fromjson("{'a.c':1}") ) );
+ verify( !k.hasShardKey( fromjson("{a:{c:1},d:1}") ) );
+ verify( !k.hasShardKey( fromjson("{a:1}") ) );
+ verify( !k.hasShardKey( fromjson("{b:1}") ) );
}
}
@@ -202,8 +202,8 @@ namespace mongo {
ShardKeyPattern k( fromjson("{a:1,'sub.b':-1,'sub.c':1}") );
BSONObj x = fromjson("{a:1,'sub.b':2,'sub.c':3}");
- assert( k.extractKey( fromjson("{a:1,sub:{b:2,c:3}}") ).binaryEqual(x) );
- assert( k.extractKey( fromjson("{sub:{b:2,c:3},a:1}") ).binaryEqual(x) );
+ verify( k.extractKey( fromjson("{a:1,sub:{b:2,c:3}}") ).binaryEqual(x) );
+ verify( k.extractKey( fromjson("{sub:{b:2,c:3},a:1}") ).binaryEqual(x) );
}
void moveToFrontTest() {
ShardKeyPattern sk (BSON("a" << 1 << "b" << 1));
@@ -211,13 +211,13 @@ namespace mongo {
BSONObj ret;
ret = sk.moveToFront(BSON("z" << 1 << "_id" << 1 << "y" << 1 << "a" << 1 << "x" << 1 << "b" << 1 << "w" << 1));
- assert(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
+ verify(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
ret = sk.moveToFront(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1));
- assert(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
+ verify(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
ret = sk.moveToFront(BSON("z" << 1 << "y" << 1 << "a" << 1 << "b" << 1 << "Z" << 1 << "Y" << 1));
- assert(ret.binaryEqual(BSON("a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "Z" << 1 << "Y" << 1)));
+ verify(ret.binaryEqual(BSON("a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "Z" << 1 << "Y" << 1)));
}
@@ -255,19 +255,19 @@ namespace mongo {
BSONObj k1 = BSON( "key" << 5 );
- assert( k.compare( min , max ) < 0 );
- assert( k.compare( min , k1 ) < 0 );
- assert( k.compare( max , min ) > 0 );
- assert( k.compare( min , min ) == 0 );
+ verify( k.compare( min , max ) < 0 );
+ verify( k.compare( min , k1 ) < 0 );
+ verify( k.compare( max , min ) > 0 );
+ verify( k.compare( min , min ) == 0 );
hasshardkeytest();
- assert( k.hasShardKey( k1 ) );
- assert( ! k.hasShardKey( BSON( "key2" << 1 ) ) );
+ verify( k.hasShardKey( k1 ) );
+ verify( ! k.hasShardKey( BSON( "key2" << 1 ) ) );
BSONObj a = k1;
BSONObj b = BSON( "key" << 999 );
- assert( k.compare(a,b) < 0 );
+ verify( k.compare(a,b) < 0 );
testIsPrefixOf();
// add middle multitype tests
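
testIsPrefixOf above pins down ordered-prefix semantics for shard keys: the pattern's fields must lead the other pattern in the same order, so { x : 1 } prefixes { x : 1, a : 1 } but not { a : 1, x : 1 }. A sketch with BSON field ordering reduced to vectors of names:

    // Sketch of the ordered-prefix check exercised above.
    #include <cassert>
    #include <string>
    #include <vector>

    static bool isPrefixOf(const std::vector<std::string>& key,
                           const std::vector<std::string>& other) {
        if (other.size() < key.size()) return false;
        for (size_t i = 0; i < key.size(); ++i)
            if (key[i] != other[i]) return false;
        return true;
    }

    int main() {
        std::vector<std::string> k{"x"};
        assert( !isPrefixOf(k, {"a"}) );
        assert(  isPrefixOf(k, {"x"}) );
        assert(  isPrefixOf(k, {"x", "a"}) );
        assert( !isPrefixOf(k, {"a", "x"}) );  // order matters

        std::vector<std::string> k2{"x", "y"};
        assert( !isPrefixOf(k2, {"x"}) );      // other key is too short
        assert(  isPrefixOf(k2, {"x", "y", "z"}) );
    }
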
diff --git a/src/mongo/s/strategy.h b/src/mongo/s/strategy.h
index 8ed9ece4603..afb66bd6652 100644
--- a/src/mongo/s/strategy.h
+++ b/src/mongo/s/strategy.h
@@ -40,7 +40,7 @@ namespace mongo {
{
// Only call this from sharded, for now.
// TODO: Refactor all this.
- assert( false );
+ verify( false );
}
// These interfaces will merge soon, so make it easy to share logic
diff --git a/src/mongo/s/strategy_shard.cpp b/src/mongo/s/strategy_shard.cpp
index cf1fcad4300..10b99e339e9 100644
--- a/src/mongo/s/strategy_shard.cpp
+++ b/src/mongo/s/strategy_shard.cpp
@@ -54,7 +54,7 @@ namespace mongo {
QuerySpec qSpec( (string)q.ns, q.query, q.fields, q.ntoskip, q.ntoreturn, q.queryOptions );
ParallelSortClusteredCursor * cursor = new ParallelSortClusteredCursor( qSpec, CommandInfo() );
- assert( cursor );
+ verify( cursor );
// TODO: Move out to Request itself, not strategy based
try {
@@ -411,7 +411,7 @@ namespace mongo {
shard = c->getShard();
}
- assert(shard != Shard());
+ verify(shard != Shard());
doWrite( dbUpdate , r , shard );
if ( c &&r.getClientInfo()->autoSplitOk() )
@@ -543,7 +543,7 @@ namespace mongo {
r.getConfig()->getChunkManager( ns )->getShardKey().isPrefixOf( newIndexKey ) );
ChunkManagerPtr cm = r.getConfig()->getChunkManager( ns );
- assert( cm );
+ verify( cm );
set<Shard> shards;
cm->getAllShards(shards);
diff --git a/src/mongo/s/strategy_single.cpp b/src/mongo/s/strategy_single.cpp
index 560b3b03fdf..7988a084d35 100644
--- a/src/mongo/s/strategy_single.cpp
+++ b/src/mongo/s/strategy_single.cpp
@@ -114,7 +114,7 @@ namespace mongo {
// Deprecated
virtual void writeOp( int op , Request& r ) {
// Don't use anymore, requires single-step detection of chunk manager or primary
- assert( 0 );
+ verify( 0 );
}
bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
diff --git a/src/mongo/s/util.h b/src/mongo/s/util.h
index ee562e9a82e..2f0354a96da 100644
--- a/src/mongo/s/util.h
+++ b/src/mongo/s/util.h
@@ -55,7 +55,7 @@ namespace mongo {
else {
_combined = 0;
log() << "ShardChunkVersion can't handle type (" << (int)(e.type()) << ") " << e << endl;
- assert(0);
+ verify(0);
}
}
diff --git a/src/mongo/scripting/bench.cpp b/src/mongo/scripting/bench.cpp
index 5f9697992ac..8b106258938 100644
--- a/src/mongo/scripting/bench.cpp
+++ b/src/mongo/scripting/bench.cpp
@@ -121,10 +121,10 @@ namespace mongo {
}
static void _fixField( BSONObjBuilder& b , const BSONElement& e ) {
- assert( e.type() == Object );
+ verify( e.type() == Object );
BSONObj sub = e.Obj();
- assert( sub.nFields() == 1 );
+ verify( sub.nFields() == 1 );
BSONElement f = sub.firstElement();
if ( str::equals( "#RAND_INT" , f.fieldName() ) ) {
@@ -213,7 +213,7 @@ namespace mongo {
if( check ){
if ( e["check"].type() == CodeWScope || e["check"].type() == Code || e["check"].type() == String ) {
scope = globalScriptEngine->getPooledScope( ns );
- assert( scope.get() );
+ verify( scope.get() );
if ( e.type() == CodeWScope ) {
scopeFunc = scope->createFunction( e["check"].codeWScopeCode() );
@@ -224,7 +224,7 @@ namespace mongo {
}
scope->init( &scopeObj );
- assert( scopeFunc );
+ verify( scopeFunc );
}
else {
warning() << "Invalid check type detected in benchRun op : " << e << endl;
@@ -288,7 +288,7 @@ namespace mongo {
if ( expected >= 0 && count != expected ) {
cout << "bench query on: " << ns << " expected: " << expected << " got: " << cout << endl;
- assert(false);
+ verify(false);
}
if( check ){
@@ -663,7 +663,7 @@ namespace mongo {
* benchRun( { ops : [] , host : XXX , db : XXXX , parallel : 5 , seconds : 5 }
*/
BSONObj benchRun( const BSONObj& argsFake, void* data ) {
- assert( argsFake.firstElement().isABSONObj() );
+ verify( argsFake.firstElement().isABSONObj() );
BSONObj args = argsFake.firstElement().Obj();
// setup
@@ -757,7 +757,7 @@ namespace mongo {
*/
BSONObj benchRunSync( const BSONObj& argsFake, void* data ) {
- assert( argsFake.firstElement().isABSONObj() );
+ verify( argsFake.firstElement().isABSONObj() );
BSONObj args = argsFake.firstElement().Obj();
// Get new BenchRunner object
@@ -774,7 +774,7 @@ namespace mongo {
*/
BSONObj benchStart( const BSONObj& argsFake, void* data ) {
- assert( argsFake.firstElement().isABSONObj() );
+ verify( argsFake.firstElement().isABSONObj() );
BSONObj args = argsFake.firstElement().Obj();
// Get new BenchRunner object
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index 9ba7915ee9c..9efa4e37d05 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -100,7 +100,7 @@ namespace mongo {
if ( ! exists( p ) ) {
log() << "file [" << filename << "] doesn't exist" << endl;
if ( assertOnError )
- assert( 0 );
+ verify( 0 );
return false;
}
@@ -120,7 +120,7 @@ namespace mongo {
if (empty) {
log() << "directory [" << filename << "] doesn't have any *.js files" << endl;
if ( assertOnError )
- assert( 0 );
+ verify( 0 );
return false;
}
@@ -133,7 +133,7 @@ namespace mongo {
unsigned L;
{
fileofs fo = f.len();
- assert( fo <= 0x7ffffffe );
+ verify( fo <= 0x7ffffffe );
L = (unsigned) fo;
}
boost::scoped_array<char> data (new char[L+1]);
@@ -186,7 +186,7 @@ namespace mongo {
static DBClientBase * db = createDirectClient();
auto_ptr<DBClientCursor> c = db->query( coll , Query(), 0, 0, NULL, QueryOption_SlaveOk, 0 );
- assert( c.get() );
+ verify( c.get() );
set<string> thisTime;
@@ -274,7 +274,7 @@ namespace mongo {
}
~ScopeCache() {
- assert( _magic == 17 );
+ verify( _magic == 17 );
_magic = 1;
if ( inShutdown() )
@@ -323,7 +323,7 @@ namespace mongo {
for ( PoolToScopes::iterator i=_pools.begin() ; i != _pools.end(); i++ ) {
for ( list<Scope*>::iterator j=i->second.begin(); j != i->second.end(); j++ ) {
Scope * s = *j;
- assert( ! seen.count( s ) );
+ verify( ! seen.count( s ) );
delete s;
seen.insert( s );
}
diff --git a/src/mongo/scripting/engine_spidermonkey.cpp b/src/mongo/scripting/engine_spidermonkey.cpp
index 1332e174867..e3feb8dabd6 100644
--- a/src/mongo/scripting/engine_spidermonkey.cpp
+++ b/src/mongo/scripting/engine_spidermonkey.cpp
@@ -21,8 +21,6 @@
#ifndef _WIN32
#include <boost/date_time/posix_time/posix_time.hpp>
-#undef assert
-#define assert MONGO_assert
#endif
#define smuassert( cx , msg , val ) \
@@ -254,13 +252,13 @@ namespace mongo {
bool toBoolean( jsval v ) {
JSBool b;
- assert( JS_ValueToBoolean( _context, v , &b ) );
+ verify( JS_ValueToBoolean( _context, v , &b ) );
return b;
}
OID toOID( jsval v ) {
JSContext * cx = _context;
- assert( JSVAL_IS_OID( v ) );
+ verify( JSVAL_IS_OID( v ) );
JSObject * o = JSVAL_TO_OBJECT( v );
OID oid;
@@ -274,14 +272,14 @@ namespace mongo {
if ( JS_InstanceOf( _context , o , &bson_ro_class , 0 ) ) {
BSONHolder * holder = GETHOLDER( _context , o );
- assert( holder );
+ verify( holder );
return holder->_obj.getOwned();
}
BSONObj orig;
if ( JS_InstanceOf( _context , o , &bson_class , 0 ) ) {
BSONHolder * holder = GETHOLDER(_context,o);
- assert( holder );
+ verify( holder );
if ( ! holder->_modified ) {
return holder->_obj;
}
@@ -300,12 +298,12 @@ namespace mongo {
}
JSIdArray * properties = JS_Enumerate( _context , o );
- assert( properties );
+ verify( properties );
for ( jsint i=0; i<properties->length; i++ ) {
jsid id = properties->vector[i];
jsval nameval;
- assert( JS_IdToValue( _context ,id , &nameval ) );
+ verify( JS_IdToValue( _context ,id , &nameval ) );
string name = toString( nameval );
if ( stack.isTop() && name == "_id" )
continue;
@@ -338,7 +336,7 @@ namespace mongo {
}
void appendRegex( BSONObjBuilder& b , const string& name , string s ) {
- assert( s[0] == '/' );
+ verify( s[0] == '/' );
s = s.substr(1);
string::size_type end = s.rfind( '/' );
b.appendRegex( name , s.substr( 0 , end ) , s.substr( end + 1 ) );
@@ -457,7 +455,7 @@ namespace mongo {
string code = raw;
size_t start = code.find( '(' );
- assert( start != string::npos );
+ verify( start != string::npos );
string fbase;
if ( start > 9 ) {
@@ -470,7 +468,7 @@ namespace mongo {
code = code.substr( start + 1 );
size_t end = code.find( ')' );
- assert( end != string::npos );
+ verify( end != string::npos );
string paramString = trim( code.substr( 0 , end ) );
code = code.substr( end + 1 );
@@ -508,7 +506,7 @@ namespace mongo {
jsval toval( double d ) {
jsval val;
- assert( JS_NewNumberValue( _context, d , &val ) );
+ verify( JS_NewNumberValue( _context, d , &val ) );
return val;
}
@@ -547,12 +545,12 @@ namespace mongo {
if ( ref == obj->firstElementFieldName() ) {
JSObject * o = JS_NewObject( _context , &dbref_class , NULL, NULL);
CHECKNEWOBJECT(o,_context,"toJSObject1");
- assert( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
+ verify( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
return o;
}
JSObject * o = JS_NewObject( _context , readOnly ? &bson_ro_class : &bson_class , NULL, NULL);
CHECKNEWOBJECT(o,_context,"toJSObject2");
- assert( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
+ verify( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
return o;
}
@@ -631,7 +629,7 @@ namespace mongo {
while ( i.more() ){
const BSONElement& e = i.next();
jsval v = toval( e );
- assert( JS_SetElement( _context , array , atoi(e.fieldName()) , &v ) );
+ verify( JS_SetElement( _context , array , atoi(e.fieldName()) , &v ) );
}
return myarray;
@@ -660,7 +658,7 @@ namespace mongo {
}
JSObject * r = JS_NewRegExpObject( _context , (char*)e.regex() , strlen( e.regex() ) , flagNumber );
- assert( r );
+ verify( r );
return OBJECT_TO_JSVAL( r );
}
case Code: {
@@ -717,8 +715,8 @@ namespace mongo {
CHECKNEWOBJECT(o,_context,"Bindata_BinData1");
int len;
const char * data = e.binData( len );
- assert( data );
- assert( JS_SetPrivate( _context , o , new BinDataHolder( data , len ) ) );
+ verify( data );
+ verify( JS_SetPrivate( _context , o , new BinDataHolder( data , len ) ) );
setProperty( o , "len" , toval( (double)len ) );
setProperty( o , "type" , toval( (double)e.binDataType() ) );
@@ -735,7 +733,7 @@ namespace mongo {
JSObject * getJSObject( JSObject * o , const char * name ) {
jsval v;
- assert( JS_GetProperty( _context , o , name , &v ) );
+ verify( JS_GetProperty( _context , o , name , &v ) );
return JSVAL_TO_OBJECT( v );
}
@@ -749,19 +747,19 @@ namespace mongo {
bool hasProperty( JSObject * o , const char * name ) {
JSBool res;
- assert( JS_HasProperty( _context , o , name , & res ) );
+ verify( JS_HasProperty( _context , o , name , & res ) );
return res;
}
jsval getProperty( JSObject * o , const char * field ) {
uassert( 10219 , "object passed to getProperty is null" , o );
jsval v;
- assert( JS_GetProperty( _context , o , field , &v ) );
+ verify( JS_GetProperty( _context , o , field , &v ) );
return v;
}
void setProperty( JSObject * o , const char * field , jsval v ) {
- assert( JS_SetProperty( _context , o , field , &v ) );
+ verify( JS_SetProperty( _context , o , field , &v ) );
}
string typeString( jsval v ) {
@@ -783,7 +781,7 @@ namespace mongo {
JSClass * getClass( JSObject * o , const char * field ) {
jsval v;
- assert( JS_GetProperty( _context , o , field , &v ) );
+ verify( JS_GetProperty( _context , o , field , &v ) );
if ( ! JSVAL_IS_OBJECT( v ) )
return 0;
return JS_GET_CLASS( _context , JSVAL_TO_OBJECT( v ) );
@@ -799,7 +797,7 @@ namespace mongo {
BSONHolder * o = GETHOLDER( cx , obj );
if ( o ) {
delete o;
- assert( JS_SetPrivate( cx , obj , 0 ) );
+ verify( JS_SetPrivate( cx , obj , 0 ) );
}
}
@@ -830,7 +828,7 @@ namespace mongo {
if ( it->more() ) {
string name = it->next();
Convertor c(cx);
- assert( JS_ValueToId( cx , c.toval( name.c_str() ) , idp ) );
+ verify( JS_ValueToId( cx , c.toval( name.c_str() ) , idp ) );
}
else {
delete it;
@@ -944,7 +942,7 @@ namespace mongo {
for( size_t i = 0; i+1 < s.size(); i += 2 ) {
*p++ = fromHex(src + i);
}
- assert( JS_SetPrivate( cx , o , new BinDataHolder( data , len ) ) );
+ verify( JS_SetPrivate( cx , o , new BinDataHolder( data , len ) ) );
Convertor c(cx);
c.setProperty( o, "len", c.toval((double)len) );
c.setProperty( o, "type", c.toval((double)subtype) );
@@ -1014,7 +1012,7 @@ namespace mongo {
NativeFunction func = (NativeFunction)((long long)c.getNumber( obj , "x" ) );
void* data = (void*)((long long)c.getNumber( obj , "y" ) );
- assert( func );
+ verify( func );
BSONObj a;
if ( argc > 0 ) {
@@ -1108,7 +1106,7 @@ namespace mongo {
// end Object helpers
JSBool resolveBSONField( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ) {
- assert( JS_EnterLocalRootScope( cx ) );
+ verify( JS_EnterLocalRootScope( cx ) );
Convertor c( cx );
BSONHolder * holder = GETHOLDER( cx , obj );
@@ -1140,9 +1138,9 @@ namespace mongo {
return JS_FALSE;
}
- assert( ! holder->_inResolve );
+ verify( ! holder->_inResolve );
holder->_inResolve = true;
- assert( JS_SetProperty( cx , obj , s.c_str() , &val ) );
+ verify( JS_SetProperty( cx , obj , s.c_str() , &val ) );
holder->_inResolve = false;
if ( val != JSVAL_NULL && val != JSVAL_VOID && JSVAL_IS_OBJECT( val ) ) {
@@ -1179,7 +1177,7 @@ namespace mongo {
}
int x = 0;
- assert( x = 1 );
+ verify( x = 1 );
uassert( 10222 , "assert not being executed" , x == 1 );
}
@@ -1282,7 +1280,7 @@ namespace mongo {
void reset() {
smlock;
- assert( _convertor );
+ verify( _convertor );
return;
if ( _this ) {
JS_RemoveRoot( _context , &_this );
@@ -1349,14 +1347,14 @@ namespace mongo {
double getNumber( const char *field ) {
smlock;
jsval val;
- assert( JS_GetProperty( _context , _global , field , &val ) );
+ verify( JS_GetProperty( _context , _global , field , &val ) );
return _convertor->toNumber( val );
}
string getString( const char *field ) {
smlock;
jsval val;
- assert( JS_GetProperty( _context , _global , field , &val ) );
+ verify( JS_GetProperty( _context , _global , field , &val ) );
JSString * s = JS_ValueToString( _context , val );
return _convertor->toString( s );
}
@@ -1379,7 +1377,7 @@ namespace mongo {
int type( const char *field ) {
smlock;
jsval val;
- assert( JS_GetProperty( _context , _global , field , &val ) );
+ verify( JS_GetProperty( _context , _global , field , &val ) );
switch ( JS_TypeOfValue( _context , val ) ) {
case JSTYPE_VOID: return Undefined;
@@ -1409,19 +1407,19 @@ namespace mongo {
void setElement( const char *field , const BSONElement& val ) {
smlock;
jsval v = _convertor->toval( val );
- assert( JS_SetProperty( _context , _global , field , &v ) );
+ verify( JS_SetProperty( _context , _global , field , &v ) );
}
void setNumber( const char *field , double val ) {
smlock;
jsval v = _convertor->toval( val );
- assert( JS_SetProperty( _context , _global , field , &v ) );
+ verify( JS_SetProperty( _context , _global , field , &v ) );
}
void setString( const char *field , const char * val ) {
smlock;
jsval v = _convertor->toval( val );
- assert( JS_SetProperty( _context , _global , field , &v ) );
+ verify( JS_SetProperty( _context , _global , field , &v ) );
}
void setObject( const char *field , const BSONObj& obj , bool readOnly ) {
@@ -1433,7 +1431,7 @@ namespace mongo {
void setBoolean( const char *field , bool val ) {
smlock;
jsval v = BOOLEAN_TO_JSVAL( val );
- assert( JS_SetProperty( _context , _global , field , &v ) );
+ verify( JS_SetProperty( _context , _global , field , &v ) );
}
void setThis( const BSONObj * obj ) {
@@ -1458,10 +1456,10 @@ namespace mongo {
void rename( const char * from , const char * to ) {
smlock;
jsval v;
- assert( JS_GetProperty( _context , _global , from , &v ) );
- assert( JS_SetProperty( _context , _global , to , &v ) );
+ verify( JS_GetProperty( _context , _global , from , &v ) );
+ verify( JS_SetProperty( _context , _global , to , &v ) );
v = JSVAL_VOID;
- assert( JS_SetProperty( _context , _global , from , &v ) );
+ verify( JS_SetProperty( _context , _global , from , &v ) );
}
// ---- functions -----
@@ -1575,7 +1573,7 @@ namespace mongo {
smlock;
precall();
- assert( JS_EnterLocalRootScope( _context ) );
+ verify( JS_EnterLocalRootScope( _context ) );
int nargs = args ? args->nFields() : 0;
scoped_array<jsval> smargsPtr( new jsval[nargs] );
@@ -1607,7 +1605,7 @@ namespace mongo {
}
if ( ! ignoreReturn ) {
- assert( JS_SetProperty( _context , _global , "return" , &rval ) );
+ verify( JS_SetProperty( _context , _global , "return" , &rval ) );
}
return 0;
@@ -1720,17 +1718,17 @@ namespace mongo {
s.localConnect( "foo" );
- s.exec( "assert( db.getMongo() )" );
- s.exec( "assert( db.bar , 'collection getting does not work' ); " );
+ s.exec( "verify( db.getMongo() )" );
+ s.exec( "verify( db.bar , 'collection getting does not work' ); " );
s.exec( "assert.eq( db._name , 'foo' );" );
- s.exec( "assert( _mongo == db.getMongo() ); " );
- s.exec( "assert( _mongo == db._mongo ); " );
- s.exec( "assert( typeof DB.bar == 'undefined' ); " );
- s.exec( "assert( typeof DB.prototype.bar == 'undefined' , 'resolution is happening on prototype, not object' ); " );
-
- s.exec( "assert( db.bar ); " );
- s.exec( "assert( typeof db.addUser == 'function' )" );
- s.exec( "assert( db.addUser == DB.prototype.addUser )" );
+ s.exec( "verify( _mongo == db.getMongo() ); " );
+ s.exec( "verify( _mongo == db._mongo ); " );
+ s.exec( "verify( typeof DB.bar == 'undefined' ); " );
+ s.exec( "verify( typeof DB.prototype.bar == 'undefined' , 'resolution is happening on prototype, not object' ); " );
+
+ s.exec( "verify( db.bar ); " );
+ s.exec( "verify( typeof db.addUser == 'function' )" );
+ s.exec( "verify( db.addUser == DB.prototype.addUser )" );
s.exec( "assert.eq( 'foo.bar' , db.bar._fullName ); " );
s.exec( "db.bar.verify();" );
@@ -1738,10 +1736,10 @@ namespace mongo {
s.exec( "assert.eq( 'foo.bar.silly' , db.bar.silly._fullName )" );
s.exec( "assert.eq( 'function' , typeof _mongo.find , 'mongo.find is not a function' )" );
- assert( (string)"abc" == trim( "abc" ) );
- assert( (string)"abc" == trim( " abc" ) );
- assert( (string)"abc" == trim( "abc " ) );
- assert( (string)"abc" == trim( " abc " ) );
+ verify( (string)"abc" == trim( "abc" ) );
+ verify( (string)"abc" == trim( " abc" ) );
+ verify( (string)"abc" == trim( "abc " ) );
+ verify( (string)"abc" == trim( " abc " ) );
}
@@ -1757,7 +1755,7 @@ namespace mongo {
uassert( 10229 , "need a scope" , scope );
JSObject * o = JS_GetFunctionObject( f );
- assert( o );
+ verify( o );
scope->addRoot( &o , name );
}
diff --git a/src/mongo/scripting/engine_v8.cpp b/src/mongo/scripting/engine_v8.cpp
index e12987aa3d4..15d0fba2f29 100644
--- a/src/mongo/scripting/engine_v8.cpp
+++ b/src/mongo/scripting/engine_v8.cpp
@@ -1486,7 +1486,7 @@ namespace mongo {
b.appendMaxKey( sname );
return;
default:
- assert( "invalid internal field" == 0 );
+ verify( "invalid internal field" == 0 );
}
}
string s = toSTLString( value );
diff --git a/src/mongo/scripting/sm_db.cpp b/src/mongo/scripting/sm_db.cpp
index 52c3b365fb3..1578b4b375f 100644
--- a/src/mongo/scripting/sm_db.cpp
+++ b/src/mongo/scripting/sm_db.cpp
@@ -66,7 +66,7 @@ namespace mongo {
CursorHolder( auto_ptr< DBClientCursor > &cursor, const shared_ptr< DBClientWithCommands > &connection ) :
connection_( connection ),
cursor_( cursor ) {
- assert( cursor_.get() );
+ verify( cursor_.get() );
}
DBClientCursor *get() const { return cursor_.get(); }
private:
@@ -82,7 +82,7 @@ namespace mongo {
JSBool internal_cursor_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
uassert( 10236 , "no args to internal_cursor_constructor" , argc == 0 );
- assert( JS_SetPrivate( cx , obj , 0 ) ); // just for safety
+ verify( JS_SetPrivate( cx , obj , 0 ) ); // just for safety
return JS_TRUE;
}
@@ -90,7 +90,7 @@ namespace mongo {
CursorHolder * holder = (CursorHolder*)JS_GetPrivate( cx , obj );
if ( holder ) {
delete holder;
- assert( JS_SetPrivate( cx , obj , 0 ) );
+ verify( JS_SetPrivate( cx , obj , 0 ) );
}
}
@@ -162,10 +162,10 @@ namespace mongo {
Convertor c( cx );
shared_ptr< DBClientWithCommands > client( createDirectClient() );
- assert( JS_SetPrivate( cx , obj , (void*)( new shared_ptr< DBClientWithCommands >( client ) ) ) );
+ verify( JS_SetPrivate( cx , obj , (void*)( new shared_ptr< DBClientWithCommands >( client ) ) ) );
jsval host = c.toval( "EMBEDDED" );
- assert( JS_SetProperty( cx , obj , "host" , &host ) );
+ verify( JS_SetProperty( cx , obj , "host" , &host ) );
return JS_TRUE;
}
@@ -203,9 +203,9 @@ namespace mongo {
return JS_FALSE;
}
- assert( JS_SetPrivate( cx , obj , (void*)( new shared_ptr< DBClientWithCommands >( conn ) ) ) );
+ verify( JS_SetPrivate( cx , obj , (void*)( new shared_ptr< DBClientWithCommands >( conn ) ) ) );
jsval host_val = c.toval( host.c_str() );
- assert( JS_SetProperty( cx , obj , "host" , &host_val ) );
+ verify( JS_SetProperty( cx , obj , "host" , &host_val ) );
return JS_TRUE;
}
@@ -220,7 +220,7 @@ namespace mongo {
shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
if ( connHolder ) {
delete connHolder;
- assert( JS_SetPrivate( cx , obj , 0 ) );
+ verify( JS_SetPrivate( cx , obj , 0 ) );
}
}
@@ -284,7 +284,7 @@ namespace mongo {
}
JSObject * mycursor = JS_NewObject( cx , &internal_cursor_class , 0 , 0 );
CHECKNEWOBJECT( mycursor, cx, "internal_cursor_class" );
- assert( JS_SetPrivate( cx , mycursor , new CursorHolder( cursor, *connHolder ) ) );
+ verify( JS_SetPrivate( cx , mycursor , new CursorHolder( cursor, *connHolder ) ) );
*rval = OBJECT_TO_JSVAL( mycursor );
return JS_TRUE;
}
@@ -430,10 +430,10 @@ namespace mongo {
JSBool db_collection_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
smuassert( cx , "db_collection_constructor wrong args" , argc == 4 );
- assert( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
- assert( JS_SetProperty( cx , obj , "_db" , &(argv[1]) ) );
- assert( JS_SetProperty( cx , obj , "_shortName" , &(argv[2]) ) );
- assert( JS_SetProperty( cx , obj , "_fullName" , &(argv[3]) ) );
+ verify( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
+ verify( JS_SetProperty( cx , obj , "_db" , &(argv[1]) ) );
+ verify( JS_SetProperty( cx , obj , "_shortName" , &(argv[2]) ) );
+ verify( JS_SetProperty( cx , obj , "_fullName" , &(argv[3]) ) );
Convertor c(cx);
if ( haveLocalShardingInfo( c.toString( argv[3] ) ) ) {
@@ -488,8 +488,8 @@ namespace mongo {
JSObject * doCreateCollection( JSContext * cx , JSObject * db , const string& shortName ) {
Convertor c(cx);
- assert( c.hasProperty( db , "_mongo" ) );
- assert( c.hasProperty( db , "_name" ) );
+ verify( c.hasProperty( db , "_mongo" ) );
+ verify( c.hasProperty( db , "_name" ) );
JSObject * coll = JS_NewObject( cx , &db_collection_class , 0 , 0 );
CHECKNEWOBJECT( coll, cx, "doCreateCollection" );
@@ -521,8 +521,8 @@ namespace mongo {
<< "valid database name";
smuassert( cx, msg.c_str(), NamespaceString::validDBName( dbName ) );
- assert( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
- assert( JS_SetProperty( cx , obj , "_name" , &(argv[1]) ) );
+ verify( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
+ verify( JS_SetProperty( cx , obj , "_name" , &(argv[1]) ) );
return JS_TRUE;
}
@@ -593,7 +593,7 @@ namespace mongo {
}
jsval v = c.toval( oid.str().c_str() );
- assert( JS_SetProperty( cx , obj , "str" , &v ) );
+ verify( JS_SetProperty( cx , obj , "str" , &v ) );
return JS_TRUE;
}
@@ -622,8 +622,8 @@ namespace mongo {
return JS_FALSE;
}
- assert( JS_SetProperty( cx , obj , "ns" , &(argv[0]) ) );
- assert( JS_SetProperty( cx , obj , "id" , &(argv[1]) ) );
+ verify( JS_SetProperty( cx , obj , "ns" , &(argv[0]) ) );
+ verify( JS_SetProperty( cx , obj , "id" , &(argv[1]) ) );
return JS_TRUE;
}
else {
@@ -655,15 +655,15 @@ namespace mongo {
if ( argc == 2 ) {
JSObject * o = JS_NewObject( cx , NULL , NULL, NULL );
CHECKNEWOBJECT( o, cx, "dbref_constructor" );
- assert( JS_SetProperty( cx, o , "$ref" , &argv[ 0 ] ) );
- assert( JS_SetProperty( cx, o , "$id" , &argv[ 1 ] ) );
+ verify( JS_SetProperty( cx, o , "$ref" , &argv[ 0 ] ) );
+ verify( JS_SetProperty( cx, o , "$id" , &argv[ 1 ] ) );
BSONObj bo = c.toObject( o );
- assert( JS_SetPrivate( cx , obj , (void*)(new BSONHolder( bo.getOwned() ) ) ) );
+ verify( JS_SetPrivate( cx , obj , (void*)(new BSONHolder( bo.getOwned() ) ) ) );
return JS_TRUE;
}
else {
JS_ReportError( cx , "DBRef needs 2 arguments" );
- assert( JS_SetPrivate( cx , obj , (void*)(new BSONHolder( BSONObj().getOwned() ) ) ) );
+ verify( JS_SetPrivate( cx , obj , (void*)(new BSONHolder( BSONObj().getOwned() ) ) ) );
return JS_FALSE;
}
}
@@ -699,7 +699,7 @@ namespace mongo {
zzz
- assert( JS_SetPrivate( cx, obj, new BinDataHolder( buf, 16 ) ) );
+ verify( JS_SetPrivate( cx, obj, new BinDataHolder( buf, 16 ) ) );
c.setProperty( obj, "len", c.toval( (double)16 ) );
c.setProperty( obj, "type", c.toval( (double)3 ) );
@@ -714,7 +714,7 @@ zzz
JSBool uuid_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
void *holder = JS_GetPrivate( cx, obj );
- assert( holder );
+ verify( holder );
const char *data = ( ( BinDataHolder* )( holder ) )->c_;
stringstream ss;
ss << "UUID(\"" << toHex(data, 16);
@@ -728,7 +728,7 @@ zzz
void *holder = JS_GetPrivate( cx, obj );
if ( holder ) {
delete ( BinDataHolder* )holder;
- assert( JS_SetPrivate( cx , obj , 0 ) );
+ verify( JS_SetPrivate( cx , obj , 0 ) );
}
}
@@ -773,7 +773,7 @@ zzz
return JS_FALSE;
}
- assert( JS_SetPrivate( cx, obj, new BinDataHolder( decoded.data(), decoded.length() ) ) );
+ verify( JS_SetPrivate( cx, obj, new BinDataHolder( decoded.data(), decoded.length() ) ) );
c.setProperty( obj, "len", c.toval( (double)decoded.length() ) );
c.setProperty( obj, "type", c.toval( (double)type ) );
@@ -790,7 +790,7 @@ zzz
int type = (int)c.getNumber( obj , "type" );
int len = (int)c.getNumber( obj, "len" );
void *holder = JS_GetPrivate( cx, obj );
- assert( holder );
+ verify( holder );
const char *data = ( ( BinDataHolder* )( holder ) )->c_;
stringstream ss;
ss << "BinData(" << type << ",\"";
@@ -804,7 +804,7 @@ zzz
Convertor c(cx);
int len = (int)c.getNumber( obj, "len" );
void *holder = JS_GetPrivate( cx, obj );
- assert( holder );
+ verify( holder );
const char *data = ( ( BinDataHolder* )( holder ) )->c_;
stringstream ss;
base64::encode( ss, (const char *)data, len );
@@ -816,7 +816,7 @@ zzz
Convertor c(cx);
int len = (int)c.getNumber( obj, "len" );
void *holder = JS_GetPrivate( cx, obj );
- assert( holder );
+ verify( holder );
const char *data = ( ( BinDataHolder* )( holder ) )->c_;
stringstream ss;
ss.setf (ios_base::hex , ios_base::basefield);
@@ -835,7 +835,7 @@ zzz
void *holder = JS_GetPrivate( cx, obj );
if ( holder ) {
delete ( BinDataHolder* )holder;
- assert( JS_SetPrivate( cx , obj , 0 ) );
+ verify( JS_SetPrivate( cx , obj , 0 ) );
}
}
@@ -1138,7 +1138,7 @@ zzz
return JS_TRUE;
jsval val = JSVAL_VOID;
- assert( JS_CallFunctionName( cx , obj , "arrayAccess" , 1 , &id , &val ) );
+ verify( JS_CallFunctionName( cx , obj , "arrayAccess" , 1 , &id , &val ) );
Convertor c(cx);
c.setProperty( obj , c.toString( id ).c_str() , val );
*objp = obj;
@@ -1156,31 +1156,31 @@ zzz
void initMongoJS( SMScope * scope , JSContext * cx , JSObject * global , bool local ) {
- assert( JS_InitClass( cx , global , 0 , &mongo_class , local ? mongo_local_constructor : mongo_external_constructor , 0 , 0 , mongo_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &mongo_class , local ? mongo_local_constructor : mongo_external_constructor , 0 , 0 , mongo_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &object_id_class , object_id_constructor , 0 , 0 , 0 , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &db_class , db_constructor , 2 , 0 , 0 , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &db_collection_class , db_collection_constructor , 4 , 0 , 0 , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &internal_cursor_class , internal_cursor_constructor , 0 , 0 , internal_cursor_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &dbquery_class , dbquery_constructor , 0 , 0 , 0 , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &dbpointer_class , dbpointer_constructor , 0 , 0 , dbpointer_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &bindata_class , bindata_constructor , 0 , 0 , bindata_functions , 0 , 0 ) );
-// assert( JS_InitClass( cx , global , 0 , &uuid_class , uuid_constructor , 0 , 0 , uuid_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &object_id_class , object_id_constructor , 0 , 0 , 0 , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &db_class , db_constructor , 2 , 0 , 0 , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &db_collection_class , db_collection_constructor , 4 , 0 , 0 , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &internal_cursor_class , internal_cursor_constructor , 0 , 0 , internal_cursor_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &dbquery_class , dbquery_constructor , 0 , 0 , 0 , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &dbpointer_class , dbpointer_constructor , 0 , 0 , dbpointer_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &bindata_class , bindata_constructor , 0 , 0 , bindata_functions , 0 , 0 ) );
+// verify( JS_InitClass( cx , global , 0 , &uuid_class , uuid_constructor , 0 , 0 , uuid_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &timestamp_class , timestamp_constructor , 0 , 0 , 0 , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &numberlong_class , numberlong_constructor , 0 , 0 , numberlong_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &numberint_class , numberint_constructor , 0 , 0 , numberint_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &minkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &maxkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &timestamp_class , timestamp_constructor , 0 , 0 , 0 , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &numberlong_class , numberlong_constructor , 0 , 0 , numberlong_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &numberint_class , numberint_constructor , 0 , 0 , numberint_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &minkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &maxkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &map_class , map_constructor , 0 , 0 , map_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &map_class , map_constructor , 0 , 0 , map_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &bson_ro_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
- assert( JS_InitClass( cx , global , 0 , &bson_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &bson_ro_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &bson_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
static const char *dbrefName = "DBRef";
dbref_class.name = dbrefName;
- assert( JS_InitClass( cx , global , 0 , &dbref_class , dbref_constructor , 2 , 0 , bson_functions , 0 , 0 ) );
+ verify( JS_InitClass( cx , global , 0 , &dbref_class , dbref_constructor , 2 , 0 , bson_functions , 0 , 0 ) );
scope->execCoreFiles();
}
diff --git a/src/mongo/scripting/utils.cpp b/src/mongo/scripting/utils.cpp
index 612b173fdf8..cf486666a5b 100644
--- a/src/mongo/scripting/utils.cpp
+++ b/src/mongo/scripting/utils.cpp
@@ -47,8 +47,8 @@ namespace mongo {
BSONObj JSSleep(const mongo::BSONObj &args, void* data) {
- assert( args.nFields() == 1 );
- assert( args.firstElement().isNumber() );
+ verify( args.nFields() == 1 );
+ verify( args.firstElement().isNumber() );
int ms = int( args.firstElement().number() );
{
auto_ptr< ScriptEngine::Unlocker > u = globalScriptEngine->newThreadUnlocker();
diff --git a/src/mongo/scripting/v8_db.cpp b/src/mongo/scripting/v8_db.cpp
index 5cd4c945016..4923393d08c 100644
--- a/src/mongo/scripting/v8_db.cpp
+++ b/src/mongo/scripting/v8_db.cpp
@@ -196,7 +196,7 @@ namespace mongo {
char host[255];
if ( args.Length() > 0 && args[0]->IsString() ) {
- assert( args[0]->ToString()->Utf8Length() < 250 );
+ verify( args[0]->ToString()->Utf8Length() < 250 );
args[0]->ToString()->WriteAscii( host );
}
else {
@@ -266,7 +266,7 @@ namespace mongo {
DBClientBase * getConnection( const Arguments& args ) {
Local<External> c = External::Cast( *(args.This()->GetInternalField( 0 )) );
DBClientBase * conn = (DBClientBase*)(c->Value());
- assert( conn );
+ verify( conn );
return conn;
}
@@ -559,13 +559,13 @@ namespace mongo {
// --- DB ----
v8::Handle<v8::Value> dbInit(V8Scope* scope, const v8::Arguments& args) {
- assert( args.Length() == 2 );
+ verify( args.Length() == 2 );
args.This()->Set( scope->getV8Str( "_mongo" ) , args[0] );
args.This()->Set( scope->getV8Str( "_name" ) , args[1] );
for ( int i=0; i<args.Length(); i++ )
- assert( ! args[i]->IsUndefined() );
+ verify( ! args[i]->IsUndefined() );
string dbName = toSTLString( args[1] );
if ( !NamespaceString::validDBName( dbName)) {
@@ -578,7 +578,7 @@ namespace mongo {
}
v8::Handle<v8::Value> collectionInit( V8Scope* scope, const v8::Arguments& args ) {
- assert( args.Length() == 4 );
+ verify( args.Length() == 4 );
args.This()->Set( scope->getV8Str( "_mongo" ) , args[0] );
args.This()->Set( scope->getV8Str( "_db" ) , args[1] );
@@ -589,7 +589,7 @@ namespace mongo {
return v8::ThrowException( v8::String::New( "can't use sharded collection from db.eval" ) );
for ( int i=0; i<args.Length(); i++ )
- assert( ! args[i]->IsUndefined() );
+ verify( ! args[i]->IsUndefined() );
return v8::Undefined();
}
@@ -598,7 +598,7 @@ namespace mongo {
v8::Handle<v8::Object> t = args.This();
- assert( args.Length() >= 4 );
+ verify( args.Length() >= 4 );
t->Set( scope->getV8Str( "_mongo" ) , args[0] );
t->Set( scope->getV8Str( "_db" ) , args[1] );
@@ -680,7 +680,7 @@ namespace mongo {
// no hit, create new collection
v8::Handle<v8::Value> getCollection = info.This()->GetPrototype()->ToObject()->Get( v8::String::New( "getCollection" ) );
- assert( getCollection->IsFunction() );
+ verify( getCollection->IsFunction() );
TryCatch tryCatch;
v8::Function * f = (v8::Function*)(*getCollection);
@@ -701,7 +701,7 @@ namespace mongo {
v8::Handle<v8::Value> dbQueryIndexAccess( unsigned int index , const v8::AccessorInfo& info ) {
v8::Handle<v8::Value> arrayAccess = info.This()->GetPrototype()->ToObject()->Get( v8::String::New( "arrayAccess" ) );
- assert( arrayAccess->IsFunction() );
+ verify( arrayAccess->IsFunction() );
v8::Function * f = (v8::Function*)(*arrayAccess);
v8::Handle<v8::Value> argv[1];
diff --git a/src/mongo/scripting/v8_utils.h b/src/mongo/scripting/v8_utils.h
index ca5d317885f..dadea6bcbd3 100644
--- a/src/mongo/scripting/v8_utils.h
+++ b/src/mongo/scripting/v8_utils.h
@@ -29,7 +29,7 @@ namespace mongo {
void ReportException(v8::TryCatch* handler);
-#define jsassert(x,msg) assert(x)
+#define jsassert(x,msg) verify(x)
std::ostream& operator<<( std::ostream &s, const v8::Handle<v8::Value> & o );
std::ostream& operator<<( std::ostream &s, const v8::Handle<v8::TryCatch> * try_catch );
diff --git a/src/mongo/scripting/v8_wrapper.cpp b/src/mongo/scripting/v8_wrapper.cpp
index 0f57d5bb8cc..b3bb7857ac3 100644
--- a/src/mongo/scripting/v8_wrapper.cpp
+++ b/src/mongo/scripting/v8_wrapper.cpp
@@ -62,10 +62,10 @@ namespace mongo {
WrapperHolder * getWrapper( v8::Handle<v8::Object> o ) {
Handle<v8::Value> t = o->GetRealNamedProperty( v8::String::New( "_wrapper" ) );
- assert( t->IsExternal() );
+ verify( t->IsExternal() );
Local<External> c = External::Cast( *t );
WrapperHolder * w = (WrapperHolder*)(c->Value());
- assert( w );
+ verify( w );
return w;
}
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index f85b7006314..78ef31bc39a 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -363,25 +363,25 @@ using mongo::asserted;
struct BalancedTest : public mongo::UnitTest {
public:
void run() {
- assert( isBalanced( "x = 5" ) );
- assert( isBalanced( "function(){}" ) );
- assert( isBalanced( "function(){\n}" ) );
- assert( ! isBalanced( "function(){" ) );
- assert( isBalanced( "x = \"{\";" ) );
- assert( isBalanced( "// {" ) );
- assert( ! isBalanced( "// \n {" ) );
- assert( ! isBalanced( "\"//\" {" ) );
- assert( isBalanced( "{x:/x\\//}" ) );
- assert( ! isBalanced( "{ \\/// }" ) );
- assert( isBalanced( "x = 5 + y ") );
- assert( ! isBalanced( "x = ") );
- assert( ! isBalanced( "x = // hello") );
- assert( ! isBalanced( "x = 5 +") );
- assert( isBalanced( " x ++") );
- assert( isBalanced( "-- x") );
- assert( !isBalanced( "a.") );
- assert( !isBalanced( "a. ") );
- assert( isBalanced( "a.b") );
+ verify( isBalanced( "x = 5" ) );
+ verify( isBalanced( "function(){}" ) );
+ verify( isBalanced( "function(){\n}" ) );
+ verify( ! isBalanced( "function(){" ) );
+ verify( isBalanced( "x = \"{\";" ) );
+ verify( isBalanced( "// {" ) );
+ verify( ! isBalanced( "// \n {" ) );
+ verify( ! isBalanced( "\"//\" {" ) );
+ verify( isBalanced( "{x:/x\\//}" ) );
+ verify( ! isBalanced( "{ \\/// }" ) );
+ verify( isBalanced( "x = 5 + y ") );
+ verify( ! isBalanced( "x = ") );
+ verify( ! isBalanced( "x = // hello") );
+ verify( ! isBalanced( "x = 5 +") );
+ verify( isBalanced( " x ++") );
+ verify( isBalanced( "-- x") );
+ verify( !isBalanced( "a.") );
+ verify( !isBalanced( "a. ") );
+ verify( isBalanced( "a.b") );
}
} balanced_test;
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
index d012a4c1f8b..f0561547e09 100644
--- a/src/mongo/shell/shell_utils.cpp
+++ b/src/mongo/shell/shell_utils.cpp
@@ -372,10 +372,10 @@ namespace mongo {
}
ProgramRunner( const BSONObj &args , bool isMongoProgram=true) {
- assert( !args.isEmpty() );
+ verify( !args.isEmpty() );
string program( args.firstElement().valuestrsafe() );
- assert( !program.empty() );
+ verify( !program.empty() );
boost::filesystem::path programPath = find(program);
if (isMongoProgram) {
@@ -406,7 +406,7 @@ namespace mongo {
str = ss.str();
}
else {
- assert( e.type() == mongo::String );
+ verify( e.type() == mongo::String );
str = e.valuestr();
}
if ( str == "--port" )
@@ -421,17 +421,17 @@ namespace mongo {
else {
if ( port_ <= 0 )
cout << "error: a port number is expected when running mongod (etc.) from the shell" << endl;
- assert( port_ > 0 );
+ verify( port_ > 0 );
}
if ( port_ > 0 && dbs.count( port_ ) != 0 ) {
cerr << "count for port: " << port_ << " is not 0 is: " << dbs.count( port_ ) << endl;
- assert( dbs.count( port_ ) == 0 );
+ verify( dbs.count( port_ ) == 0 );
}
}
void start() {
int pipeEnds[ 2 ];
- assert( pipe( pipeEnds ) != -1 );
+ verify( pipe( pipeEnds ) != -1 );
fflush( 0 );
launch_process(pipeEnds[1]); //sets pid_
@@ -467,11 +467,11 @@ namespace mongo {
cout << "error: lenToRead: " << lenToRead << endl;
cout << "first 300: " << string(buf,0,300) << endl;
}
- assert( lenToRead > 0 );
+ verify( lenToRead > 0 );
int ret = read( pipe_, (void *)start, lenToRead );
if( mongo::dbexitCalled )
break;
- assert( ret != -1 );
+ verify( ret != -1 );
start[ ret ] = '\0';
if ( strlen( start ) != unsigned( ret ) )
writeMongoProgramOutputLine( port_, pid_, "WARNING: mongod wrote null bytes to output" );
@@ -491,7 +491,7 @@ namespace mongo {
strcpy( buf, temp );
}
else {
- assert( strlen( buf ) < bufSize );
+ verify( strlen( buf ) < bufSize );
}
start = buf + strlen( buf );
}
@@ -526,8 +526,8 @@ namespace mongo {
args_tchar[i] = 0;
HANDLE h = (HANDLE)_get_osfhandle(child_stdout);
- assert(h != INVALID_HANDLE_VALUE);
- assert(SetHandleInformation(h, HANDLE_FLAG_INHERIT, 1));
+ verify(h != INVALID_HANDLE_VALUE);
+ verify(SetHandleInformation(h, HANDLE_FLAG_INHERIT, 1));
STARTUPINFO si;
ZeroMemory(&si, sizeof(si));
@@ -566,7 +566,7 @@ namespace mongo {
#else
pid_ = fork();
- assert( pid_ != -1 );
+ verify( pid_ != -1 );
if ( pid_ == 0 ) {
// DON'T ASSERT IN THIS BLOCK - very bad things will happen
@@ -608,7 +608,7 @@ namespace mongo {
//returns true if process exited
bool wait_for_pid(pid_t pid, bool block=true, int* exit_code=NULL) {
#ifdef _WIN32
- assert(handles.count(pid));
+ verify(handles.count(pid));
HANDLE h = handles[pid];
if (block)
@@ -690,9 +690,9 @@ namespace mongo {
}
BSONObj ResetDbpath( const BSONObj &a, void* data ) {
- assert( a.nFields() == 1 );
+ verify( a.nFields() == 1 );
string path = a.firstElement().valuestrsafe();
- assert( !path.empty() );
+ verify( !path.empty() );
if ( boost::filesystem::exists( path ) )
boost::filesystem::remove_all( path );
boost::filesystem::create_directory( path );
@@ -720,12 +720,12 @@ namespace mongo {
// NOTE target dbpath will be cleared first
BSONObj CopyDbpath( const BSONObj &a, void* data ) {
- assert( a.nFields() == 2 );
+ verify( a.nFields() == 2 );
BSONObjIterator i( a );
string from = i.next().str();
string to = i.next().str();
- assert( !from.empty() );
- assert( !to.empty() );
+ verify( !from.empty() );
+ verify( !to.empty() );
if ( boost::filesystem::exists( to ) )
boost::filesystem::remove_all( to );
boost::filesystem::create_directory( to );
@@ -736,7 +736,7 @@ namespace mongo {
inline void kill_wrapper(pid_t pid, int sig, int port) {
#ifdef _WIN32
if (sig == SIGKILL || port == 0) {
- assert( handles.count(pid) );
+ verify( handles.count(pid) );
TerminateProcess(handles[pid], 1); // returns failure for "zombie" processes.
}
else {
@@ -760,7 +760,7 @@ namespace mongo {
}
else {
cout << "killFailed: " << errnoWithDescription() << endl;
- assert( x == 0 );
+ verify( x == 0 );
}
}
@@ -801,7 +801,7 @@ namespace mongo {
time_t_to_String(time(0), now);
now[ 20 ] = 0;
cout << now << " failed to terminate process on port " << port << ", with pid " << pid << endl;
- assert( "Failed to terminate process" == 0 );
+ verify( "Failed to terminate process" == 0 );
}
if ( port > 0 ) {
@@ -827,7 +827,7 @@ namespace mongo {
BSONObjIterator i( a );
i.next();
BSONElement e = i.next();
- assert( e.isNumber() );
+ verify( e.isNumber() );
ret = int( e.number() );
}
return ret;
@@ -835,7 +835,7 @@ namespace mongo {
/** stopMongoProgram(port[, signal]) */
BSONObj StopMongoProgram( const BSONObj &a, void* data ) {
- assert( a.nFields() == 1 || a.nFields() == 2 );
+ verify( a.nFields() == 1 || a.nFields() == 2 );
uassert( 15853 , "stopMongo needs a number" , a.firstElement().isNumber() );
int port = int( a.firstElement().number() );
int code = killDb( port, 0, getSignal( a ) );
@@ -844,7 +844,7 @@ namespace mongo {
}
BSONObj StopMongoProgramByPid( const BSONObj &a, void* data ) {
- assert( a.nFields() == 1 || a.nFields() == 2 );
+ verify( a.nFields() == 1 || a.nFields() == 2 );
uassert( 15852 , "stopMongoByPid needs a number" , a.firstElement().isNumber() );
int pid = int( a.firstElement().number() );
int code = killDb( 0, pid, getSignal( a ) );
@@ -920,7 +920,7 @@ namespace mongo {
BSONObj getHostName(const BSONObj& a, void* data) {
uassert( 13411, "getHostName accepts no arguments", a.nFields() == 0 );
char buf[260]; // HOST_NAME_MAX is usually 255
- assert(gethostname(buf, 260) == 0);
+ verify(gethostname(buf, 260) == 0);
buf[259] = '\0';
return BSON("" << buf);
diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js
index a658e3f9a57..76cdd64bc77 100644
--- a/src/mongo/shell/utils.js
+++ b/src/mongo/shell/utils.js
@@ -66,6 +66,10 @@ assert = function( b , msg ){
doassert( msg == undefined ? "assert failed" : "assert failed : " + msg );
}
+// the mongo code uses verify
+// so this is to be nice to mongo devs
+verify = assert;
+
assert.automsg = function( b ) {
assert( eval( b ), b );
}
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
index 5dc4025cd1a..efde62eef30 100644
--- a/src/mongo/tools/dump.cpp
+++ b/src/mongo/tools/dump.cpp
@@ -292,7 +292,7 @@ public:
BSONObj obj;
try {
obj = loc.obj();
- assert( obj.valid() );
+ verify( obj.valid() );
LOG(1) << obj << endl;
w( obj );
}
@@ -454,7 +454,7 @@ public:
return -1;
}
- assert(op["ts"].type() == Timestamp);
+ verify(op["ts"].type() == Timestamp);
opLogStart = op["ts"]._numberLong();
}
diff --git a/src/mongo/tools/export.cpp b/src/mongo/tools/export.cpp
index c0671ea0689..7f10dc1e2f5 100644
--- a/src/mongo/tools/export.cpp
+++ b/src/mongo/tools/export.cpp
@@ -123,7 +123,7 @@ public:
return "";
}
// Can never get here
- assert(false);
+ verify(false);
return "";
}
diff --git a/src/mongo/tools/restore.cpp b/src/mongo/tools/restore.cpp
index 64702d6617c..e5b122cc5cd 100644
--- a/src/mongo/tools/restore.cpp
+++ b/src/mongo/tools/restore.cpp
@@ -307,7 +307,7 @@ public:
ns = "test";
}
- assert( ns.size() );
+ verify( ns.size() );
string oldCollName = root.leaf(); // Name of the collection that was dumped from
oldCollName = oldCollName.substr( 0 , oldCollName.find_last_of( "." ) );
diff --git a/src/mongo/tools/sniffer.cpp b/src/mongo/tools/sniffer.cpp
index aeab808cfed..5f160e3adce 100644
--- a/src/mongo/tools/sniffer.cpp
+++ b/src/mongo/tools/sniffer.cpp
@@ -174,7 +174,7 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
return;
}
- assert( ip->ip_p == IPPROTO_TCP );
+ verify( ip->ip_p == IPPROTO_TCP );
const struct sniff_tcp* tcp = (struct sniff_tcp*)(packet + captureHeaderSize + size_ip);
int size_tcp = TH_OFF(tcp)*4;
@@ -191,7 +191,7 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
const u_char * payload = (const u_char*)(packet + captureHeaderSize + size_ip + size_tcp);
unsigned totalSize = ntohs(ip->ip_len);
- assert( totalSize <= header->caplen );
+ verify( totalSize <= header->caplen );
int size_payload = totalSize - (size_ip + size_tcp);
if (size_payload <= 0 )
@@ -404,10 +404,10 @@ void processDiagLog( const char * file ) {
long length;
unsigned long long L = 0;
char * root = (char*)f.map( file , L, MemoryMappedFile::SEQUENTIAL );
- assert( L < 0x80000000 );
+ verify( L < 0x80000000 );
length = (long) L;
- assert( root );
- assert( length > 0 );
+ verify( root );
+ verify( length > 0 );
char * pos = root;
@@ -548,8 +548,8 @@ int main(int argc, char **argv) {
cerr << "don't know how to handle datalink type: " << pcap_datalink( handle ) << endl;
}
- assert( pcap_compile(handle, &fp, const_cast< char * >( "tcp" ) , 0, net) != -1 );
- assert( pcap_setfilter(handle, &fp) != -1 );
+ verify( pcap_compile(handle, &fp, const_cast< char * >( "tcp" ) , 0, net) != -1 );
+ verify( pcap_setfilter(handle, &fp) != -1 );
cout << "sniffing... ";
for ( set<int>::iterator i = serverPorts.begin(); i != serverPorts.end(); i++ )
diff --git a/src/mongo/tools/tool.cpp b/src/mongo/tools/tool.cpp
index eafae9cdfd0..12eaaa26b08 100644
--- a/src/mongo/tools/tool.cpp
+++ b/src/mongo/tools/tool.cpp
@@ -221,7 +221,7 @@ namespace mongo {
if ( _params.count( "directoryperdb" ) ) {
directoryperdb = true;
}
- assert( lastError.get( true ) );
+ verify( lastError.get( true ) );
if (_params.count("journal")){
cmdLine.dur = true;
@@ -488,13 +488,13 @@ namespace mongo {
while ( read < fileLength ) {
size_t amt = fread(buf, 1, 4, file);
- assert( amt == 4 );
+ verify( amt == 4 );
int size = ((int*)buf)[0];
uassert( 10264 , str::stream() << "invalid object size: " << size , size < BUF_SIZE );
amt = fread(buf+4, 1, size-4, file);
- assert( amt == (size_t)( size - 4 ) );
+ verify( amt == (size_t)( size - 4 ) );
BSONObj o( buf );
if ( _objcheck && ! o.valid() ) {
diff --git a/src/mongo/util/alignedbuilder.cpp b/src/mongo/util/alignedbuilder.cpp
index b2e0461b733..c17ed55922b 100644
--- a/src/mongo/util/alignedbuilder.cpp
+++ b/src/mongo/util/alignedbuilder.cpp
@@ -59,15 +59,15 @@ namespace mongo {
}
void AlignedBuilder::mallocSelfAligned(unsigned sz) {
- assert( sz == _p._size );
+ verify( sz == _p._size );
void *p = malloc(sz + Alignment - 1);
_p._allocationAddress = p;
size_t s = (size_t) p;
size_t sold = s;
s += Alignment - 1;
s = (s/Alignment)*Alignment;
- assert( s >= sold ); // begining
- assert( (s + sz) <= (sold + sz + Alignment - 1) ); //end
+ verify( s >= sold ); // beginning
+ verify( (s + sz) <= (sold + sz + Alignment - 1) ); // end
_p._data = (char *) s;
}
@@ -75,7 +75,7 @@ namespace mongo {
void NOINLINE_DECL AlignedBuilder::growReallocate(unsigned oldLen) {
dassert( _len > _p._size );
unsigned a = _p._size;
- assert( a );
+ verify( a );
while( 1 ) {
if( a < 128 * 1024 * 1024 )
a *= 2;
@@ -88,7 +88,7 @@ namespace mongo {
abort();
}
wassert( a <= 256*1024*1024 );
- assert( a <= 512*1024*1024 );
+ verify( a <= 512*1024*1024 );
if( _len < a )
break;
}
@@ -111,7 +111,7 @@ namespace mongo {
_p._data = (char *) p;
#else
mallocSelfAligned(sz);
- assert( ((size_t) _p._data) % Alignment == 0 );
+ verify( ((size_t) _p._data) % Alignment == 0 );
#endif
}
@@ -119,7 +119,7 @@ namespace mongo {
// posix_memalign alignment is not maintained on reallocs, so we can't use realloc().
AllocationInfo old = _p;
_malloc(newSize);
- assert( oldLen <= _len );
+ verify( oldLen <= _len );
memcpy(_p._data, old._data, oldLen);
_free(old._allocationAddress);
}
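A note on the pointer arithmetic in the mallocSelfAligned hunk above: the function over-allocates by Alignment - 1 bytes, then `s += Alignment - 1; s = (s/Alignment)*Alignment;` rounds the start address up to the next multiple of Alignment, and the two verify calls check precisely that the rounded pointer did not move backwards and still leaves sz usable bytes inside the slack. A minimal standalone sketch of the same trick (the names below are illustrative, not mongo's):

    #include <cstdio>
    #include <cstdlib>

    // Return a block of sz bytes whose start is a multiple of alignment.
    // Free raw, not aligned -- aligned points into the middle of raw.
    struct AlignedBlock { void* raw; char* aligned; };

    static AlignedBlock mallocAligned(size_t sz, size_t alignment) {
        void* p = std::malloc(sz + alignment - 1);   // slack for rounding up
        size_t s = (size_t)p;
        s += alignment - 1;                          // same rounding as the hunk above
        s = (s / alignment) * alignment;
        return AlignedBlock{ p, (char*)s };
    }

    int main() {
        AlignedBlock b = mallocAligned(4096, 8192);
        std::printf("start %% 8192 = %zu\n", (size_t)b.aligned % 8192);  // prints 0
        std::free(b.raw);
        return 0;
    }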
diff --git a/src/mongo/util/alignedbuilder.h b/src/mongo/util/alignedbuilder.h
index 1d246a9d78e..d55378cf2b9 100644
--- a/src/mongo/util/alignedbuilder.h
+++ b/src/mongo/util/alignedbuilder.h
@@ -87,7 +87,7 @@ namespace mongo {
void appendStr(const StringData &str , bool includeEOO = true ) {
const unsigned len = str.size() + ( includeEOO ? 1 : 0 );
- assert( len < (unsigned) BSONObjMaxUserSize );
+ verify( len < (unsigned) BSONObjMaxUserSize );
memcpy(grow(len), str.data(), len);
}
diff --git a/src/mongo/util/array.h b/src/mongo/util/array.h
index 12822252fd7..c765e8799b9 100644
--- a/src/mongo/util/array.h
+++ b/src/mongo/util/array.h
@@ -41,7 +41,7 @@ namespace mongo {
}
T& operator[]( int x ) {
- assert( x >= 0 && x < _capacity );
+ verify( x >= 0 && x < _capacity );
return _data[x];
}
@@ -50,7 +50,7 @@ namespace mongo {
}
void push_back( const T& t ) {
- assert( _size < _capacity );
+ verify( _size < _capacity );
_data[_size++] = t;
}
diff --git a/src/mongo/util/assert_util.cpp b/src/mongo/util/assert_util.cpp
index 8e6eb693a09..72adf1f913e 100644
--- a/src/mongo/util/assert_util.cpp
+++ b/src/mongo/util/assert_util.cpp
@@ -112,7 +112,7 @@ namespace mongo {
breakpoint();
#if defined(_DEBUG) || defined(_DURABLEDEFAULTON) || defined(_DURABLEDEFAULTOFF)
// this is so we notice in buildbot
- log() << "\n\n***aborting after assert() failure as this is a debug/test build\n\n" << endl;
+ log() << "\n\n***aborting after verify() failure as this is a debug/test build\n\n" << endl;
abort();
#endif
throw e;
@@ -219,7 +219,7 @@ namespace mongo {
NOINLINE_DECL ErrorMsg::ErrorMsg(const char *msg, char ch) {
int l = strlen(msg);
- assert( l < 128);
+ verify( l < 128);
memcpy(buf, msg, l);
char *p = buf + l;
p[0] = ch;
@@ -228,7 +228,7 @@ namespace mongo {
NOINLINE_DECL ErrorMsg::ErrorMsg(const char *msg, unsigned val) {
int l = strlen(msg);
- assert( l < 128);
+ verify( l < 128);
memcpy(buf, msg, l);
char *p = buf + l;
sprintf(p, "%u", val);
diff --git a/src/mongo/util/assert_util.h b/src/mongo/util/assert_util.h
index aac5c3dbcc5..d46df42d934 100644
--- a/src/mongo/util/assert_util.h
+++ b/src/mongo/util/assert_util.h
@@ -18,6 +18,7 @@
#pragma once
#include "../bson/inline_decls.h"
+#include <typeinfo>
// MONGO_NORETURN undefed at end of file
#ifdef __GNUC__
@@ -152,7 +153,6 @@ namespace mongo {
void asserted(const char *msg, const char *file, unsigned line) MONGO_NORETURN;
void wasserted(const char *msg, const char *file, unsigned line);
- void verifyFailed( int msgid );
void fassertFailed( int msgid );
/** a "user assertion". throws UserAssertion. logs. typically used for errors that a user
@@ -178,18 +178,12 @@ namespace mongo {
inline std::string causedBy( const std::exception& e ){ return causedBy( e.what() ); }
inline std::string causedBy( const std::string& e ){ return causedBy( e.c_str() ); }
- /** in the mongodb source, use verify() instead of assert(). verify is always evaluated even in release builds. */
- inline void verify( int msgid , bool testOK ) { if ( ! testOK ) verifyFailed( msgid ); }
-
/** abends on condition failure */
inline void fassert( int msgid , bool testOK ) { if ( ! testOK ) fassertFailed( msgid ); }
-#ifdef assert
-#undef assert
-#endif
-#define MONGO_assert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
-#define assert MONGO_assert
+#define MONGO_verify(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
+#define verify MONGO_verify
/* "user assert". if asserts, user did something wrong, not our code */
#define MONGO_uassert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || (mongo::uasserted(msgid, msg), 0) )
@@ -211,7 +205,7 @@ namespace mongo {
could be slow.
*/
#if defined(_DEBUG)
-# define MONGO_dassert assert
+# define MONGO_dassert verify
#else
# define MONGO_dassert(x)
#endif
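The assert_util.h hunk above is the crux of this change: the old two-argument verify( msgid, testOK ) helper and the `#define assert MONGO_assert` override are removed, and verify becomes a macro that, unlike <cassert>'s assert (a no-op under NDEBUG), evaluates its expression in every build and reports through mongo::asserted() on failure. The `verify( x = 1 )` self-test in the SpiderMonkey engine hunk earlier depends on exactly this always-evaluated property, and the utils.js hunk aliases `verify = assert` so shell-side JavaScript keeps resolving the name. A compilable sketch of the macro pattern, with a simplified stand-in for mongo::asserted() and the MONGO_likely() branch hint elided:

    #include <cstdio>
    #include <stdexcept>

    namespace mongo {
        // Stand-in only: the real asserted() logs, aborts in debug/test builds
        // (see the assert_util.cpp hunk above), and throws AssertionException.
        inline void asserted(const char* expr, const char* file, unsigned line) {
            std::fprintf(stderr, "assertion failure %s %s:%u\n", expr, file, line);
            throw std::runtime_error(expr);
        }
    }

    // Evaluated even when NDEBUG is defined -- side effects survive release builds.
    #define MONGO_verify(_Expression) \
        (void)( !!(_Expression) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
    #define verify MONGO_verify

    int main() {
        int x = 0;
        verify( x = 1 );          // the assignment still runs in release builds
        return x == 1 ? 0 : 1;    // exits 0: the expression was evaluated
    }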
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index ef3ee9426b9..50ade692e09 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -83,7 +83,7 @@ namespace mongo {
}
bool BackgroundJob::wait( unsigned msTimeOut ) {
- assert( !_status->deleteSelf ); // you cannot call wait on a self-deleting job
+ verify( !_status->deleteSelf ); // you cannot call wait on a self-deleting job
scoped_lock l( _status->m );
while ( _status->state != Done ) {
if ( msTimeOut ) {
diff --git a/src/mongo/util/base64.h b/src/mongo/util/base64.h
index 505b5d78cca..d5507bf0932 100644
--- a/src/mongo/util/base64.h
+++ b/src/mongo/util/base64.h
@@ -37,9 +37,9 @@ namespace mongo {
test();
}
void test() {
- assert( strlen( (char*)encode ) == 64 );
+ verify( strlen( (char*)encode ) == 64 );
for ( int i=0; i<26; i++ )
- assert( encode[i] == toupper( encode[i+26] ) );
+ verify( encode[i] == toupper( encode[i+26] ) );
}
char e( int x ) {
diff --git a/src/mongo/util/bufreader.h b/src/mongo/util/bufreader.h
index 58257ce011d..4cb34f483ab 100644
--- a/src/mongo/util/bufreader.h
+++ b/src/mongo/util/bufreader.h
@@ -64,7 +64,7 @@ namespace mongo {
/** back up by nbytes */
void rewind(unsigned nbytes) {
_pos = ((char *) _pos) - nbytes;
- assert( _pos >= _start );
+ verify( _pos >= _start );
}
/** return current position pointer, and advance by len */
diff --git a/src/mongo/util/concurrency/list.h b/src/mongo/util/concurrency/list.h
index 61bdd55f46f..c19fcea1f22 100644
--- a/src/mongo/util/concurrency/list.h
+++ b/src/mongo/util/concurrency/list.h
@@ -63,7 +63,7 @@ namespace mongo {
T* head() const { return (T*) _head; }
void push(T* t) {
- assert( t->_next == 0 );
+ verify( t->_next == 0 );
scoped_lock lk(_m);
t->_next = (T*) _head;
_head = t;
diff --git a/src/mongo/util/concurrency/mutex.h b/src/mongo/util/concurrency/mutex.h
index d3a7518e5bc..4ae6c78c9c6 100644
--- a/src/mongo/util/concurrency/mutex.h
+++ b/src/mongo/util/concurrency/mutex.h
@@ -22,6 +22,7 @@
#if defined(_DEBUG)
#include "mutexdebugger.h"
#endif
+#include "mongo/util/assert_util.h"
namespace mongo {
@@ -130,12 +131,12 @@ namespace mongo {
void lock() {
EnterCriticalSection(&_cs);
#if defined(_DEBUG)
- assert( _cs.RecursionCount == 1 );
+ verify( _cs.RecursionCount == 1 );
#endif
}
void dassertLocked() const {
#if defined(_DEBUG)
- assert( _cs.OwningThread == (HANDLE) GetCurrentThreadId() );
+ verify( _cs.OwningThread == (HANDLE) GetCurrentThreadId() );
#endif
}
void unlock() {
@@ -155,15 +156,15 @@ namespace mongo {
class SimpleMutex : boost::noncopyable {
public:
void dassertLocked() const { }
- SimpleMutex(const char* name) { assert( pthread_mutex_init(&_lock,0) == 0 ); }
+ SimpleMutex(const char* name) { verify( pthread_mutex_init(&_lock,0) == 0 ); }
~SimpleMutex(){
if ( ! StaticObserver::_destroyingStatics ) {
- assert( pthread_mutex_destroy(&_lock) == 0 );
+ verify( pthread_mutex_destroy(&_lock) == 0 );
}
}
- void lock() { assert( pthread_mutex_lock(&_lock) == 0 ); }
- void unlock() { assert( pthread_mutex_unlock(&_lock) == 0 ); }
+ void lock() { verify( pthread_mutex_lock(&_lock) == 0 ); }
+ void unlock() { verify( pthread_mutex_unlock(&_lock) == 0 ); }
public:
class scoped_lock : boost::noncopyable {
SimpleMutex& _m;
@@ -195,7 +196,7 @@ namespace mongo {
rm.m.lock();
}
~scoped_lock() {
- assert( nLocksByMe > 0 );
+ verify( nLocksByMe > 0 );
if( --nLocksByMe == 0 ) {
rm.m.unlock();
}
diff --git a/src/mongo/util/concurrency/mutexdebugger.h b/src/mongo/util/concurrency/mutexdebugger.h
index 6347d50171b..8c41a687490 100644
--- a/src/mongo/util/concurrency/mutexdebugger.h
+++ b/src/mongo/util/concurrency/mutexdebugger.h
@@ -46,7 +46,7 @@ namespace mongo {
void entering(mid m) {
if( this == 0 || m == 0 ) return;
- assert( magic == 0x12345678 );
+ verify( magic == 0x12345678 );
Preceeding *_preceeding = us.get();
if( _preceeding == 0 )
@@ -57,7 +57,7 @@ namespace mongo {
aBreakPoint();
if( preceeding[b.c_str()] ) {
cout << "****** MutexDebugger error! warning " << b << " was locked before " << a << endl;
- assert(false);
+ verify(false);
}
}
@@ -101,7 +101,7 @@ namespace mongo {
}
if( failed ) {
cout << err << endl;
- assert( 0 );
+ verify( 0 );
}
}
void leaving(mid m) {
@@ -110,7 +110,7 @@ namespace mongo {
preceeding[m]--;
if( preceeding[m] < 0 ) {
cout << "ERROR: lock count for " << m << " is " << preceeding[m] << endl;
- assert( preceeding[m] >= 0 );
+ verify( preceeding[m] >= 0 );
}
}
};
diff --git a/src/mongo/util/concurrency/qlock.h b/src/mongo/util/concurrency/qlock.h
index a87cfd25f2b..781b5dd35f3 100644
--- a/src/mongo/util/concurrency/qlock.h
+++ b/src/mongo/util/concurrency/qlock.h
@@ -82,13 +82,13 @@ namespace mongo {
case 'R' : return them == 'W' || them == 'w';
case 'w' : return them == 'W' || them == 'R';
case 'r' : return them == 'W';
- default : assert(false);
+ default : verify(false);
}
return false;
}
inline void QLock::notifyWeUnlocked(char me) {
- assert( W.n == 0 );
+ verify( W.n == 0 );
if( U.n ) {
// U is highest priority
if( r.n + w.n + W.n == 0 )
@@ -203,9 +203,9 @@ namespace mongo {
// downgrade from W state to R state
inline void QLock::W_to_R() {
boost::mutex::scoped_lock lk(m);
- assert( W.n == 1 );
- assert( R.n == 0 );
- assert( U.n == 0 );
+ verify( W.n == 1 );
+ verify( R.n == 0 );
+ verify( U.n == 0 );
W.n = 0;
R.n = 1;
notifyWeUnlocked('W');
@@ -216,7 +216,7 @@ namespace mongo {
// if two threads try to do this you will deadlock.
inline bool QLock::R_to_W() {
boost::mutex::scoped_lock lk(m);
- assert( R.n > 0 && W.n == 0 );
+ verify( R.n > 0 && W.n == 0 );
U.n++;
fassert( 16136, U.n == 1 ); // for now we only allow one upgrade attempter
int pass = 0;
@@ -230,7 +230,7 @@ namespace mongo {
R.n--;
W.n++;
U.n--;
- assert( R.n == 0 && W.n == 1 && U.n == 0 );
+ verify( R.n == 0 && W.n == 1 && U.n == 0 );
return true;
}
diff --git a/src/mongo/util/concurrency/rwlock.h b/src/mongo/util/concurrency/rwlock.h
index 076297ba791..38bb8105845 100644
--- a/src/mongo/util/concurrency/rwlock.h
+++ b/src/mongo/util/concurrency/rwlock.h
@@ -57,7 +57,7 @@ namespace mongo {
}
public:
void upgrade() { // upgradable -> exclusive lock
- assert( x == UpgradableState );
+ verify( x == UpgradableState );
RWLockBase::upgrade();
x = Exclusive;
}
@@ -83,7 +83,7 @@ namespace mongo {
public:
Upgradable(RWLock& r) : _r(r) {
r.lockAsUpgradable();
- assert( _r.x == NilState );
+ verify( _r.x == NilState );
_r.x = RWLock::UpgradableState;
}
~Upgradable() {
@@ -92,7 +92,7 @@ namespace mongo {
_r.unlockFromUpgradable();
}
else {
- //TEMP assert( _r.x == Exclusive ); // has been upgraded
+ //TEMP verify( _r.x == Exclusive ); // has been upgraded
_r.x = NilState;
_r.unlock();
}
@@ -164,7 +164,7 @@ namespace mongo {
RWLockRecursive(const char *name) : _name(name) { }
void assertExclusivelyLocked() {
- assert( _state.get() < 0 );
+ verify( _state.get() < 0 );
}
class Exclusive : boost::noncopyable {
diff --git a/src/mongo/util/concurrency/rwlockimpl.h b/src/mongo/util/concurrency/rwlockimpl.h
index 0144a24cad9..440b778eedd 100644
--- a/src/mongo/util/concurrency/rwlockimpl.h
+++ b/src/mongo/util/concurrency/rwlockimpl.h
@@ -83,7 +83,7 @@ namespace mongo {
friend class SimpleRWLock;
pthread_rwlock_t _lock;
static void check( int x ) {
- assert( x == 0 );
+ verify( x == 0 );
}
protected:
~RWLockBase() {
@@ -137,8 +137,6 @@ namespace mongo { typedef boost::modified_shared_mutex shared_mutex; }
# include <boost/thread/shared_mutex.hpp>
namespace mongo { using boost::shared_mutex; }
# endif
-# undef assert
-# define assert MONGO_assert
namespace mongo {
class RWLockBase : boost::noncopyable {
diff --git a/src/mongo/util/concurrency/synchronization.cpp b/src/mongo/util/concurrency/synchronization.cpp
index ff4e8047f84..0ad0519812e 100644
--- a/src/mongo/util/concurrency/synchronization.cpp
+++ b/src/mongo/util/concurrency/synchronization.cpp
@@ -34,7 +34,7 @@ namespace mongo {
void Notification::notifyOne() {
scoped_lock lock( _mutex );
- assert( cur != lookFor );
+ verify( cur != lookFor );
cur++;
_condition.notify_one();
}
diff --git a/src/mongo/util/concurrency/task.cpp b/src/mongo/util/concurrency/task.cpp
index 43ce340021b..786c93b4efd 100644
--- a/src/mongo/util/concurrency/task.cpp
+++ b/src/mongo/util/concurrency/task.cpp
@@ -46,7 +46,7 @@ namespace mongo {
void Task::halt() { repeat = 0; }
void Task::run() {
- assert( n == 0 );
+ verify( n == 0 );
while( 1 ) {
n++;
try {
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index 1c258847cb5..b64a904cd4a 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -40,8 +40,8 @@ namespace mongo {
}
void set_task(Task& func) {
- assert(!func.empty());
- assert(_is_done);
+ verify(!func.empty());
+ verify(_is_done);
_is_done = false;
_task.put(func);
@@ -87,10 +87,10 @@ namespace mongo {
ThreadPool::~ThreadPool() {
join();
- assert(_tasks.empty());
+ verify(_tasks.empty());
// O(n) but n should be small
- assert(_freeWorkers.size() == (unsigned)_nThreads);
+ verify(_freeWorkers.size() == (unsigned)_nThreads);
while(!_freeWorkers.empty()) {
delete _freeWorkers.front();
diff --git a/src/mongo/util/concurrency/thread_pool.h b/src/mongo/util/concurrency/thread_pool.h
index b348ed1d01b..ea2f801b63c 100644
--- a/src/mongo/util/concurrency/thread_pool.h
+++ b/src/mongo/util/concurrency/thread_pool.h
@@ -19,8 +19,6 @@
#include <boost/function.hpp>
#include <boost/bind.hpp>
-#undef assert
-#define assert MONGO_assert
namespace mongo {
diff --git a/src/mongo/util/debug_util.cpp b/src/mongo/util/debug_util.cpp
index 8ba6534ef7c..3c4489c9a1c 100644
--- a/src/mongo/util/debug_util.cpp
+++ b/src/mongo/util/debug_util.cpp
@@ -51,7 +51,7 @@ namespace mongo {
}
void setupSIGTRAPforGDB() {
- assert( signal(SIGTRAP , launchGDB ) != SIG_ERR );
+ verify( signal(SIGTRAP , launchGDB ) != SIG_ERR );
}
#else
void setupSIGTRAPforGDB() {
diff --git a/src/mongo/util/file.h b/src/mongo/util/file.h
index 368e6927b43..410c20d3651 100644
--- a/src/mongo/util/file.h
+++ b/src/mongo/util/file.h
@@ -45,13 +45,13 @@ namespace mongo {
bool bad() {return false;}
bool is_open() {return false;}
fileofs len() { return 0; }
- void fsync() { assert(false); }
+ void fsync() { verify(false); }
// shrink file to size bytes. No-op if file already smaller.
void truncate(fileofs size);
/** @return -1 if error or unavailable */
- static boost::intmax_t freeSpace(const string &path) { assert(false); return -1; }
+ static boost::intmax_t freeSpace(const string &path) { verify(false); return -1; }
};
#if defined(_WIN32)
@@ -210,7 +210,7 @@ namespace mongo {
void fsync() { ::fsync(fd); }
static boost::intmax_t freeSpace ( const string &path ) {
struct statvfs info;
- assert( !statvfs( path.c_str() , &info ) );
+ verify( !statvfs( path.c_str() , &info ) );
return boost::intmax_t( info.f_bavail ) * info.f_frsize;
}
diff --git a/src/mongo/util/file_allocator.cpp b/src/mongo/util/file_allocator.cpp
index 6a1744398c5..cb470f82446 100644
--- a/src/mongo/util/file_allocator.cpp
+++ b/src/mongo/util/file_allocator.cpp
@@ -52,7 +52,7 @@ namespace mongo {
flushMyDirectory(parent); // flushes grandparent to ensure parent exists after crash
}
- assert(boost::filesystem::is_directory(parent));
+ verify(boost::filesystem::is_directory(parent));
return parent;
}
diff --git a/src/mongo/util/goodies.h b/src/mongo/util/goodies.h
index c2ca29a24e5..6683ee189f9 100644
--- a/src/mongo/util/goodies.h
+++ b/src/mongo/util/goodies.h
@@ -18,8 +18,8 @@
#pragma once
-#include "../bson/util/misc.h"
#include "concurrency/mutex.h"
+#include "../bson/util/misc.h"
namespace mongo {
@@ -112,9 +112,6 @@ namespace mongo {
#define MONGO_FLOG log() << __FILE__ ":" << __LINE__ << endl
#define FLOG MONGO_FLOG
-#undef assert
-#define assert MONGO_assert
-
inline bool startsWith(const char *str, const char *prefix) {
size_t l = strlen(prefix);
if ( strlen(str) < l ) return false;
@@ -150,7 +147,7 @@ namespace mongo {
#if !defined(_WIN32)
typedef int HANDLE;
inline void strcpy_s(char *dst, unsigned len, const char *src) {
- assert( strlen(src) < len );
+ verify( strlen(src) < len );
strcpy(dst, src);
}
#else
diff --git a/src/mongo/util/hashtab.h b/src/mongo/util/hashtab.h
index f1a33068e07..19253d7c60d 100644
--- a/src/mongo/util/hashtab.h
+++ b/src/mongo/util/hashtab.h
@@ -111,7 +111,7 @@ namespace mongo {
if ( sizeof(Node) != 628 ) {
out() << "HashTable() " << _name << " sizeof(node):" << sizeof(Node) << " n:" << n << " sizeof(Key): " << sizeof(Key) << " sizeof(Type):" << sizeof(Type) << endl;
- assert( sizeof(Node) == 628 );
+ verify( sizeof(Node) == 628 );
}
}
@@ -147,7 +147,7 @@ namespace mongo {
n->hash = k.hash();
}
else {
- assert( n->hash == k.hash() );
+ verify( n->hash == k.hash() );
}
n->value = value;
return true;
diff --git a/src/mongo/util/hex.h b/src/mongo/util/hex.h
index 8cf30f2d9d3..54eb8de8102 100644
--- a/src/mongo/util/hex.h
+++ b/src/mongo/util/hex.h
@@ -26,7 +26,7 @@ namespace mongo {
return c - 'a' + 10;
if ( 'A' <= c && c <= 'F' )
return c - 'A' + 10;
- assert( false );
+ verify( false );
return 0xff;
}
inline char fromHex( const char *c ) {
diff --git a/src/mongo/util/log.cpp b/src/mongo/util/log.cpp
index 1f05eaee3b5..dd4a24223ca 100644
--- a/src/mongo/util/log.cpp
+++ b/src/mongo/util/log.cpp
@@ -62,7 +62,7 @@ namespace mongo {
cout << "logpath [" << lp << "] should be a filename, not a directory" << endl;
dbexit( EXIT_BADOPTIONS );
- assert( 0 );
+ verify( 0 );
}
if ( ! append ) {
@@ -78,7 +78,7 @@ namespace mongo {
cout << "log file [" << lp << "] exists and couldn't make backup; run with --logappend or manually remove file (" << strerror(errno) << ")" << endl;
dbexit( EXIT_BADOPTIONS );
- assert( 0 );
+ verify( 0 );
}
}
}
@@ -88,7 +88,7 @@ namespace mongo {
if ( ! test ) {
cout << "can't open [" << lp << "] for log file: " << errnoWithDescription() << endl;
dbexit( EXIT_BADOPTIONS );
- assert( 0 );
+ verify( 0 );
}
if (append && exists){
@@ -150,7 +150,7 @@ namespace mongo {
if ( !tmp ) {
cerr << "can't open: " << _path.c_str() << " for log file" << endl;
dbexit( EXIT_BADOPTIONS );
- assert( 0 );
+ verify( 0 );
}
// redirect stdout and stderr to log file
@@ -334,7 +334,7 @@ namespace mongo {
}
string out( b.buf() , b.len() - 1);
- assert( b.len() < spaceNeeded );
+ verify( b.len() < spaceNeeded );
scoped_lock lk(mutex);
diff --git a/src/mongo/util/logfile.cpp b/src/mongo/util/logfile.cpp
index 22da9607ed6..d6992465936 100644
--- a/src/mongo/util/logfile.cpp
+++ b/src/mongo/util/logfile.cpp
@@ -78,7 +78,7 @@ namespace mongo {
}
void LogFile::truncate() {
- assert(_fd != INVALID_HANDLE_VALUE);
+ verify(_fd != INVALID_HANDLE_VALUE);
if (!SetEndOfFile(_fd)){
msgasserted(15871, "Couldn't truncate file: " + errnoWithDescription());
@@ -91,7 +91,7 @@ namespace mongo {
memset(&o,0,sizeof(o));
(unsigned long long&) o.Offset = offset;
BOOL ok= WriteFile(_fd, _buf, _len, 0, &o);
- assert(ok);
+ verify(ok);
}
void LogFile::readAt(unsigned long long offset, void *_buf, size_t _len) {
@@ -105,14 +105,14 @@ namespace mongo {
string e = errnoWithDescription();
//DWORD e = GetLastError();
log() << "LogFile readAt(" << offset << ") len:" << _len << "errno:" << e << endl;
- assert(false);
+ verify(false);
}
}
void LogFile::synchronousAppend(const void *_buf, size_t _len) {
const size_t BlockSize = 8 * 1024 * 1024;
- assert(_fd);
- assert(_len % 4096 == 0);
+ verify(_fd);
+ verify(_len % 4096 == 0);
const char *buf = (const char *) _buf;
size_t left = _len;
while( left ) {
@@ -184,7 +184,7 @@ namespace mongo {
}
void LogFile::truncate() {
- assert(_fd >= 0);
+ verify(_fd >= 0);
BOOST_STATIC_ASSERT(sizeof(off_t) == 8); // we don't want overflow here
const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek
@@ -196,7 +196,7 @@ namespace mongo {
}
void LogFile::writeAt(unsigned long long offset, const void *buf, size_t len) {
- assert(((size_t)buf)%4096==0); // aligned
+ verify(((size_t)buf)%4096==0); // aligned
ssize_t written = pwrite(_fd, buf, len, offset);
if( written != (ssize_t) len ) {
log() << "writeAt fails " << errnoWithDescription() << endl;
@@ -209,9 +209,9 @@ namespace mongo {
}
void LogFile::readAt(unsigned long long offset, void *_buf, size_t _len) {
- assert(((size_t)_buf)%4096==0); // aligned
+ verify(((size_t)_buf)%4096==0); // aligned
ssize_t rd = pread(_fd, _buf, _len, offset);
- assert( rd != -1 );
+ verify( rd != -1 );
}
void LogFile::synchronousAppend(const void *b, size_t len) {
@@ -220,11 +220,11 @@ namespace mongo {
#endif
const char *buf = (char *) b;
- assert(_fd);
- assert(((size_t)buf)%4096==0); // aligned
+ verify(_fd);
+ verify(((size_t)buf)%4096==0); // aligned
if( len % 4096 != 0 ) {
log() << len << ' ' << len % 4096 << endl;
- assert(false);
+ verify(false);
}
ssize_t written = write(_fd, buf, len);
if( written != (ssize_t) len ) {
diff --git a/src/mongo/util/lruishmap.h b/src/mongo/util/lruishmap.h
index ba91bf6f0f6..031ef9ce059 100644
--- a/src/mongo/util/lruishmap.h
+++ b/src/mongo/util/lruishmap.h
@@ -43,7 +43,7 @@ namespace mongo {
int _find(const K& k, bool& found) {
int h = k.hash();
- assert( h > 0 );
+ verify( h > 0 );
int j = h % n;
int first = j;
for ( int i = 0; i < MaxChain; i++ ) {
diff --git a/src/mongo/util/mmap.cpp b/src/mongo/util/mmap.cpp
index 9865f16c484..29451ff2227 100755
--- a/src/mongo/util/mmap.cpp
+++ b/src/mongo/util/mmap.cpp
@@ -41,7 +41,7 @@ namespace mongo {
void *p = map(filename.c_str(), len);
if( p && zero ) {
size_t sz = (size_t) len;
- assert( len == sz );
+ verify( len == sz );
memset(p, 0, sz);
}
return p;
@@ -186,7 +186,7 @@ namespace mongo {
void MongoFile::setFilename(string fn) {
LockMongoFilesExclusive lk;
- assert( _filename.empty() );
+ verify( _filename.empty() );
_filename = fn;
MongoFile *&ptf = pathToFile[fn];
massert(13617, "MongoFile : multiple opens of same filename", ptf == 0);
diff --git a/src/mongo/util/mmap.h b/src/mongo/util/mmap.h
index f47f72c7c43..79fa787f244 100644
--- a/src/mongo/util/mmap.h
+++ b/src/mongo/util/mmap.h
@@ -152,7 +152,7 @@ namespace mongo {
virtual void* viewForFlushing() {
if( views.size() == 0 )
return 0;
- assert( views.size() == 1 );
+ verify( views.size() == 1 );
return views[0];
}
public:
@@ -246,18 +246,18 @@ namespace mongo {
}
bool get(unsigned i) const {
unsigned x = i / 32;
- assert( x < MemoryMappedFile::NChunks );
+ verify( x < MemoryMappedFile::NChunks );
return (bits[x] & (1 << (i%32))) != 0;
}
void set(unsigned i) {
unsigned x = i / 32;
wassert( x < (MemoryMappedFile::NChunks*2/3) ); // warn if getting close to limit
- assert( x < MemoryMappedFile::NChunks );
+ verify( x < MemoryMappedFile::NChunks );
bits[x] |= (1 << (i%32));
}
void clear(unsigned i) {
unsigned x = i / 32;
- assert( x < MemoryMappedFile::NChunks );
+ verify( x < MemoryMappedFile::NChunks );
bits[x] &= ~(1 << (i%32));
}
};
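For reference, the indexing used by get/set/clear: bit i lives in 32-bit word i / 32 under mask 1 << (i % 32), so NChunks words track NChunks * 32 flags. The same arithmetic as a standalone snippet, with a hypothetical 16-word capacity:

    #include <cstdio>

    unsigned bits[16];   // hypothetical capacity: 16 * 32 = 512 flags

    bool getBit(unsigned i)   { return (bits[i / 32] & (1u << (i % 32))) != 0; }
    void setBit(unsigned i)   { bits[i / 32] |=  (1u << (i % 32)); }
    void clearBit(unsigned i) { bits[i / 32] &= ~(1u << (i % 32)); }

    int main() {
        setBit(40);                                     // word 1, bit 8
        std::printf("%d %d\n", getBit(40), getBit(41)); // 1 0
        clearBit(40);
        std::printf("%d\n", getBit(40));                // 0
        return 0;
    }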
diff --git a/src/mongo/util/mmap_mm.cpp b/src/mongo/util/mmap_mm.cpp
index ec2400e02d3..a58aaaec040 100644
--- a/src/mongo/util/mmap_mm.cpp
+++ b/src/mongo/util/mmap_mm.cpp
@@ -37,7 +37,7 @@ namespace mongo {
}
void* MemoryMappedFile::map(const char *filename, long& length , int options ) {
- assert( length );
+ verify( length );
view = malloc( length );
return view;
}
diff --git a/src/mongo/util/mmap_posix.cpp b/src/mongo/util/mmap_posix.cpp
index f37f0448e02..eebed4f818b 100644
--- a/src/mongo/util/mmap_posix.cpp
+++ b/src/mongo/util/mmap_posix.cpp
@@ -76,7 +76,7 @@ namespace mongo {
switch ( a ) {
case Sequential: advice = MADV_SEQUENTIAL; break;
case Random: advice = MADV_RANDOM; break;
- default: assert(0);
+ default: verify(0);
}
if ( madvise(_p,_len,advice ) ) {
@@ -181,7 +181,7 @@ namespace mongo {
printMemInfo();
abort();
}
- assert( x == oldPrivateAddr );
+ verify( x == oldPrivateAddr );
return x;
}
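The advise() path above maps an access-pattern enum onto madvise(2) flags, with verify(0) making an unhandled enum fatal in every build. A minimal standalone usage sketch (Linux/POSIX, not the mongo wrapper):

    #include <sys/mman.h>
    #include <cstdio>

    int main() {
        size_t len = 1 << 20;
        // Anonymous mapping standing in for a memory-mapped data file.
        void* p = mmap(0, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        // Hint that access will be sequential (readahead-friendly).
        if (madvise(p, len, MADV_SEQUENTIAL) != 0)
            perror("madvise");
        munmap(p, len);
        return 0;
    }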
diff --git a/src/mongo/util/mmap_win.cpp b/src/mongo/util/mmap_win.cpp
index 93896404ca2..b0c3a3e2726 100644
--- a/src/mongo/util/mmap_win.cpp
+++ b/src/mongo/util/mmap_win.cpp
@@ -35,7 +35,7 @@ namespace mongo {
void MemoryMappedFile::clearWritableBits(void *p) {
for( unsigned i = ((size_t)p)/ChunkSize; i <= (((size_t)p)+len)/ChunkSize; i++ ) {
writable.clear(i);
- assert( !writable.get(i) );
+ verify( !writable.get(i) );
}
}
@@ -67,7 +67,7 @@ namespace mongo {
unsigned long long mapped = 0;
void* MemoryMappedFile::createReadOnlyMap() {
- assert( maphandle );
+ verify( maphandle );
scoped_lock lk(mapViewMutex);
void *p = MapViewOfFile(maphandle, FILE_MAP_READ, /*f ofs hi*/0, /*f ofs lo*/ 0, /*dwNumberOfBytesToMap 0 means to eof*/0);
if ( p == 0 ) {
@@ -82,7 +82,7 @@ namespace mongo {
}
void* MemoryMappedFile::map(const char *filenameIn, unsigned long long &length, int options) {
- assert( fd == 0 && len == 0 ); // can't open more than once
+ verify( fd == 0 && len == 0 ); // can't open more than once
setFilename(filenameIn);
/* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary perhaps. */
char filename[256];
diff --git a/src/mongo/util/mongoutils/test.cpp b/src/mongo/util/mongoutils/test.cpp
index 45268c5ca49..1a84e2d68af 100644
--- a/src/mongo/util/mongoutils/test.cpp
+++ b/src/mongo/util/mongoutils/test.cpp
@@ -30,16 +30,16 @@ int main() {
{
string s = "abcde";
str::stripTrailing(s, "ef");
- assert( s == "abcd" );
+ verify( s == "abcd" );
str::stripTrailing(s, "abcd");
- assert( s.empty() );
+ verify( s.empty() );
s = "abcddd";
str::stripTrailing(s, "d");
- assert( s == "abc" );
+ verify( s == "abc" );
}
string x = str::after("abcde", 'c');
- assert( x == "de" );
- assert( str::after("abcde", 'x') == "" );
+ verify( x == "de" );
+ verify( str::after("abcde", 'x') == "" );
return 0;
}
diff --git a/src/mongo/util/net/hostandport.h b/src/mongo/util/net/hostandport.h
index d05f13eb79c..2bfdb6d673c 100644
--- a/src/mongo/util/net/hostandport.h
+++ b/src/mongo/util/net/hostandport.h
@@ -40,7 +40,7 @@ namespace mongo {
/** @param p port number. -1 is ok to use default. */
HostAndPort(string h, int p /*= -1*/) : _host(h), _port(p) {
- assert( !str::startsWith(h, '#') );
+ verify( !str::startsWith(h, '#') );
}
HostAndPort(const SockAddr& sock ) : _host( sock.getAddr() ) , _port( sock.getPort() ) { }
@@ -158,8 +158,8 @@ namespace mongo {
}
string h = getHostName();
- assert( !h.empty() );
- assert( h != "localhost" );
+ verify( !h.empty() );
+ verify( h != "localhost" );
return HostAndPort(h, cmdLine.port);
}
@@ -208,8 +208,8 @@ namespace mongo {
inline void HostAndPort::init(const char *p) {
massert(13110, "HostAndPort: host is empty", *p);
- assert( *p != '#' );
- assert( _dynName.empty() );
+ verify( *p != '#' );
+ verify( _dynName.empty() );
const char *colon = strrchr(p, ':');
if( colon ) {
int port = atoi(colon+1);
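init() splits on the last colon via strrchr and otherwise leaves the caller's default port in place. The same split, reduced to a standalone sketch:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <string>

    // Split "host:port" on the *last* colon, as init() does; without a
    // colon the default port is left untouched.
    void splitHostPort(const char* p, std::string& host, int& port) {
        const char* colon = strrchr(p, ':');
        if (colon) {
            host.assign(p, colon - p);
            port = atoi(colon + 1);
        }
        else {
            host = p;
        }
    }

    int main() {
        std::string h; int port = -1;   // -1: "use the default port"
        splitHostPort("db1.example.com:27017", h, port);
        std::printf("%s %d\n", h.c_str(), port);   // db1.example.com 27017
        return 0;
    }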
diff --git a/src/mongo/util/net/httpclient.cpp b/src/mongo/util/net/httpclient.cpp
index 549734f4658..e94475a1940 100644
--- a/src/mongo/util/net/httpclient.cpp
+++ b/src/mongo/util/net/httpclient.cpp
@@ -121,7 +121,7 @@ namespace mongo {
int rc;
char version[32];
- assert( sscanf( buf , "%s %d" , version , &rc ) == 2 );
+ verify( sscanf( buf , "%s %d" , version , &rc ) == 2 );
HD( "rc: " << rc );
StringBuilder sb;
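This hunk is the clearest motivation for the whole rename: the entire status-line parse happens inside the assertion, so under NDEBUG a plain assert() would leave version and rc unset. The parse itself, reduced (with a bounded %31s, which the original's bare %s lacks):

    #include <cstdio>

    int main() {
        const char* statusLine = "HTTP/1.1 200 OK";
        char version[32];
        int rc = 0;
        // sscanf returns the number of fields converted; 2 means both
        // the protocol token and the status code were parsed.
        if (sscanf(statusLine, "%31s %d", version, &rc) != 2) return 1;
        std::printf("version=%s rc=%d\n", version, rc);  // HTTP/1.1 200
        return 0;
    }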
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
index cba0c4610d5..7939399134e 100644
--- a/src/mongo/util/net/listen.cpp
+++ b/src/mongo/util/net/listen.cpp
@@ -341,7 +341,7 @@ namespace mongo {
}
void Listener::acceptedMP(MessagingPort *mp) {
- assert(!"You must overwrite one of the accepted methods");
+ verify(!"You must overwrite one of the accepted methods");
}
// ----- ListeningSockets -------
@@ -362,7 +362,7 @@ namespace mongo {
return DEFAULT_MAX_CONN;
#else
struct rlimit limit;
- assert( getrlimit(RLIMIT_NOFILE,&limit) == 0 );
+ verify( getrlimit(RLIMIT_NOFILE,&limit) == 0 );
int max = (int)(limit.rlim_cur * .8);
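Same hazard here: getrlimit() is the side effect inside the check, and the connection cap is then derived as 80% of the soft file-descriptor limit. That computation on its own:

    #include <sys/resource.h>
    #include <cstdio>

    int main() {
        struct rlimit limit;
        if (getrlimit(RLIMIT_NOFILE, &limit) != 0) return 1;
        // Keep ~20% of the descriptor budget for data files and internals.
        int maxConns = (int)(limit.rlim_cur * .8);
        std::printf("soft fd limit %llu -> max connections %d\n",
                    (unsigned long long)limit.rlim_cur, maxConns);
        return 0;
    }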
diff --git a/src/mongo/util/net/message.cpp b/src/mongo/util/net/message.cpp
index a84e5c48c5c..7c5aa4b9f89 100644
--- a/src/mongo/util/net/message.cpp
+++ b/src/mongo/util/net/message.cpp
@@ -47,7 +47,7 @@ namespace mongo {
/*struct MsgStart {
MsgStart() {
NextMsgId = (((unsigned) time(0)) << 16) ^ curTimeMillis();
- assert(MsgDataHeaderSize == 16);
+ verify(MsgDataHeaderSize == 16);
}
} msgstart;*/
diff --git a/src/mongo/util/net/message.h b/src/mongo/util/net/message.h
index c8ade44097d..46050c2d649 100644
--- a/src/mongo/util/net/message.h
+++ b/src/mongo/util/net/message.h
@@ -56,7 +56,7 @@ namespace mongo {
case dbKillCursors: return "killcursors";
default:
PRINT(op);
- assert(0);
+ verify(0);
return "";
}
}
@@ -79,7 +79,7 @@ namespace mongo {
default:
PRINT(op);
- assert(0);
+ verify(0);
return "";
}
@@ -129,8 +129,8 @@ namespace mongo {
}
long long getCursor() {
- assert( responseTo > 0 );
- assert( _operation == opReply );
+ verify( responseTo > 0 );
+ verify( _operation == opReply );
long long * l = (long long *)(_data + 4);
return l[0];
}
@@ -161,7 +161,7 @@ namespace mongo {
SockAddr _from;
MsgData *header() const {
- assert( !empty() );
+ verify( !empty() );
return _buf ? _buf : reinterpret_cast< MsgData* > ( _data[ 0 ].first );
}
int operation() const { return header()->operation(); }
@@ -195,7 +195,7 @@ namespace mongo {
return;
}
- assert( _freeIt );
+ verify( _freeIt );
int totalSize = 0;
for( vector< pair< char *, int > >::const_iterator i = _data.begin(); i != _data.end(); ++i ) {
totalSize += i->second;
@@ -212,8 +212,8 @@ namespace mongo {
// vector swap() so this is fast
Message& operator=(Message& r) {
- assert( empty() );
- assert( r._freeIt );
+ verify( empty() );
+ verify( r._freeIt );
_buf = r._buf;
r._buf = 0;
if ( r._data.size() > 0 ) {
@@ -250,7 +250,7 @@ namespace mongo {
_setData( md, true );
return;
}
- assert( _freeIt );
+ verify( _freeIt );
if ( _buf ) {
_data.push_back( make_pair( (char*)_buf, _buf->len ) );
_buf = 0;
@@ -261,14 +261,14 @@ namespace mongo {
// use to set first buffer if empty
void setData(MsgData *d, bool freeIt) {
- assert( empty() );
+ verify( empty() );
_setData( d, freeIt );
}
void setData(int operation, const char *msgtxt) {
setData(operation, msgtxt, strlen(msgtxt)+1);
}
void setData(int operation, const char *msgdata, size_t len) {
- assert( empty() );
+ verify( empty() );
size_t dataLen = len + sizeof(MsgData) - 4;
MsgData *d = (MsgData *) malloc(dataLen);
memcpy(d->_data, msgdata, len);
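The dataLen arithmetic reflects MsgData's layout: the struct ends in a small inline _data array whose first 4 bytes are already counted by sizeof(MsgData), so a len-byte payload needs len + sizeof(MsgData) - 4 bytes total. The generic header-plus-trailing-payload pattern, with a hypothetical struct in place of MsgData:

    #include <cstdlib>
    #include <cstring>

    struct Packet {          // hypothetical stand-in for MsgData
        int len;             // total allocated/wire size
        int operation;
        char _data[4];       // payload starts inline, inside sizeof(Packet)
    };

    Packet* makePacket(int operation, const void* payload, size_t n) {
        size_t total = n + sizeof(Packet) - 4;  // don't count _data twice
        Packet* p = (Packet*)malloc(total);
        p->len = (int)total;
        p->operation = operation;
        memcpy(p->_data, payload, n);
        return p;            // caller frees, as setData(d, freeIt=true) would
    }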
diff --git a/src/mongo/util/net/message_port.cpp b/src/mongo/util/net/message_port.cpp
index d0dc3cef28e..9a5a3ab1462 100644
--- a/src/mongo/util/net/message_port.cpp
+++ b/src/mongo/util/net/message_port.cpp
@@ -64,7 +64,7 @@ namespace mongo {
}
void append( Message& m ) {
- assert( m.header()->len <= 1300 );
+ verify( m.header()->len <= 1300 );
if ( len() + m.header()->len > 1300 )
flush();
@@ -180,10 +180,10 @@ again:
}
int z = (len+1023)&0xfffffc00;
- assert(z>=len);
+ verify(z>=len);
MsgData *md = (MsgData *) malloc(z);
ScopeGuard guard = MakeGuard(free, md);
- assert(md);
+ verify(md);
md->len = len;
char *p = (char *) &md->id;
@@ -234,7 +234,7 @@ again:
<< " response len: " << (unsigned)response.header()->len << '\n'
<< " response op: " << response.operation() << '\n'
<< " remote: " << psock->remoteString() << endl;
- assert(false);
+ verify(false);
response.reset();
}
mmm( log() << "*call() end" << endl; )
@@ -246,7 +246,7 @@ again:
}
void MessagingPort::say(Message& toSend, int responseTo) {
- assert( !toSend.empty() );
+ verify( !toSend.empty() );
mmm( log() << "* say() thr:" << GetCurrentThreadId() << endl; )
toSend.header()->id = nextMessageId();
toSend.header()->responseTo = responseTo;
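One bit trick above is worth unpacking: (len+1023) & 0xfffffc00 rounds the allocation up to the next multiple of 1024 (the mask clears the low 10 bits), and verify(z >= len) catches the wrap-around if len were within 1023 of INT_MAX. In isolation:

    #include <cstdio>

    int roundUp1024(int len) {
        return (len + 1023) & 0xfffffc00;  // bump, then clear low 10 bits
    }

    int main() {
        std::printf("%d %d %d\n",
                    roundUp1024(1),       // 1024
                    roundUp1024(1024),    // 1024
                    roundUp1024(1025));   // 2048
        return 0;
    }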
diff --git a/src/mongo/util/net/message_server_asio.cpp b/src/mongo/util/net/message_server_asio.cpp
index 0c6a7d925da..52cf96d9578 100644
--- a/src/mongo/util/net/message_server_asio.cpp
+++ b/src/mongo/util/net/message_server_asio.cpp
@@ -105,7 +105,7 @@ namespace mongo {
MsgData * data = (MsgData*)raw;
memcpy( data , &_inHeader , sizeof( _inHeader ) );
- assert( data->len == _inHeader.len );
+ verify( data->len == _inHeader.len );
uassert( 10273 , "_cur not empty! pipelining requests not supported" , ! _cur.data );
@@ -127,7 +127,7 @@ namespace mongo {
if (!_myThread) // pool is empty
_myThread.reset(new StickyThread());
- assert(_myThread);
+ verify(_myThread);
_myThread->ready(shared_from_this());
}
@@ -150,7 +150,7 @@ namespace mongo {
{
// return thread to pool after we have sent data to the client
mongo::mutex::scoped_lock(tp_mutex);
- assert(_myThread);
+ verify(_myThread);
thread_pool.push_back(_myThread);
_myThread.reset();
}
diff --git a/src/mongo/util/net/message_server_port.cpp b/src/mongo/util/net/message_server_port.cpp
index a457c88193f..edb41bba37a 100644
--- a/src/mongo/util/net/message_server_port.cpp
+++ b/src/mongo/util/net/message_server_port.cpp
@@ -43,7 +43,7 @@ namespace mongo {
setThreadName( "conn" );
- assert( inPort );
+ verify( inPort );
inPort->psock->setLogLevel(1);
scoped_ptr<MessagingPort> p( inPort );
@@ -141,7 +141,7 @@ namespace mongo {
static const size_t STACK_SIZE = 1024*1024; // if we change this we need to update the warning
struct rlimit limits;
- assert(getrlimit(RLIMIT_STACK, &limits) == 0);
+ verify(getrlimit(RLIMIT_STACK, &limits) == 0);
if (limits.rlim_cur > STACK_SIZE) {
pthread_attr_setstacksize(&attrs, (DEBUG_BUILD
? (STACK_SIZE / 2)
diff --git a/src/mongo/util/net/miniwebserver.cpp b/src/mongo/util/net/miniwebserver.cpp
index 5c5d9daf471..60f738b53d1 100644
--- a/src/mongo/util/net/miniwebserver.cpp
+++ b/src/mongo/util/net/miniwebserver.cpp
@@ -156,7 +156,7 @@ namespace mongo {
}
else {
for ( vector<string>::iterator i = headers.begin(); i != headers.end(); i++ ) {
- assert( strncmp("Content-Length", i->c_str(), 14) );
+ verify( strncmp("Content-Length", i->c_str(), 14) );
ss << *i << "\r\n";
}
}
diff --git a/src/mongo/util/net/sock.cpp b/src/mongo/util/net/sock.cpp
index 946df432f39..b105a377a4b 100644
--- a/src/mongo/util/net/sock.cpp
+++ b/src/mongo/util/net/sock.cpp
@@ -201,7 +201,7 @@ namespace mongo {
}
else {
//TODO: handle other addresses in linked list;
- assert(addrs->ai_addrlen <= sizeof(sa));
+ verify(addrs->ai_addrlen <= sizeof(sa));
memcpy(&sa, addrs->ai_addr, addrs->ai_addrlen);
addressSize = addrs->ai_addrlen;
freeaddrinfo(addrs);
@@ -216,7 +216,7 @@ namespace mongo {
case AF_UNIX: return true;
default: return false;
}
- assert(false);
+ verify(false);
return false;
}
@@ -541,9 +541,9 @@ namespace mongo {
#ifdef MONGO_SSL
void Socket::secure( SSLManager * ssl ) {
- assert( ssl );
- assert( ! _ssl );
- assert( _fd >= 0 );
+ verify( ssl );
+ verify( ! _ssl );
+ verify( _fd >= 0 );
_ssl = ssl->secure( _fd );
SSL_connect( _ssl );
}
@@ -556,7 +556,7 @@ namespace mongo {
void Socket::postFork() {
#ifdef MONGO_SSL
if ( _sslAccepted ) {
- assert( _fd );
+ verify( _fd );
_ssl = _sslAccepted->secure( _fd );
SSL_accept( _ssl );
_sslAccepted = 0;
@@ -682,7 +682,7 @@ namespace mongo {
else {
_bytesOut += ret;
- assert( ret <= len );
+ verify( ret <= len );
len -= ret;
data += ret;
}
@@ -766,7 +766,7 @@ namespace mongo {
if ( ret > 0 ) {
if ( len <= 4 && ret != len )
log(_logLevel) << "Socket recv() got " << ret << " bytes wanted len=" << len << endl;
- assert( ret <= len );
+ verify( ret <= len );
len -= ret;
buf += ret;
}
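Both the send and recv paths above run the classic short-transfer loop: each call may move fewer bytes than requested, so the return count is sanity-checked (verify(ret <= len)), the remaining length shrunk, and the pointer advanced. The shape of it, reduced to a sketch for a blocking POSIX socket:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <cstddef>

    // Read exactly len bytes from fd; false on EOF or error.
    bool recvAll(int fd, char* buf, size_t len) {
        while (len > 0) {
            ssize_t ret = ::recv(fd, buf, len, 0);
            if (ret <= 0)
                return false;    // 0: peer closed, <0: error
            len -= (size_t)ret;  // a conforming recv() returns ret <= len
            buf += ret;
        }
        return true;
    }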
diff --git a/src/mongo/util/processinfo_none.cpp b/src/mongo/util/processinfo_none.cpp
index 604018d32f2..8296c0cbbe1 100644
--- a/src/mongo/util/processinfo_none.cpp
+++ b/src/mongo/util/processinfo_none.cpp
@@ -58,7 +58,7 @@ namespace mongo {
}
bool ProcessInfo::blockInMemory( char * start ) {
- assert(0);
+ verify(0);
return true;
}
diff --git a/src/mongo/util/processinfo_win32.cpp b/src/mongo/util/processinfo_win32.cpp
index 6530e5fdaba..2dd3d1bbdaf 100755
--- a/src/mongo/util/processinfo_win32.cpp
+++ b/src/mongo/util/processinfo_win32.cpp
@@ -68,15 +68,15 @@ namespace mongo {
int ProcessInfo::getVirtualMemorySize() {
MEMORYSTATUSEX mse;
mse.dwLength = sizeof(mse);
- assert( GlobalMemoryStatusEx( &mse ) );
+ verify( GlobalMemoryStatusEx( &mse ) );
DWORDLONG x = (mse.ullTotalVirtual - mse.ullAvailVirtual) / (1024 * 1024) ;
- assert( x <= 0x7fffffff );
+ verify( x <= 0x7fffffff );
return (int) x;
}
int ProcessInfo::getResidentSize() {
PROCESS_MEMORY_COUNTERS pmc;
- assert( GetProcessMemoryInfo( GetCurrentProcess() , &pmc, sizeof(pmc) ) );
+ verify( GetProcessMemoryInfo( GetCurrentProcess() , &pmc, sizeof(pmc) ) );
return _wconvertmtos( pmc.WorkingSetSize );
}
diff --git a/src/mongo/util/ramlog.cpp b/src/mongo/util/ramlog.cpp
index d7a839a3fff..a4934f17be1 100644
--- a/src/mongo/util/ramlog.cpp
+++ b/src/mongo/util/ramlog.cpp
@@ -132,7 +132,7 @@ namespace mongo {
s << "<pre>\n";
for( int i = 0; i < (int)v.size(); i++ ) {
- assert( strlen(v[i]) > 20 );
+ verify( strlen(v[i]) > 20 );
int r = repeats(v, i);
if( r < 0 ) {
s << color( linkify( clean(v,i).c_str() ) ) << '\n';
diff --git a/src/mongo/util/text.cpp b/src/mongo/util/text.cpp
index a4091d684bb..3025d1da2b4 100644
--- a/src/mongo/util/text.cpp
+++ b/src/mongo/util/text.cpp
@@ -87,7 +87,7 @@ namespace mongo {
CP_UTF8, 0, wide.c_str(), static_cast<int>(wide.size()),
&buffer[0], static_cast<int>(buffer.size()), NULL, NULL);
if (len > 0) {
- assert(len == static_cast<int>(buffer.size()));
+ verify(len == static_cast<int>(buffer.size()));
return std::string(&buffer[0], buffer.size());
}
}
@@ -132,8 +132,8 @@ namespace mongo {
struct TextUnitTest : public UnitTest {
void run() {
- assert( parseLL("123") == 123 );
- assert( parseLL("-123000000000") == -123000000000LL );
+ verify( parseLL("123") == 123 );
+ verify( parseLL("-123000000000") == -123000000000LL );
}
} textUnitTest;
diff --git a/src/mongo/util/time_support.h b/src/mongo/util/time_support.h
index 69e32be709c..ad3cdbf4598 100644
--- a/src/mongo/util/time_support.h
+++ b/src/mongo/util/time_support.h
@@ -23,8 +23,6 @@
#include <boost/thread/tss.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/thread/xtime.hpp>
-#undef assert
-#define assert MONGO_assert
#include "../bson/util/misc.h" // Date_t
namespace mongo {
@@ -51,7 +49,7 @@ namespace mongo {
const char* fmt = (colonsOk ? "%Y-%m-%dT%H:%M:%S" : "%Y-%m-%dT%H-%M-%S");
char buf[32];
- assert(strftime(buf, sizeof(buf), fmt, &t) == 19);
+ verify(strftime(buf, sizeof(buf), fmt, &t) == 19);
return buf;
}
@@ -61,7 +59,7 @@ namespace mongo {
const char* fmt = "%Y-%m-%dT%H:%M:%SZ";
char buf[32];
- assert(strftime(buf, sizeof(buf), fmt, &t) == 20);
+ verify(strftime(buf, sizeof(buf), fmt, &t) == 20);
return buf;
}
@@ -104,7 +102,7 @@ namespace mongo {
Sleep(s*1000);
}
inline void sleepmillis(long long s) {
- assert( s <= 0xffffffff );
+ verify( s <= 0xffffffff );
Sleep((DWORD) s);
}
inline void sleepmicros(long long s) {
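Two things happen in this header: the #undef assert / #define assert MONGO_assert hack is dropped (call sites now say verify directly), and the strftime checks keep their exact-length contracts, 19 characters for "%Y-%m-%dT%H:%M:%S" and 20 for the Z-suffixed form, given a four-digit year. A reduced check of the latter:

    #include <cstdio>
    #include <ctime>

    int main() {
        time_t now = time(0);
        struct tm t;
        gmtime_r(&now, &t);
        char buf[32];
        size_t n = strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%SZ", &t);
        std::printf("%u %s\n", (unsigned)n, buf);  // n == 20 for any 4-digit year
        return (n == 20) ? 0 : 1;
    }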
diff --git a/src/mongo/util/trace.cpp b/src/mongo/util/trace.cpp
index db81952e9eb..806f1fbd1e2 100755
--- a/src/mongo/util/trace.cpp
+++ b/src/mongo/util/trace.cpp
@@ -66,7 +66,7 @@ namespace mongo {
void Trace::setTraces(const string &names) {
/* create a new map, and replace the existing one */
NameMap *pM;
- assert(false);
+ verify(false);
}
#endif
diff --git a/src/mongo/util/util.cpp b/src/mongo/util/util.cpp
index bc4729be9b2..398997d9638 100644
--- a/src/mongo/util/util.cpp
+++ b/src/mongo/util/util.cpp
@@ -32,14 +32,14 @@ namespace mongo {
AtStartup() {
LARGE_INTEGER x;
bool ok = QueryPerformanceFrequency(&x);
- assert(ok);
+ verify(ok);
Timer::countsPerSecond = x.QuadPart;
}
} atstartuputil;
#endif
string hexdump(const char *data, unsigned len) {
- assert( len < 1000000 );
+ verify( len < 1000000 );
const unsigned char *p = (const unsigned char *) data;
stringstream ss;
for( unsigned i = 0; i < 4 && i < len; i++ ) {
@@ -167,19 +167,19 @@ namespace mongo {
struct UtilTest : public UnitTest {
void run() {
- assert( isPrime(3) );
- assert( isPrime(2) );
- assert( isPrime(13) );
- assert( isPrime(17) );
- assert( !isPrime(9) );
- assert( !isPrime(6) );
- assert( nextPrime(4) == 5 );
- assert( nextPrime(8) == 11 );
-
- assert( endsWith("abcde", "de") );
- assert( !endsWith("abcde", "dasdfasdfashkfde") );
-
- assert( swapEndian(0x01020304) == 0x04030201 );
+ verify( isPrime(3) );
+ verify( isPrime(2) );
+ verify( isPrime(13) );
+ verify( isPrime(17) );
+ verify( !isPrime(9) );
+ verify( !isPrime(6) );
+ verify( nextPrime(4) == 5 );
+ verify( nextPrime(8) == 11 );
+
+ verify( endsWith("abcde", "de") );
+ verify( !endsWith("abcde", "dasdfasdfashkfde") );
+
+ verify( swapEndian(0x01020304) == 0x04030201 );
}
} utilTest;
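swapEndian in the test reverses the byte order of a 32-bit word. One mask-and-shift way to write it (illustrative; the actual implementation is not part of this diff):

    #include <cstdio>

    unsigned swapEndian32(unsigned x) {
        return ((x & 0x000000ffu) << 24) |
               ((x & 0x0000ff00u) <<  8) |
               ((x & 0x00ff0000u) >>  8) |
               ((x & 0xff000000u) >> 24);
    }

    int main() {
        std::printf("%08x\n", swapEndian32(0x01020304));  // 04030201
        return 0;
    }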
diff --git a/src/mongo/util/version.cpp b/src/mongo/util/version.cpp
index 1c4f658bf55..322a4c4e7e1 100644
--- a/src/mongo/util/version.cpp
+++ b/src/mongo/util/version.cpp
@@ -63,7 +63,7 @@ namespace mongo {
}
catch (...){ // not a number
if (curPart.empty()){
- assert(*c == '\0');
+ verify(*c == '\0');
break;
}
else if (startsWith(curPart, "rc")){
@@ -283,16 +283,16 @@ namespace mongo {
class VersionCmpTest : public UnitTest {
public:
void run() {
- assert( versionCmp("1.2.3", "1.2.3") == 0 );
- assert( versionCmp("1.2.3", "1.2.4") < 0 );
- assert( versionCmp("1.2.3", "1.2.20") < 0 );
- assert( versionCmp("1.2.3", "1.20.3") < 0 );
- assert( versionCmp("2.2.3", "10.2.3") < 0 );
- assert( versionCmp("1.2.3", "1.2.3-") > 0 );
- assert( versionCmp("1.2.3", "1.2.3-pre") > 0 );
- assert( versionCmp("1.2.3", "1.2.4-") < 0 );
- assert( versionCmp("1.2.3-", "1.2.3") < 0 );
- assert( versionCmp("1.2.3-pre", "1.2.3") < 0 );
+ verify( versionCmp("1.2.3", "1.2.3") == 0 );
+ verify( versionCmp("1.2.3", "1.2.4") < 0 );
+ verify( versionCmp("1.2.3", "1.2.20") < 0 );
+ verify( versionCmp("1.2.3", "1.20.3") < 0 );
+ verify( versionCmp("2.2.3", "10.2.3") < 0 );
+ verify( versionCmp("1.2.3", "1.2.3-") > 0 );
+ verify( versionCmp("1.2.3", "1.2.3-pre") > 0 );
+ verify( versionCmp("1.2.3", "1.2.4-") < 0 );
+ verify( versionCmp("1.2.3-", "1.2.3") < 0 );
+ verify( versionCmp("1.2.3-pre", "1.2.3") < 0 );
log(1) << "versionCmpTest passed" << endl;
}
@@ -301,22 +301,22 @@ namespace mongo {
class VersionArrayTest : public UnitTest {
public:
void run() {
- assert( _versionArray("1.2.3") == BSON_ARRAY(1 << 2 << 3 << 0) );
- assert( _versionArray("1.2.0") == BSON_ARRAY(1 << 2 << 0 << 0) );
- assert( _versionArray("2.0.0") == BSON_ARRAY(2 << 0 << 0 << 0) );
+ verify( _versionArray("1.2.3") == BSON_ARRAY(1 << 2 << 3 << 0) );
+ verify( _versionArray("1.2.0") == BSON_ARRAY(1 << 2 << 0 << 0) );
+ verify( _versionArray("2.0.0") == BSON_ARRAY(2 << 0 << 0 << 0) );
- assert( _versionArray("1.2.3-pre-") == BSON_ARRAY(1 << 2 << 3 << -100) );
- assert( _versionArray("1.2.0-pre-") == BSON_ARRAY(1 << 2 << 0 << -100) );
- assert( _versionArray("2.0.0-pre-") == BSON_ARRAY(2 << 0 << 0 << -100) );
+ verify( _versionArray("1.2.3-pre-") == BSON_ARRAY(1 << 2 << 3 << -100) );
+ verify( _versionArray("1.2.0-pre-") == BSON_ARRAY(1 << 2 << 0 << -100) );
+ verify( _versionArray("2.0.0-pre-") == BSON_ARRAY(2 << 0 << 0 << -100) );
- assert( _versionArray("1.2.3-rc0") == BSON_ARRAY(1 << 2 << 3 << -10) );
- assert( _versionArray("1.2.0-rc1") == BSON_ARRAY(1 << 2 << 0 << -9) );
- assert( _versionArray("2.0.0-rc2") == BSON_ARRAY(2 << 0 << 0 << -8) );
+ verify( _versionArray("1.2.3-rc0") == BSON_ARRAY(1 << 2 << 3 << -10) );
+ verify( _versionArray("1.2.0-rc1") == BSON_ARRAY(1 << 2 << 0 << -9) );
+ verify( _versionArray("2.0.0-rc2") == BSON_ARRAY(2 << 0 << 0 << -8) );
// Note that the pre of an rc is the same as the rc itself
- assert( _versionArray("1.2.3-rc3-pre-") == BSON_ARRAY(1 << 2 << 3 << -7) );
- assert( _versionArray("1.2.0-rc4-pre-") == BSON_ARRAY(1 << 2 << 0 << -6) );
- assert( _versionArray("2.0.0-rc5-pre-") == BSON_ARRAY(2 << 0 << 0 << -5) );
+ verify( _versionArray("1.2.3-rc3-pre-") == BSON_ARRAY(1 << 2 << 3 << -7) );
+ verify( _versionArray("1.2.0-rc4-pre-") == BSON_ARRAY(1 << 2 << 0 << -6) );
+ verify( _versionArray("2.0.0-rc5-pre-") == BSON_ARRAY(2 << 0 << 0 << -5) );
log(1) << "versionArrayTest passed" << endl;
}
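The fourth element in these expected arrays encodes pre-release ordering so that BSON array comparison sorts versions correctly: a plain release gets 0, a bare "-pre-" gets -100 (below every release candidate), and "-rcN" gets -10 + N, with an rc's own "-pre-" collapsing to the rc's value per the comment above. A sketch of that mapping, inferred from the test cases (hypothetical helper, not mongo's parser):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Marker for the fourth version-array element.
    int releaseMarker(const char* suffix) {
        if (*suffix == '\0') return 0;          // "1.2.3"       -> 0
        if (strncmp(suffix, "rc", 2) == 0)
            return -10 + atoi(suffix + 2);      // "rc1" -> -9; "rc4-pre-" -> -6
        return -100;                            // "pre-"        -> -100
    }

    int main() {
        std::printf("%d %d %d\n", releaseMarker(""),
                    releaseMarker("rc2"), releaseMarker("pre-"));
        // prints: 0 -8 -100
        return 0;
    }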