author      Eric Milkie <milkie@10gen.com>    2015-03-06 11:50:05 -0500
committer   Eric Milkie <milkie@10gen.com>    2015-03-16 09:08:06 -0400
commit      d7563a168a8862e5c50f32829d17b628305a93ad (patch)
tree        0dc69faf211892fa0724ac677f3da009289d87e6 /src
parent      629eb083a2094b7a096b29d66504a8f34e1a1d60 (diff)
download    mongo-d7563a168a8862e5c50f32829d17b628305a93ad.tar.gz
SERVER-17601 replace global variable debug and enum DEBUG_BUILD
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/base/checked_cast.h                            |  2
-rw-r--r--  src/mongo/bson/bsonobjbuilder.cpp                        |  5
-rw-r--r--  src/mongo/bson/mutable/document.cpp                      | 14
-rw-r--r--  src/mongo/client/dbclient.cpp                            | 13
-rw-r--r--  src/mongo/client/redef_macros.h                          |  3
-rw-r--r--  src/mongo/client/undef_macros.h                          |  2
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp               |  4
-rw-r--r--  src/mongo/db/instance.cpp                                |  2
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_journal.cpp             |  2
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_recover.cpp             |  4
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp  |  1
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp  |  2
-rw-r--r--  src/mongo/dbtests/framework_options.cpp                  |  4
-rw-r--r--  src/mongo/s/chunk_diff-inl.h                             |  2
-rw-r--r--  src/mongo/util/assert_util.h                             |  7
-rw-r--r--  src/mongo/util/background.cpp                            |  2
-rw-r--r--  src/mongo/util/debug_util.h                              | 11
-rw-r--r--  src/mongo/util/logfile.cpp                               | 28
-rw-r--r--  src/mongo/util/net/message_server_port.cpp               |  2
-rw-r--r--  src/mongo/util/version_reporting.cpp                     |  2
20 files changed, 29 insertions, 83 deletions
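
Note: the core of this change is visible in src/mongo/util/debug_util.h further down -- the global variable `debug` and the `DEBUG_BUILD` enum are collapsed into a single `const bool kDebugBuild`, and call sites move from preprocessor guards to ordinary `if` statements. A minimal standalone sketch of the pattern (everything except `kDebugBuild` is illustrative and not part of the commit):

#include <iostream>

namespace mongo {
#if defined(_DEBUG)
    const bool kDebugBuild = true;
#else
    const bool kDebugBuild = false;
#endif
}  // namespace mongo

// Illustrative stand-in for some expensive debug-only validation.
void runExpensiveChecks() {
    std::cout << "running debug-only checks\n";
}

int main() {
    // Before this commit the call would sit inside "#if defined(_DEBUG)".
    // Now it always compiles (so it cannot silently bit-rot), but because
    // kDebugBuild is a compile-time constant the optimizer drops the whole
    // branch from release builds.
    if (mongo::kDebugBuild) {
        runExpensiveChecks();
    }
    return 0;
}
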
diff --git a/src/mongo/base/checked_cast.h b/src/mongo/base/checked_cast.h
index 3d3c1c964c5..c4e9fe2ffc1 100644
--- a/src/mongo/base/checked_cast.h
+++ b/src/mongo/base/checked_cast.h
@@ -71,7 +71,7 @@ namespace mongo {
template<typename T, typename U>
T checked_cast(const U& u) {
- return checked_cast_impl<debug>::cast<T>(u);
+ return checked_cast_impl<kDebugBuild>::cast<T>(u);
};
} // namespace mongo
diff --git a/src/mongo/bson/bsonobjbuilder.cpp b/src/mongo/bson/bsonobjbuilder.cpp
index c52ffbdd607..ca3ac311527 100644
--- a/src/mongo/bson/bsonobjbuilder.cpp
+++ b/src/mongo/bson/bsonobjbuilder.cpp
@@ -197,13 +197,12 @@ namespace mongo {
BSONObjBuilder& BSONObjBuilder::appendDate(StringData fieldName, Date_t dt) {
/* easy to pass a time_t to this and get a bad result. thus this warning. */
-#if defined(_DEBUG) && defined(MONGO_EXPOSE_MACROS)
- if( dt > 0 && dt <= 0xffffffff ) {
+ if ( kDebugBuild && dt > 0 && dt <= 0xffffffff ) {
static int n;
if( n++ == 0 )
log() << "DEV WARNING appendDate() called with a tiny (but nonzero) date" << std::endl;
}
-#endif
+
_b.appendNum((char) Date);
_b.appendStr(fieldName);
_b.appendNum(dt);
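
The warning guarded by kDebugBuild above exists because Date_t counts milliseconds since the Unix epoch while time_t counts seconds, so an unconverted time_t lands in early 1970 and still fits under 0xffffffff. A hedged illustration of the arithmetic (values are examples only, not code from the commit):

#include <cstdint>
#include <iostream>

int main() {
    std::uint64_t nowSeconds = 1425660000;            // roughly 2015-03-06, as a time_t in seconds
    std::uint64_t wrong = nowSeconds;                 // forgot the *1000 conversion to milliseconds
    std::uint64_t right = nowSeconds * 1000;          // a plausible Date_t value
    std::cout << (wrong <= 0xffffffffULL) << "\n";    // 1 -- would trip the warning above
    std::cout << (right <= 0xffffffffULL) << "\n";    // 0 -- passes silently
    return 0;
}
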
diff --git a/src/mongo/bson/mutable/document.cpp b/src/mongo/bson/mutable/document.cpp
index f285cedcbfd..d2223a26500 100644
--- a/src/mongo/bson/mutable/document.cpp
+++ b/src/mongo/bson/mutable/document.cpp
@@ -414,11 +414,7 @@ namespace mutablebson {
// How many reps do we cache before we spill to heap. Use a power of two. For debug
// builds we make this very small so it is less likely to mask vector invalidation
// logic errors. We don't make it zero so that we do execute the fastRep code paths.
-#if defined(_DEBUG)
- const size_t kFastReps = 2;
-#else
- const size_t kFastReps = 128;
-#endif
+ const size_t kFastReps = kDebugBuild ? 2 : 128;
// An ElementRep contains the information necessary to locate the data for an Element,
// and the topology information for how the Element is related to other Elements in the
@@ -647,7 +643,7 @@ namespace mutablebson {
else {
verify(id <= Element::kMaxRepIdx);
- if (debug && paranoid) {
+ if (kDebugBuild && paranoid) {
// Force all reps to new addresses to help catch invalid rep usage.
std::vector<ElementRep> newSlowElements(_slowElements);
_slowElements.swap(newSlowElements);
@@ -696,7 +692,7 @@ namespace mutablebson {
const size_t objIdx = _objects.size();
verify(objIdx <= kMaxObjIdx);
_objects.push_back(newObj);
- if (debug && paranoid) {
+ if (kDebugBuild && paranoid) {
// Force reallocation to catch use after invalidation.
std::vector<BSONObj> new_objects(_objects);
_objects.swap(new_objects);
@@ -1013,7 +1009,7 @@ namespace mutablebson {
_damages.back().targetOffset = targetOffset;
_damages.back().sourceOffset = sourceOffset;
_damages.back().size = size;
- if (debug && paranoid) {
+ if (kDebugBuild && paranoid) {
// Force damage events to new addresses to catch invalidation errors.
DamageVector new_damages(_damages);
_damages.swap(new_damages);
@@ -1072,7 +1068,7 @@ namespace mutablebson {
fieldName.rawData(),
fieldName.rawData() + fieldName.size());
_fieldNames.push_back('\0');
- if (debug && paranoid) {
+ if (kDebugBuild && paranoid) {
// Force names to new addresses to catch invalidation errors.
std::vector<char> new_fieldNames(_fieldNames);
_fieldNames.swap(new_fieldNames);
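
The repeated `kDebugBuild && paranoid` blocks in this file all use the same trick: copying a vector and swapping it back in forces every element to a new allocation, so any stale pointer or iterator into the old buffer fails fast instead of silently reading freed memory (the same reasoning behind the tiny debug-build kFastReps value). A standalone sketch of the idiom, with simplified types that are not MongoDB's:

#include <vector>

// Assumed to mirror the constant introduced by this commit.
const bool kDebugBuild = true;

template <typename T>
void forceReallocation(std::vector<T>& v) {
    if (kDebugBuild) {
        // Copy-and-swap relocates every element, so any outstanding pointer
        // or iterator into the old buffer is invalidated and will fail fast
        // under a debug allocator or AddressSanitizer.
        std::vector<T> fresh(v);
        v.swap(fresh);
    }
}

int main() {
    std::vector<int> reps(4, 0);
    int* stale = &reps[0];
    forceReallocation(reps);
    // `stale` now dangles; dereferencing it is exactly the kind of bug
    // this idiom is designed to surface early.
    (void)stale;
    return 0;
}
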
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index 1018827dc8e..fcbdd90e0c0 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -44,6 +44,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/s/stale_exception.h" // for RecvStaleConfigException
#include "mongo/util/assert_util.h"
+#include "mongo/util/debug_util.h"
#include "mongo/util/log.h"
#include "mongo/util/net/ssl_manager.h"
#include "mongo/util/net/ssl_options.h"
@@ -1534,15 +1535,11 @@ namespace mongo {
}
/* -- DBClientCursor ---------------------------------------------- */
-
-#ifdef _DEBUG
-#define CHECK_OBJECT( o , msg ) massert( 10337 , (string)"object not valid" + (msg) , (o).isValid() )
-#else
-#define CHECK_OBJECT( o , msg )
-#endif
-
void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions, Message &toSend ) {
- CHECK_OBJECT( query , "assembleRequest query" );
+ if (kDebugBuild) {
+ massert( 10337 , (string)"object not valid assembleRequest query" , query.isValid() );
+ }
+
// see query.h for the protocol we are using here.
BufBuilder b;
int opts = queryOptions;
diff --git a/src/mongo/client/redef_macros.h b/src/mongo/client/redef_macros.h
index 323e8b018ef..7706bcfb075 100644
--- a/src/mongo/client/redef_macros.h
+++ b/src/mongo/client/redef_macros.h
@@ -72,9 +72,6 @@
#pragma push_macro("DEV")
#undef DEV
#define DEV MONGO_DEV
-#pragma push_macro("DEBUGGING")
-#undef DEBUGGING
-#define DEBUGGING MONGO_DEBUGGING
#pragma push_macro("SOMETIMES")
#undef SOMETIMES
#define SOMETIMES MONGO_SOMETIMES
diff --git a/src/mongo/client/undef_macros.h b/src/mongo/client/undef_macros.h
index 90221b00f0c..13f160bed22 100644
--- a/src/mongo/client/undef_macros.h
+++ b/src/mongo/client/undef_macros.h
@@ -60,8 +60,6 @@
// util/debug_util.h
#undef DEV
#pragma pop_macro("DEV")
-#undef DEBUGGING
-#pragma pop_macro("DEBUGGING")
#undef SOMETIMES
#pragma pop_macro("SOMETIMES")
#undef OCCASIONALLY
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 4a0dd6e3699..db28236268e 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -195,10 +195,9 @@ namespace mongo {
if (!pPipeline.get())
return false;
-#if _DEBUG
// This is outside of the if block to keep the object alive until the pipeline is finished.
BSONObj parsed;
- if (!pPipeline->isExplain() && !pCtx->inShard) {
+ if (kDebugBuild && !pPipeline->isExplain() && !pCtx->inShard) {
// Make sure all operations round-trip through Pipeline::toBson()
// correctly by reparsing every command on DEBUG builds. This is
// important because sharded aggregations rely on this ability.
@@ -208,7 +207,6 @@ namespace mongo {
pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
verify(pPipeline);
}
-#endif
PlanExecutor* exec = NULL;
scoped_ptr<ClientCursorPin> pin; // either this OR the execHolder will be non-null
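
The debug-only block above is a serialize/reparse round-trip check: whatever the pipeline serializes to must parse back into an equivalent pipeline, because that serialized form is exactly what a mongos forwards to shards. A toy sketch of the general pattern (the types and methods here are illustrative, not the real Pipeline API):

#include <cassert>
#include <string>

const bool kDebugBuild = true;   // stands in for mongo::kDebugBuild

// Toy "pipeline": serializes to a string and parses back.
struct ToyPipeline {
    std::string spec;
    std::string serialize() const { return spec; }                  // cf. Pipeline::toBson()
    static ToyPipeline parse(const std::string& s) { return {s}; }  // cf. Pipeline::parseCommand()
};

int main() {
    ToyPipeline original{"{$match: {x: 1}}"};
    if (kDebugBuild) {
        // Whatever we serialize must parse back into an equivalent object,
        // because a downstream consumer (here: a shard) sees only the
        // serialized form.
        ToyPipeline reparsed = ToyPipeline::parse(original.serialize());
        assert(reparsed.spec == original.spec);
    }
    return 0;
}
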
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index edbc7e458a3..9e3ca6438f9 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -820,7 +820,7 @@ namespace mongo {
}
}
pass++;
- if (debug)
+ if (kDebugBuild)
sleepmillis(20);
else
sleepmillis(2);
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal.cpp b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
index 369ed9a4075..5d7ba47c094 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
@@ -392,7 +392,7 @@ namespace mongo {
boost::filesystem::path filepath = preallocPath(i);
unsigned long long limit = DataLimitPerJournalFile;
- if( debug && i == 1 ) {
+ if( kDebugBuild && i == 1 ) {
// moving 32->64, the prealloc files would be short. that is "ok", but we want to exercise that
// case, so we force exercising here when _DEBUG is set by arbitrarily stopping prealloc at a low
// limit for a file. also we want to be able to change in the future the constant without a lot of
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index 9297472057b..b4e0a6b3bd9 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -486,8 +486,8 @@ namespace mongo {
JSectHeader h;
br.peek(h);
if( h.fileId != fileId ) {
- if (debug || (mmapv1GlobalOptions.journalOptions &
- MMAPV1Options::JournalDumpJournal)) {
+ if (kDebugBuild || (mmapv1GlobalOptions.journalOptions &
+ MMAPV1Options::JournalDumpJournal)) {
log() << "Ending processFileBuffer at differing fileId want:" << fileId << " got:" << h.fileId << endl;
log() << " sect len:" << h.sectionLen() << " seqnum:" << h.seqNumber << endl;
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
index 26115bf6e3d..76eef273815 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
@@ -580,7 +580,6 @@ namespace mongo {
void CappedRecordStoreV1::addDeletedRec( OperationContext* txn, const DiskLoc& dloc ) {
DeletedRecord* d = txn->recoveryUnit()->writing( drec( dloc ) );
- DEBUGGING log() << "TEMP: add deleted rec " << dloc.toString() << ' ' << hex << d->extentOfs() << endl;
if ( !cappedLastDelRecLastExtent().isValid() ) {
// Initial extent allocation. Insert at end.
d->nextDeleted() = DiskLoc();
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index 073c3efaa68..281a9ee1d76 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -232,8 +232,6 @@ namespace mongo {
void SimpleRecordStoreV1::addDeletedRec( OperationContext* txn, const DiskLoc& dloc ) {
DeletedRecord* d = drec( dloc );
- DEBUGGING log() << "TEMP: add deleted rec " << dloc.toString() << ' ' << std::hex << d->extentOfs() << endl;
-
int b = bucket(d->lengthWithHeaders());
*txn->recoveryUnit()->writing(&d->nextDeleted()) = _details->deletedListEntry(b);
_details->setDeletedListEntry(txn, b, dloc);
diff --git a/src/mongo/dbtests/framework_options.cpp b/src/mongo/dbtests/framework_options.cpp
index ebfcd7d6203..a4842012c15 100644
--- a/src/mongo/dbtests/framework_options.cpp
+++ b/src/mongo/dbtests/framework_options.cpp
@@ -229,8 +229,8 @@ namespace mongo {
frameworkGlobalParams.filter = params["filter"].as<string>();
}
- if (debug && storageGlobalParams.dur) {
- log() << "_DEBUG: automatically enabling mmapv1GlobalOptions.journalOptions=8 "
+ if (kDebugBuild && storageGlobalParams.dur) {
+ log() << "Debug Build: automatically enabling mmapv1GlobalOptions.journalOptions=8 "
<< "(JournalParanoid)" << endl;
// this was commented out. why too slow or something?
mmapv1GlobalOptions.journalOptions |= MMAPV1Options::JournalParanoid;
diff --git a/src/mongo/s/chunk_diff-inl.h b/src/mongo/s/chunk_diff-inl.h
index 0f88f2517b7..2b379b15b0e 100644
--- a/src/mongo/s/chunk_diff-inl.h
+++ b/src/mongo/s/chunk_diff-inl.h
@@ -101,7 +101,7 @@ namespace mongo {
// Open a cursor for the diff chunks
std::auto_ptr<DBClientCursor> cursor = conn->query(
- ChunkType::ConfigNS, diffQuery, 0, 0, 0, 0, ( DEBUG_BUILD ? 2 : 1000000 ) );
+ ChunkType::ConfigNS, diffQuery, 0, 0, 0, 0, ( kDebugBuild ? 2 : 1000000 ) );
verify( cursor.get() );
int diff = calculateConfigDiff( *cursor.get() );
diff --git a/src/mongo/util/assert_util.h b/src/mongo/util/assert_util.h
index 7bd5d3c2aab..ff3aed48ef8 100644
--- a/src/mongo/util/assert_util.h
+++ b/src/mongo/util/assert_util.h
@@ -39,6 +39,7 @@
#include "mongo/logger/logger.h"
#include "mongo/logger/logstream_builder.h"
#include "mongo/util/concurrency/thread_name.h"
+#include "mongo/util/debug_util.h"
namespace mongo {
@@ -295,11 +296,7 @@ namespace mongo {
/* dassert is 'debug assert' -- might want to turn off for production as these
could be slow.
*/
-#if defined(_DEBUG)
-# define MONGO_dassert(x) invariant(x)
-#else
-# define MONGO_dassert(x)
-#endif
+#define MONGO_dassert(x) if (kDebugBuild) invariant(x)
#ifdef MONGO_EXPOSE_MACROS
# define dassert MONGO_dassert
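
Note the semantic shift in MONGO_dassert: the old `#define MONGO_dassert(x)` made the asserted expression vanish entirely in release builds, where it could silently bit-rot; with `if (kDebugBuild) invariant(x)` it always compiles, but is dead code when kDebugBuild is false and so still costs nothing at run time. A hedged, self-contained sketch of the difference (helper names are illustrative):

#include <cstdio>
#include <cstdlib>

const bool kDebugBuild = false;   // pretend this is a release build

// Illustrative stand-in for invariant(); aborts on failure.
inline void invariantSketch(bool ok) { if (!ok) std::abort(); }

#define DASSERT_SKETCH(x) if (kDebugBuild) invariantSketch(x)

bool expensiveConsistencyCheck() {
    std::puts("expensive check ran");
    return true;
}

int main() {
    // The expression is always type-checked (unlike the old empty macro),
    // but with kDebugBuild == false the branch is never taken: the check
    // does not run, and the optimizer removes it entirely.
    DASSERT_SKETCH(expensiveConsistencyCheck());
    return 0;
}
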
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index c98e674ccc8..d1ad927bb8f 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -323,7 +323,7 @@ namespace mongo {
void PeriodicTaskRunner::run() {
// Use a shorter cycle time in debug mode to help catch race conditions.
- const size_t waitMillis = (debug ? 5 : 60) * 1000;
+ const size_t waitMillis = (kDebugBuild ? 5 : 60) * 1000;
const stdx::function<bool()> predicate =
stdx::bind( &PeriodicTaskRunner::_isShutdownRequested, this );
diff --git a/src/mongo/util/debug_util.h b/src/mongo/util/debug_util.h
index bcf979ee20d..14ea0a1d98b 100644
--- a/src/mongo/util/debug_util.h
+++ b/src/mongo/util/debug_util.h
@@ -33,19 +33,14 @@
namespace mongo {
#if defined(_DEBUG)
- enum {DEBUG_BUILD = 1};
- const bool debug=true;
+ const bool kDebugBuild = true;
#else
- enum {DEBUG_BUILD = 0};
- const bool debug=false;
+ const bool kDebugBuild = false;
#endif
-#define MONGO_DEV if( DEBUG_BUILD )
+#define MONGO_DEV if (kDebugBuild)
#define DEV MONGO_DEV
-#define MONGO_DEBUGGING if( 0 )
-#define DEBUGGING MONGO_DEBUGGING
-
// The following declare one unique counter per enclosing function.
// NOTE The implementation double-increments on a match, but we don't really care.
#define MONGO_SOMETIMES( occasion, howOften ) for( static unsigned occasion = 0; ++occasion % howOften == 0; )
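
For reference, a usage sketch of the surviving macros (not code from the commit; the macros are copied locally so the snippet compiles on its own): DEV guards a statement that should only run in debug builds, and SOMETIMES runs its body roughly once every `howOften` invocations via a per-function static counter.

#include <iostream>

const bool kDebugBuild = true;

// Local copies of the macros above, so this sketch is self-contained.
#define DEV if (kDebugBuild)
#define SOMETIMES(occasion, howOften) \
    for (static unsigned occasion = 0; ++occasion % howOften == 0; )

void heartbeat() {
    DEV std::cout << "debug-only trace of heartbeat()\n";

    SOMETIMES(counter, 4) {
        // Runs on roughly every 4th call; `counter` is a static counter
        // unique to this enclosing function (and double-increments on a
        // match, as the comment above notes).
        std::cout << "periodic work\n";
    }
}

int main() {
    for (int i = 0; i < 12; ++i)
        heartbeat();
    return 0;
}
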
diff --git a/src/mongo/util/logfile.cpp b/src/mongo/util/logfile.cpp
index db9a0aa9a0b..44e23e00dbc 100644
--- a/src/mongo/util/logfile.cpp
+++ b/src/mongo/util/logfile.cpp
@@ -48,34 +48,6 @@ using namespace mongoutils;
using std::endl;
using std::string;
-namespace mongo {
- struct LogfileTest : public StartupTest {
- LogfileTest() { }
- void run() {
- if( 0 && debug ) {
- try {
- LogFile f("logfile_test");
- void *p = mongoMalloc(16384);
- char *buf = (char*) p;
- buf += 4095;
- buf = (char*) (((size_t)buf)&(~0xfff));
- memset(buf, 'z', 8192);
- buf[8190] = '\n';
- buf[8191] = 'B';
- buf[0] = 'A';
- f.synchronousAppend(buf, 8192);
- f.synchronousAppend(buf, 8192);
- free(p);
- }
- catch(DBException& e ) {
- log() << "logfile.cpp test failed : " << e.what() << endl;
- throw;
- }
- }
- }
- } __test;
-}
-
#if defined(_WIN32)
namespace mongo {
diff --git a/src/mongo/util/net/message_server_port.cpp b/src/mongo/util/net/message_server_port.cpp
index 1941a8870cf..c8af74dc3e8 100644
--- a/src/mongo/util/net/message_server_port.cpp
+++ b/src/mongo/util/net/message_server_port.cpp
@@ -129,7 +129,7 @@ namespace {
if (limits.rlim_cur > STACK_SIZE) {
size_t stackSizeToSet = STACK_SIZE;
#if !__has_feature(address_sanitizer)
- if (DEBUG_BUILD)
+ if (kDebugBuild)
stackSizeToSet /= 2;
#endif
pthread_attr_setstacksize(&attrs, stackSizeToSet);
diff --git a/src/mongo/util/version_reporting.cpp b/src/mongo/util/version_reporting.cpp
index b3c6d4dd863..83370cc1ba2 100644
--- a/src/mongo/util/version_reporting.cpp
+++ b/src/mongo/util/version_reporting.cpp
@@ -128,7 +128,7 @@ namespace mongo {
/*TODO: add this back once the module system is in place -- maybe once we do something like serverstatus with callbacks*/
// << "interpreterVersion" << globalScriptEngine->getInterpreterVersionString()
<< "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );
- result.appendBool( "debug" , debug );
+ result.appendBool( "debug" , kDebugBuild );
result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
}
}