author    Eliot Horowitz <eliot@10gen.com>  2009-12-28 16:43:43 -0500
committer Eliot Horowitz <eliot@10gen.com>  2009-12-28 16:43:43 -0500
commit    0ab8149a8e9e900b63ee28b9590a249578339376 (patch)
tree      564a5c516b7c308964b79ea761e4800efe26d252
parent    95137f262c2a57831dccf058428a2f64e900496e (diff)
download  mongo-0ab8149a8e9e900b63ee28b9590a249578339376.tar.gz
uassert/massert take error code SERVER-112
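
This change adds a numeric error id as the first argument of uassert/massert (and of uasserted/msgasserted) at every call site, so each assertion carries a stable code (SERVER-112). It also adds buildscripts/errorcodes.py, which SConstruct now runs to scan the source tree, report the next free id, and fail the build if an id is used twice; a few sites use a named constant such as ASSERT_ID_DUPKEY instead of a literal. The snippet below is only an illustrative sketch of the new call shape -- AssertionException, uasserted() and the uassert macro here are stand-ins, not the actual util/assert_util.h definitions touched by this commit:

// Illustrative sketch of a code-carrying assertion, matching call sites such as
// uassert( 10011 , "no collection name", coll.size() ) in this diff.
#include <iostream>
#include <stdexcept>
#include <string>

struct AssertionException : std::runtime_error {
    int code;   // the stable numeric id checked by buildscripts/errorcodes.py
    AssertionException( int c , const std::string& msg )
        : std::runtime_error( msg ) , code( c ) {}
};

inline void uasserted( int code , const std::string& msg ) {
    throw AssertionException( code , msg );
}

// evaluate the condition; on failure throw with the error id and message
#define uassert( code , msg , expr ) \
    do { if ( ! (expr) ) uasserted( (code) , (msg) ); } while ( 0 )

int main() {
    std::string coll = "";
    try {
        uassert( 10011 , "no collection name" , coll.size() );
    }
    catch ( AssertionException& e ) {
        std::cout << "assertion " << e.code << ": " << e.what() << std::endl;
    }
    return 0;
}

Keying each assertion to a fixed integer identifies an error independently of its message text, which is also what makes the duplicate-id check in errorcodes.py meaningful.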
-rw-r--r--  SConstruct  7
-rw-r--r--  buildscripts/errorcodes.py  82
-rw-r--r--  client/clientOnly.cpp  2
-rw-r--r--  client/connpool.cpp  4
-rw-r--r--  client/connpool.h  4
-rw-r--r--  client/dbclient.cpp  20
-rw-r--r--  client/dbclient.h  2
-rw-r--r--  client/gridfs.cpp  12
-rw-r--r--  client/model.cpp  2
-rw-r--r--  client/parallel.cpp  6
-rw-r--r--  client/quorum.cpp  8
-rw-r--r--  db/btree.cpp  22
-rw-r--r--  db/client.h  2
-rw-r--r--  db/cloner.cpp  16
-rw-r--r--  db/concurrency.h  2
-rw-r--r--  db/curop.h  4
-rw-r--r--  db/database.h  14
-rw-r--r--  db/db.cpp  16
-rw-r--r--  db/db.h  4
-rw-r--r--  db/dbcommands.cpp  20
-rw-r--r--  db/dbeval.cpp  2
-rw-r--r--  db/dbhelpers.cpp  6
-rw-r--r--  db/dbmessage.h  8
-rw-r--r--  db/extsort.cpp  10
-rw-r--r--  db/extsort.h  2
-rw-r--r--  db/instance.cpp  18
-rw-r--r--  db/jsobj.cpp  48
-rw-r--r--  db/jsobj.h  18
-rw-r--r--  db/json.cpp  6
-rw-r--r--  db/matcher.cpp  18
-rw-r--r--  db/matcher.h  2
-rw-r--r--  db/mr.cpp  10
-rw-r--r--  db/namespace.cpp  12
-rw-r--r--  db/namespace.h  14
-rw-r--r--  db/nonce.cpp  8
-rw-r--r--  db/pdfile.cpp  56
-rw-r--r--  db/query.cpp  29
-rw-r--r--  db/queryoptimizer.cpp  20
-rw-r--r--  db/queryutil.cpp  4
-rw-r--r--  db/rec.h  4
-rw-r--r--  db/reccache.cpp  10
-rw-r--r--  db/reccache.h  4
-rw-r--r--  db/reci.h  2
-rw-r--r--  db/recstore.h  16
-rw-r--r--  db/repl.cpp  42
-rw-r--r--  db/replset.h  6
-rw-r--r--  db/scanandorder.h  4
-rw-r--r--  db/storage.cpp  12
-rw-r--r--  db/update.cpp  64
-rw-r--r--  db/update.h  4
-rw-r--r--  dbtests/btreetests.cpp  2
-rw-r--r--  dbtests/framework.cpp  2
-rw-r--r--  dbtests/jsobjtests.cpp  10
-rw-r--r--  dbtests/queryoptimizertests.cpp  8
-rw-r--r--  s/chunk.cpp  40
-rw-r--r--  s/commands_public.cpp  8
-rw-r--r--  s/config.cpp  26
-rw-r--r--  s/config.h  2
-rw-r--r--  s/cursors.cpp  2
-rw-r--r--  s/d_logic.cpp  2
-rw-r--r--  s/request.cpp  8
-rw-r--r--  s/s_only.cpp  2
-rw-r--r--  s/server.cpp  2
-rw-r--r--  s/shardkey.cpp  12
-rw-r--r--  s/strategy.cpp  8
-rw-r--r--  s/strategy_shard.cpp  6
-rw-r--r--  s/strategy_single.cpp  4
-rw-r--r--  scripting/engine.cpp  14
-rw-r--r--  scripting/engine_java.h  2
-rw-r--r--  scripting/engine_spidermonkey.cpp  42
-rw-r--r--  scripting/engine_v8.cpp  10
-rw-r--r--  scripting/sm_db.cpp  42
-rw-r--r--  shell/dbshell.cpp  2
-rw-r--r--  shell/utils.cpp  16
-rw-r--r--  tools/dump.cpp  2
-rw-r--r--  tools/import.cpp  2
-rw-r--r--  tools/restore.cpp  4
-rw-r--r--  tools/sniffer.cpp  4
-rw-r--r--  util/assert_util.cpp  10
-rw-r--r--  util/assert_util.h  19
-rw-r--r--  util/base64.cpp  2
-rw-r--r--  util/file.h  2
-rw-r--r--  util/file_allocator.h  12
-rw-r--r--  util/httpclient.cpp  4
-rw-r--r--  util/message.cpp  2
-rw-r--r--  util/message_server_asio.cpp  4
-rw-r--r--  util/message_server_port.cpp  2
-rw-r--r--  util/mmap_posix.cpp  4
88 files changed, 571 insertions, 482 deletions
diff --git a/SConstruct b/SConstruct
index d90a38191c5..db95a207a0a 100644
--- a/SConstruct
+++ b/SConstruct
@@ -957,6 +957,13 @@ testEnv.Prepend( LIBPATH=["."] )
# ----- TARGETS ------
+def checkErrorCodes():
+ import buildscripts.errorcodes as x
+ if x.checkErrorCodes() == False:
+ print( "next id to use:" + x.getNextCode() )
+ Exit(-1)
+
+checkErrorCodes()
# main db target
mongod = env.Program( "mongod" , commonFiles + coreDbFiles + serverOnlyFiles + [ "db/db.cpp" ] )
diff --git a/buildscripts/errorcodes.py b/buildscripts/errorcodes.py
new file mode 100644
index 00000000000..a550a931105
--- /dev/null
+++ b/buildscripts/errorcodes.py
@@ -0,0 +1,82 @@
+
+import os
+import sys
+import re
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ):
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ arr.append( full )
+
+ return arr
+
+assertNames = [ "uassert" , "massert" ]
+
+def assignErrorCodes():
+ cur = 10000
+ for root in assertNames:
+ for x in getAllSourceFiles():
+ print( x )
+ didAnything = False
+ fixed = ""
+ for line in open( x ):
+ s = line.partition( root + "(" )
+ if s[1] == "" or line.startswith( "#define " + root):
+ fixed += line
+ continue
+ fixed += s[0] + root + "( " + str( cur ) + " , " + s[2]
+ cur = cur + 1
+ didAnything = True
+ if didAnything:
+ out = open( x , 'w' )
+ out.write( fixed )
+ out.close()
+
+
+def readErrorCodes( callback ):
+ p = re.compile( "([um]asser(t|ted)) *\( *(\d+)" )
+ for x in getAllSourceFiles():
+ lineNum = 1
+ for line in open( x ):
+ for m in p.findall( line ):
+ callback( x , lineNum , line , m[2] )
+ lineNum = lineNum + 1
+
+
+def getNextCode():
+ highest = [0]
+ def check( fileName , lineNum , line , code ):
+ code = int( code )
+ if code > highest[0]:
+ highest[0] = code
+ readErrorCodes( check )
+ return highest[0] + 1
+
+def checkErrorCodes():
+ seen = {}
+ errors = []
+ def checkDups( fileName , lineNum , line , code ):
+ if code in seen:
+ print( "DUPLICATE IDS" )
+ print( "%s:%d:%s %s" % ( fileName , lineNum , line.strip() , code ) )
+ print( "%s:%d:%s %s" % seen[code] )
+ errors.append( seen[code] )
+ seen[code] = ( fileName , lineNum , line , code )
+ readErrorCodes( checkDups )
+ return len( errors ) == 0
+
+if __name__ == "__main__":
+ ok = checkErrorCodes()
+ print( "ok:" + str( ok ) )
+ if ok == False:
+ print( "next: " + str( getNextCode() ) )
+
diff --git a/client/clientOnly.cpp b/client/clientOnly.cpp
index b315afd2e74..90527d56bd2 100644
--- a/client/clientOnly.cpp
+++ b/client/clientOnly.cpp
@@ -51,7 +51,7 @@ namespace mongo {
}
/*
auto_ptr<CursorIterator> Helpers::find( const char *ns , BSONObj query , bool requireIndex ){
- uassert( "Helpers::find can't be used in client" , 0 );
+ uassert( 10000 , "Helpers::find can't be used in client" , 0 );
return auto_ptr<CursorIterator>(0);
}
*/
diff --git a/client/connpool.cpp b/client/connpool.cpp
index c12486c1e73..b332bae5a9d 100644
--- a/client/connpool.cpp
+++ b/client/connpool.cpp
@@ -40,7 +40,7 @@ namespace mongo {
log(2) << "creating new connection for pool to:" << host << endl;
if ( !cc->connect(host.c_str(), errmsg) ) {
delete cc;
- uassert( (string)"dbconnectionpool: connect failed " + host , false);
+ uassert( 11002 , (string)"dbconnectionpool: connect failed " + host , false);
return 0;
}
c = cc;
@@ -50,7 +50,7 @@ namespace mongo {
DBClientPaired *p = new DBClientPaired();
if( !p->connect(host) ) {
delete p;
- uassert( (string)"dbconnectionpool: connect failed [2] " + host , false);
+ uassert( 11003 , (string)"dbconnectionpool: connect failed [2] " + host , false);
return 0;
}
c = p;
diff --git a/client/connpool.h b/client/connpool.h
index 1838702027f..34ed498c880 100644
--- a/client/connpool.h
+++ b/client/connpool.h
@@ -80,13 +80,13 @@ namespace mongo {
public:
/** get the associated connection object */
DBClientBase* operator->(){
- uassert( "did you call done already" , _conn );
+ uassert( 11004 , "did you call done already" , _conn );
return _conn;
}
/** get the associated connection object */
DBClientBase& conn() {
- uassert( "did you call done already" , _conn );
+ uassert( 11005 , "did you call done already" , _conn );
return *_conn;
}
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index 6b2df8333db..99f0a78f13a 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -132,7 +132,7 @@ namespace mongo {
BSONObj cmd = BSON( "count" << ns.coll << "query" << query );
BSONObj res;
if( !runCommand(ns.db.c_str(), cmd, res, options) )
- uasserted(string("count fails:") + res.toString());
+ uasserted(11010,string("count fails:") + res.toString());
return res.getIntField("n");
}
@@ -314,8 +314,8 @@ namespace mongo {
list<string> DBClientWithCommands::getDatabaseNames(){
BSONObj info;
- uassert( "listdatabases failed" , runCommand( "admin" , BSON( "listDatabases" << 1 ) , info ) );
- uassert( "listDatabases.databases not array" , info["databases"].type() == Array );
+ uassert( 10005 , "listdatabases failed" , runCommand( "admin" , BSON( "listDatabases" << 1 ) , info ) );
+ uassert( 10006 , "listDatabases.databases not array" , info["databases"].type() == Array );
list<string> names;
@@ -426,7 +426,7 @@ namespace mongo {
auto_ptr<DBClientCursor> c =
this->query(ns, query, 1, 0, fieldsToReturn, queryOptions);
- massert( "DBClientBase::findOne: transport error", c.get() );
+ massert( 10276 , "DBClientBase::findOne: transport error", c.get() );
if ( !c->more() )
return BSONObj();
@@ -448,7 +448,7 @@ namespace mongo {
port = CmdLine::DefaultDBPort;
ip = hostbyname( serverAddress.c_str() );
}
- massert( "Unable to parse hostname", !ip.empty() );
+ massert( 10277 , "Unable to parse hostname", !ip.empty() );
// we keep around SockAddr for connection life -- maybe MessagingPort
// requires that?
@@ -594,14 +594,14 @@ namespace mongo {
BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << indexName ) ,
info ) ){
log() << "dropIndex failed: " << info << endl;
- uassert( "dropIndex failed" , 0 );
+ uassert( 10007 , "dropIndex failed" , 0 );
}
resetIndexCache();
}
void DBClientWithCommands::dropIndexes( const string& ns ){
BSONObj info;
- uassert( "dropIndexes failed" , runCommand( nsToClient( ns.c_str() ) ,
+ uassert( 10008 , "dropIndexes failed" , runCommand( nsToClient( ns.c_str() ) ,
BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << "*") ,
info ) );
resetIndexCache();
@@ -716,7 +716,7 @@ namespace mongo {
if ( !port().call(toSend, response) ) {
failed = true;
if ( assertOk )
- massert("dbclient error communicating with server", false);
+ massert( 10278 , "dbclient error communicating with server", false);
return false;
}
}
@@ -878,7 +878,7 @@ namespace mongo {
sleepsecs(1);
}
- uassert("checkmaster: no master found", false);
+ uassert( 10009 , "checkmaster: no master found", false);
}
inline DBClientConnection& DBClientPaired::checkMaster() {
@@ -922,7 +922,7 @@ namespace mongo {
bool DBClientPaired::connect(string hostpairstring) {
size_t comma = hostpairstring.find( "," );
- uassert("bad hostpairstring", comma != string::npos);
+ uassert( 10010 , "bad hostpairstring", comma != string::npos);
return connect( hostpairstring.substr( 0 , comma ) , hostpairstring.substr( comma + 1 ) );
}
diff --git a/client/dbclient.h b/client/dbclient.h
index 54d0b1927e1..a0235bda21d 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -418,7 +418,7 @@ namespace mongo {
virtual bool dropCollection( const string &ns ){
string db = nsGetDB( ns );
string coll = nsGetCollection( ns );
- uassert( "no collection name", coll.size() );
+ uassert( 10011 , "no collection name", coll.size() );
BSONObj info;
diff --git a/client/gridfs.cpp b/client/gridfs.cpp
index d2f6cce174f..b007ea054ad 100644
--- a/client/gridfs.cpp
+++ b/client/gridfs.cpp
@@ -61,7 +61,7 @@ namespace mongo {
}
BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType){
- massert("large files not yet implemented", length <= 0xffffffff);
+ massert( 10279 , "large files not yet implemented", length <= 0xffffffff);
char const * const end = data + length;
OID id;
@@ -83,14 +83,14 @@ namespace mongo {
BSONObj GridFS::storeFile( const string& fileName , const string& remoteName , const string& contentType){
- uassert( "file doesn't exist" , fileName == "-" || boost::filesystem::exists( fileName ) );
+ uassert( 10012 , "file doesn't exist" , fileName == "-" || boost::filesystem::exists( fileName ) );
FILE* fd;
if (fileName == "-")
fd = stdin;
else
fd = fopen( fileName.c_str() , "rb" );
- uassert("error opening file", fd);
+ uassert( 10013 , "error opening file", fd);
OID id;
id.init();
@@ -120,7 +120,7 @@ namespace mongo {
if (fd != stdin)
fclose( fd );
- massert("large files not yet implemented", length <= 0xffffffff);
+ massert( 10280 , "large files not yet implemented", length <= 0xffffffff);
return insertFile((remoteName.empty() ? fileName : remoteName), id, length, contentType);
}
@@ -197,7 +197,7 @@ namespace mongo {
b.append( "n" , n );
BSONObj o = _grid->_client.findOne( _grid->_chunksNS.c_str() , b.obj() );
- uassert( "chunk is empty!" , ! o.isEmpty() );
+ uassert( 10014 , "chunk is empty!" , ! o.isEmpty() );
return Chunk(o);
}
@@ -227,7 +227,7 @@ namespace mongo {
}
void GridFile::_exists(){
- uassert( "doesn't exists" , exists() );
+ uassert( 10015 , "doesn't exists" , exists() );
}
}
diff --git a/client/model.cpp b/client/model.cpp
index 2b076ddbc50..94e22b71c08 100644
--- a/client/model.cpp
+++ b/client/model.cpp
@@ -36,7 +36,7 @@ namespace mongo {
}
void Model::remove( bool safe ){
- uassert( "_id isn't set - needed for remove()" , _id["_id"].type() );
+ uassert( 10016 , "_id isn't set - needed for remove()" , _id["_id"].type() );
ScopedDbConnection conn( modelServer() );
conn->remove( getNS() , _id );
diff --git a/client/parallel.cpp b/client/parallel.cpp
index 7c4b12f1e90..418f99a7820 100644
--- a/client/parallel.cpp
+++ b/client/parallel.cpp
@@ -33,7 +33,7 @@ namespace mongo {
}
auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra ){
- uassert( "cursor already done" , ! _done );
+ uassert( 10017 , "cursor already done" , ! _done );
BSONObj q = _query;
if ( ! extra.isEmpty() ){
@@ -107,7 +107,7 @@ namespace mongo {
}
BSONObj SerialServerClusteredCursor::next(){
- uassert( "no more items" , more() );
+ uassert( 10018 , "no more items" , more() );
return _current->next();
}
@@ -182,7 +182,7 @@ namespace mongo {
bestFrom = i;
}
- uassert( "no more elements" , ! best.isEmpty() );
+ uassert( 10019 , "no more elements" , ! best.isEmpty() );
_nexts[bestFrom] = BSONObj();
return best;
diff --git a/client/quorum.cpp b/client/quorum.cpp
index bcda3d6e09a..d125421a873 100644
--- a/client/quorum.cpp
+++ b/client/quorum.cpp
@@ -13,7 +13,7 @@ namespace mongo {
_connect( h );
}
_connect( commaSeperated );
- uassert( "QuorumConnection needs 3 servers" , _conns.size() == 3 );
+ uassert( 10020 , "QuorumConnection needs 3 servers" , _conns.size() == 3 );
}
QuorumConnection::QuorumConnection( string a , string b , string c ){
@@ -104,7 +104,7 @@ namespace mongo {
auto_ptr<DBClientCursor> QuorumConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
const BSONObj *fieldsToReturn, int queryOptions){
- uassert( "$cmd not support yet in QuorumConnection::query" , ns.find( "$cmd" ) == string::npos );
+ uassert( 10021 , "$cmd not support yet in QuorumConnection::query" , ns.find( "$cmd" ) == string::npos );
for ( size_t i=0; i<_conns.size(); i++ ){
try {
@@ -122,7 +122,7 @@ namespace mongo {
}
auto_ptr<DBClientCursor> QuorumConnection::getMore( const string &ns, long long cursorId, int nToReturn, int options ){
- uassert("QuorumConnection::getMore not supported yet" , 0);
+ uassert( 10022 , "QuorumConnection::getMore not supported yet" , 0);
auto_ptr<DBClientCursor> c;
return c;
}
@@ -140,7 +140,7 @@ namespace mongo {
}
void QuorumConnection::insert( const string &ns, const vector< BSONObj >& v ){
- uassert("QuorumConnection bulk insert not implemented" , 0);
+ uassert( 10023 , "QuorumConnection bulk insert not implemented" , 0);
}
void QuorumConnection::remove( const string &ns , Query query, bool justOne ){ assert(0); }
diff --git a/db/btree.cpp b/db/btree.cpp
index dab6f341542..8b910f50cd4 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -82,7 +82,7 @@ namespace mongo {
{
bool f = false;
assert( f = true );
- massert("assert is misdefined", f);
+ massert( 10281 , "assert is misdefined", f);
}
killCurrentOp.checkForInterrupt();
@@ -223,14 +223,14 @@ namespace mongo {
does not bother returning that value.
*/
void BucketBasics::popBack(DiskLoc& recLoc, BSONObj& key) {
- massert( "n==0 in btree popBack()", n > 0 );
+ massert( 10282 , "n==0 in btree popBack()", n > 0 );
assert( k(n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
KeyNode kn = keyNode(n-1);
recLoc = kn.recordLoc;
key = kn.key;
int keysize = kn.key.objsize();
- massert("rchild not null in btree popBack()", nextChild.isNull());
+ massert( 10283 , "rchild not null in btree popBack()", nextChild.isNull());
/* weirdly, we also put the rightmost down pointer in nextchild, even when bucket isn't full. */
nextChild = kn.prevChildBucket;
@@ -408,11 +408,11 @@ namespace mongo {
if( !dupsChecked ) {
dupsChecked = true;
if( idx.head.btree()->exists(idx, idx.head, key, order) )
- uasserted( dupKeyError( idx , key ) );
+ uasserted( ASSERT_ID_DUPKEY , dupKeyError( idx , key ) );
}
}
else
- uasserted( dupKeyError( idx , key ) );
+ uasserted( ASSERT_ID_DUPKEY , dupKeyError( idx , key ) );
}
// dup keys allowed. use recordLoc as if it is part of the key
@@ -476,7 +476,7 @@ found:
//defensive:
n = -1;
parent.Null();
- massert("todo: use RecStoreInterface instead", false);
+ massert( 10284 , "todo: use RecStoreInterface instead", false);
// TODO: this was broken anyway as deleteRecord does unindexRecord() call which assumes the data is a BSONObj,
// and it isn't.
assert(false);
@@ -801,8 +801,8 @@ found:
_KeyNode& kn = k(pos);
if ( kn.isUnused() ) {
log(4) << "btree _insert: reusing unused key" << endl;
- massert("_insert: reuse key but lchild is not null", lChild.isNull());
- massert("_insert: reuse key but rchild is not null", rChild.isNull());
+ massert( 10285 , "_insert: reuse key but lchild is not null", lChild.isNull());
+ massert( 10286 , "_insert: reuse key but rchild is not null", rChild.isNull());
kn.setUsed();
return 0;
}
@@ -813,7 +813,7 @@ found:
out() << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
out() << " old l r: " << childForPos(pos).toString() << ' ' << childForPos(pos+1).toString() << endl;
out() << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
- massert("btree: key+recloc already in index", false);
+ massert( 10287 , "btree: key+recloc already in index", false);
}
DEBUGGING out() << "TEMP: key: " << key.toString() << endl;
@@ -961,10 +961,10 @@ namespace mongo {
if( !dupsAllowed ) {
if( n > 0 ) {
int cmp = keyLast.woCompare(key, order);
- massert( "bad key order in BtreeBuilder - server internal error", cmp <= 0 );
+ massert( 10288 , "bad key order in BtreeBuilder - server internal error", cmp <= 0 );
if( cmp == 0 ) {
//if( !dupsAllowed )
- uasserted( BtreeBucket::dupKeyError( idx , keyLast ) );
+ uasserted( ASSERT_ID_DUPKEY , BtreeBucket::dupKeyError( idx , keyLast ) );
}
}
keyLast = key;
diff --git a/db/client.h b/db/client.h
index 15a76606c4a..3d7a604d15b 100644
--- a/db/client.h
+++ b/db/client.h
@@ -128,7 +128,7 @@ namespace mongo {
int s = dbMutex.getState();
if( s != -1 ) {
log() << "error: releaseAndWriteLock() s == " << s << endl;
- msgasserted( "releaseAndWriteLock: unlock_shared failed, probably recursive" );
+ msgasserted( 12600, "releaseAndWriteLock: unlock_shared failed, probably recursive" );
}
#endif
diff --git a/db/cloner.cpp b/db/cloner.cpp
index d01b8c035d9..5788db8c49d 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -64,9 +64,9 @@ namespace mongo {
if ( e.eoo() )
break;
if ( string("ns") == e.fieldName() ) {
- uassert("bad ns field for index during dbcopy", e.type() == String);
+ uassert( 10024 , "bad ns field for index during dbcopy", e.type() == String);
const char *p = strchr(e.valuestr(), '.');
- uassert("bad ns field for index during dbcopy [2]", p);
+ uassert( 10025 , "bad ns field for index during dbcopy [2]", p);
string newname = cc().database()->name + p;
b.append("ns", newname);
}
@@ -166,7 +166,7 @@ namespace mongo {
bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot) {
- massert( "useReplAuth is not written to replication log", !useReplAuth || !logForRepl );
+ massert( 10289 , "useReplAuth is not written to replication log", !useReplAuth || !logForRepl );
string todb = cc().database()->name;
stringstream a,b;
@@ -218,7 +218,7 @@ namespace mongo {
BSONElement e = collection.findElement("name");
if ( e.eoo() ) {
string s = "bad system.namespaces object " + collection.toString();
- massert(s.c_str(), false);
+ massert( 10290 , s.c_str(), false);
}
assert( !e.eoo() );
assert( e.type() == String );
@@ -337,9 +337,9 @@ namespace mongo {
if ( c->more() ) {
replayOpLog( c.get(), query );
cursorId = c->getCursorId();
- massert( "Expected valid tailing cursor", cursorId != 0 );
+ massert( 10291 , "Expected valid tailing cursor", cursorId != 0 );
} else {
- massert( "Did not expect valid cursor for empty query result", c->getCursorId() == 0 );
+ massert( 10292 , "Did not expect valid cursor for empty query result", c->getCursorId() == 0 );
cursorId = 0;
}
c->decouple();
@@ -633,7 +633,7 @@ namespace mongo {
setClient( source.c_str() );
NamespaceDetails *nsd = nsdetails( source.c_str() );
- uassert( "source namespace does not exist", nsd );
+ uassert( 10026 , "source namespace does not exist", nsd );
bool capped = nsd->capped;
long long size = 0;
if ( capped )
@@ -643,7 +643,7 @@ namespace mongo {
setClient( target.c_str() );
if ( nsdetails( target.c_str() ) ){
- uassert( "target namespace exists", cmdObj["dropTarget"].trueValue() );
+ uassert( 10027 , "target namespace exists", cmdObj["dropTarget"].trueValue() );
BSONObjBuilder bb( result.subobjStart( "dropTarget" ) );
dropCollection( target , errmsg , bb );
bb.done();
diff --git a/db/concurrency.h b/db/concurrency.h
index 6d0a94fbf31..8bbd31ef9f3 100644
--- a/db/concurrency.h
+++ b/db/concurrency.h
@@ -75,7 +75,7 @@ namespace mongo {
_state.set(s+1);
return;
}
- massert("internal error: locks are not upgradeable", s == 0 );
+ massert( 10293 , "internal error: locks are not upgradeable", s == 0 );
_state.set(1);
_m.lock();
_minfo.entered();
diff --git a/db/curop.h b/db/curop.h
index 7c70ad61890..926667b944e 100644
--- a/db/curop.h
+++ b/db/curop.h
@@ -131,10 +131,10 @@ namespace mongo {
void checkForInterrupt() {
if( state != Off ) {
if( state == All )
- uasserted("interrupted at shutdown");
+ uasserted(11600,"interrupted at shutdown");
if( cc().curop()->opNum() == toKill ) {
state = Off;
- uasserted("interrupted");
+ uasserted(11601,"interrupted");
}
}
}
diff --git a/db/database.h b/db/database.h
index cfae0bbb12c..4703146ae8f 100644
--- a/db/database.h
+++ b/db/database.h
@@ -35,11 +35,11 @@ namespace mongo {
{
{
int L = strlen(nm);
- uassert( "db name is empty", L > 0 );
- uassert( "bad db name [1]", *nm != '.' );
- uassert( "bad db name [2]", nm[L-1] != '.' );
- uassert( "bad char(s) in db name", strchr(nm, ' ') == 0 );
- uassert( "db name too long", L < 64 );
+ uassert( 10028 , "db name is empty", L > 0 );
+ uassert( 10029 , "bad db name [1]", *nm != '.' );
+ uassert( 10030 , "bad db name [2]", nm[L-1] != '.' );
+ uassert( 10031 , "bad char(s) in db name", strchr(nm, ' ') == 0 );
+ uassert( 10032 , "db name too long", L < 64 );
}
newDb = namespaceIndex.exists();
@@ -86,9 +86,9 @@ namespace mongo {
out() << "getFile(): n=" << n << endl;
#if !defined(_RECSTORE)
if( n >= RecCache::Base && n <= RecCache::Base+1000 )
- massert("getFile(): bad file number - using recstore db w/nonrecstore db build?", false);
+ massert( 10294 , "getFile(): bad file number - using recstore db w/nonrecstore db build?", false);
#endif
- massert("getFile(): bad file number value (corrupt db?): run repair", false);
+ massert( 10295 , "getFile(): bad file number value (corrupt db?): run repair", false);
}
DEV {
if ( n > 100 )
diff --git a/db/db.cpp b/db/db.cpp
index 0a8088a9240..3d4217d27c6 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -432,7 +432,7 @@ namespace mongo {
stringstream ss;
ss << "dbpath (" << dbpath << ") does not exist";
- massert( ss.str().c_str(), boost::filesystem::exists( dbpath ) );
+ massert( 10296 , ss.str().c_str(), boost::filesystem::exists( dbpath ) );
acquirePathLock();
remove_all( dbpath + "/_tmp/" );
@@ -756,7 +756,7 @@ int main(int argc, char* argv[], char *envp[] )
#endif
if (params.count("logpath")) {
string lp = params["logpath"].as<string>();
- uassert( "logpath has to be non-zero" , lp.size() );
+ uassert( 10033 , "logpath has to be non-zero" , lp.size() );
initLogging( lp , params.count( "logappend" ) );
}
if (params.count("nocursors")) {
@@ -830,32 +830,32 @@ int main(int argc, char* argv[], char *envp[] )
pairWith(paired.c_str(), "-");
}
} else if (params.count("arbiter")) {
- uasserted("specifying --arbiter without --pairwith");
+ uasserted(10999,"specifying --arbiter without --pairwith");
}
if (params.count("autoresync")) {
autoresync = true;
}
if( params.count("nssize") ) {
int x = params["nssize"].as<int>();
- uassert("bad --nssize arg", x > 0 && x <= (0x7fffffff/1024/1024));
+ uassert( 10034 , "bad --nssize arg", x > 0 && x <= (0x7fffffff/1024/1024));
lenForNewNsFiles = x * 1024 * 1024;
assert(lenForNewNsFiles > 0);
}
if (params.count("oplogSize")) {
long x = params["oplogSize"].as<long>();
- uassert("bad --oplogSize arg", x > 0);
+ uassert( 10035 , "bad --oplogSize arg", x > 0);
cmdLine.oplogSize = x * 1024 * 1024;
assert(cmdLine.oplogSize > 0);
}
if (params.count("opIdMem")) {
long x = params["opIdMem"].as<long>();
- uassert("bad --opIdMem arg", x > 0);
+ uassert( 10036 , "bad --opIdMem arg", x > 0);
opIdMem = x;
assert(opIdMem > 0);
}
if (params.count("cacheSize")) {
long x = params["cacheSize"].as<long>();
- uassert("bad --cacheSize arg", x > 0);
+ uassert( 10037 , "bad --cacheSize arg", x > 0);
setRecCacheSize(x);
}
if (params.count("port") == 0 ) {
@@ -1051,7 +1051,7 @@ BOOL CtrlHandler( DWORD fdwCtrlType )
if( SetConsoleCtrlHandler( (PHANDLER_ROUTINE) CtrlHandler, TRUE ) )
;
else
- massert("Couldn't register Windows Ctrl-C handler", false);
+ massert( 10297 , "Couldn't register Windows Ctrl-C handler", false);
}
#endif
diff --git a/db/db.h b/db/db.h
index 7db6967f0b1..4f98282a263 100644
--- a/db/db.h
+++ b/db/db.h
@@ -136,11 +136,11 @@ namespace mongo {
locktype = dbMutex.getState();
assert( locktype );
if ( locktype > 0 ) {
- massert("can't temprelease nested write lock", locktype == 1);
+ massert( 10298 , "can't temprelease nested write lock", locktype == 1);
dbMutex.unlock();
}
else {
- massert("can't temprelease nested read lock", locktype == -1);
+ massert( 10299 , "can't temprelease nested read lock", locktype == -1);
dbMutex.unlock_shared();
}
}
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 0e31c9c0d2f..69d514ea702 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -164,7 +164,7 @@ namespace mongo {
}
CmdForceError() : Command("forceerror") {}
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- uassert("forced error", false);
+ uassert( 10038 , "forced error", false);
return true;
}
} cmdForceError;
@@ -527,7 +527,7 @@ namespace mongo {
errmsg = "ns not found";
return false;
}
- uassert( "can't drop collection with reserved $ character in name", strchr(nsToDrop.c_str(), '$') == 0 );
+ uassert( 10039 , "can't drop collection with reserved $ character in name", strchr(nsToDrop.c_str(), '$') == 0 );
dropCollection( nsToDrop, errmsg, result );
return true;
}
@@ -806,7 +806,7 @@ namespace mongo {
int myn = c.getIntField( "n" );
if ( n != myn ){
log() << "should have chunk: " << n << " have:" << myn << endl;
- uassert( "chunks out of order" , n == myn );
+ uassert( 10040 , "chunks out of order" , n == myn );
}
int len;
@@ -999,9 +999,9 @@ namespace mongo {
string fromNs = string( realDbName ) + "." + from;
string toNs = string( realDbName ) + "." + to;
- massert( "source collection " + fromNs + " does not exist", !setClient( fromNs.c_str() ) );
+ massert( 10300 , "source collection " + fromNs + " does not exist", !setClient( fromNs.c_str() ) );
NamespaceDetails *nsd = nsdetails( fromNs.c_str() );
- massert( "source collection " + fromNs + " does not exist", nsd );
+ massert( 10301 , "source collection " + fromNs + " does not exist", nsd );
long long excessSize = nsd->datasize - size * 2;
DiskLoc extent = nsd->firstExtent;
for( ; excessSize > 0 && extent != nsd->lastExtent; extent = extent.ext()->xnext ) {
@@ -1099,9 +1099,9 @@ namespace mongo {
BSONObjBuilder b( obj.objsize() + 32 );
b.append( "0" , obj );
int res = s->invoke( func , b.obj() );
- uassert( (string)"invoke failed in $keyf: " + s->getError() , res == 0 );
+ uassert( 10041 , (string)"invoke failed in $keyf: " + s->getError() , res == 0 );
int type = s->type("return");
- uassert( "return of $key has to be an object" , type == Object );
+ uassert( 10042 , "return of $key has to be an object" , type == Object );
return s->getObject( "return" );
}
return obj.extractFields( keyPattern , true );
@@ -1158,7 +1158,7 @@ namespace mongo {
n = map.size();
s->setObject( "$key" , key , true );
- uassert( "group() can't handle more than 10000 unique keys" , n <= 10000 );
+ uassert( 10043 , "group() can't handle more than 10000 unique keys" , n <= 10000 );
}
s->setObject( "obj" , obj , true );
@@ -1288,7 +1288,7 @@ namespace mongo {
continue;
if ( map.insert( value ).second ){
size += o.objsize() + 20;
- uassert( "distinct too big, 4mb cap" , size < 4 * 1024 * 1024 );
+ uassert( 10044 , "distinct too big, 4mb cap" , size < 4 * 1024 * 1024 );
}
}
@@ -1365,7 +1365,7 @@ namespace mongo {
string errmsg;
Command *c = i->second;
AuthenticationInfo *ai = currentClient.get()->ai;
- uassert("unauthorized", ai->isAuthorized(cc().database()->name.c_str()) || !c->requiresAuth());
+ uassert( 10045 , "unauthorized", ai->isAuthorized(cc().database()->name.c_str()) || !c->requiresAuth());
bool admin = c->adminOnly();
if ( admin && !fromRepl && strncmp(ns, "admin", 5) != 0 ) {
diff --git a/db/dbeval.cpp b/db/dbeval.cpp
index b20385035ac..7f5b74ff80e 100644
--- a/db/dbeval.cpp
+++ b/db/dbeval.cpp
@@ -39,7 +39,7 @@ namespace mongo {
bool dbEval(const char *ns, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
BSONElement e = cmd.firstElement();
- uassert( "eval needs Code" , e.type() == Code || e.type() == CodeWScope || e.type() == String );
+ uassert( 10046 , "eval needs Code" , e.type() == Code || e.type() == CodeWScope || e.type() == String );
const char *code = 0;
switch ( e.type() ) {
diff --git a/db/dbhelpers.cpp b/db/dbhelpers.cpp
index d1a23e6d09c..45e313ba6c5 100644
--- a/db/dbhelpers.cpp
+++ b/db/dbhelpers.cpp
@@ -131,7 +131,7 @@ namespace mongo {
QueryPlanSet s( ns, query, BSONObj(), 0, !requireIndex );
FindOne original( requireIndex );
shared_ptr< FindOne > res = s.runOp( original );
- massert( res->exceptionMessage(), res->complete() );
+ massert( 10302 , res->exceptionMessage(), res->complete() );
if ( res->one().isEmpty() )
return false;
result = res->one();
@@ -139,7 +139,7 @@ namespace mongo {
}
auto_ptr<CursorIterator> Helpers::find( const char *ns , BSONObj query , bool requireIndex ){
- uassert( "requireIndex not supported in Helpers::find yet" , ! requireIndex );
+ uassert( 10047 , "requireIndex not supported in Helpers::find yet" , ! requireIndex );
auto_ptr<CursorIterator> i;
i.reset( new CursorIterator( DataFileMgr::findAll( ns ) , query ) );
return i;
@@ -216,7 +216,7 @@ namespace mongo {
Helpers::emptyCollection( name_.c_str() );
} else {
string err;
- massert( err, userCreateNS( name_.c_str(), fromjson( "{autoIndexId:false}" ), err, false ) );
+ massert( 10303 , err, userCreateNS( name_.c_str(), fromjson( "{autoIndexId:false}" ), err, false ) );
}
Helpers::ensureIndex( name_.c_str(), key_, true, "setIdx" );
}
diff --git a/db/dbmessage.h b/db/dbmessage.h
index 4db96ffcdfb..3c413cbbefc 100644
--- a/db/dbmessage.h
+++ b/db/dbmessage.h
@@ -117,13 +117,13 @@ namespace mongo {
BSONObj nextJsObj() {
if ( nextjsobj == data )
nextjsobj += strlen(data) + 1; // skip namespace
- massert( "Remaining data too small for BSON object", theEnd - nextjsobj > 3 );
+ massert( 10304 , "Remaining data too small for BSON object", theEnd - nextjsobj > 3 );
BSONObj js(nextjsobj);
- massert( "Invalid object size", js.objsize() > 3 );
- massert( "Next object larger than available space",
+ massert( 10305 , "Invalid object size", js.objsize() > 3 );
+ massert( 10306 , "Next object larger than available space",
js.objsize() < ( theEnd - data ) );
if ( objcheck && !js.valid() ) {
- massert("bad object in message", false);
+ massert( 10307 , "bad object in message", false);
}
nextjsobj += js.objsize();
if ( nextjsobj >= theEnd )
diff --git a/db/extsort.cpp b/db/extsort.cpp
index 46567aa75f5..08b343a1b50 100644
--- a/db/extsort.cpp
+++ b/db/extsort.cpp
@@ -57,7 +57,7 @@ namespace mongo {
}
void BSONObjExternalSorter::sort(){
- uassert( "already sorted" , ! _sorted );
+ uassert( 10048 , "already sorted" , ! _sorted );
_sorted = true;
@@ -82,7 +82,7 @@ namespace mongo {
}
void BSONObjExternalSorter::add( const BSONObj& o , const DiskLoc & loc ){
- uassert( "sorted already" , ! _sorted );
+ uassert( 10049 , "sorted already" , ! _sorted );
if ( ! _cur ){
_cur = new InMemory();
@@ -99,7 +99,7 @@ namespace mongo {
}
void BSONObjExternalSorter::finishMap(){
- uassert( "bad" , _cur );
+ uassert( 10050 , "bad" , _cur );
_curSizeSoFar = 0;
if ( _cur->size() == 0 )
@@ -113,7 +113,7 @@ namespace mongo {
ofstream out;
out.open( file.c_str() , ios_base::out | ios_base::binary );
- uassert( (string)"couldn't open file: " + file , out.good() );
+ uassert( 10051 , (string)"couldn't open file: " + file , out.good() );
int num = 0;
for ( InMemory::iterator i=_cur->begin(); i != _cur->end(); i++ ){
@@ -205,7 +205,7 @@ namespace mongo {
BSONObjExternalSorter::FileIterator::FileIterator( string file ){
long length;
_buf = (char*)_file.map( file.c_str() , length );
- massert( "mmap failed" , _buf );
+ massert( 10308 , "mmap failed" , _buf );
assert( (unsigned long)length == file_size( file ) );
_end = _buf + length;
}
diff --git a/db/extsort.h b/db/extsort.h
index 42702a5a47a..64e45b47533 100644
--- a/db/extsort.h
+++ b/db/extsort.h
@@ -96,7 +96,7 @@ namespace mongo {
void sort();
auto_ptr<Iterator> iterator(){
- uassert( "not sorted" , _sorted );
+ uassert( 10052 , "not sorted" , _sorted );
return auto_ptr<Iterator>( new Iterator( this ) );
}
diff --git a/db/instance.cpp b/db/instance.cpp
index 5326fd02795..abb5de9660d 100644
--- a/db/instance.cpp
+++ b/db/instance.cpp
@@ -150,7 +150,7 @@ namespace mongo {
try {
if (q.fields.get() && q.fields->errmsg)
- uassert(q.fields->errmsg, false);
+ uassert( 10053 , q.fields->errmsg, false);
/* note these are logged BEFORE authentication -- which is sort of ok */
if ( _diaglog.level && logit ) {
@@ -448,7 +448,7 @@ namespace mongo {
DbMessage d(m);
const char *ns = d.getns();
assert(*ns);
- uassert( "not master", isMasterNs( ns ) );
+ uassert( 10054 , "not master", isMasterNs( ns ) );
setClient(ns);
Client& client = cc();
client.top.setWrite();
@@ -459,7 +459,7 @@ namespace mongo {
assert( d.moreJSObjs() );
assert( query.objsize() < m.data->dataLen() );
BSONObj toupdate = d.nextJsObj();
- uassert("update object too large", toupdate.objsize() <= MaxBSONObjectSize);
+ uassert( 10055 , "update object too large", toupdate.objsize() <= MaxBSONObjectSize);
assert( toupdate.objsize() < m.data->dataLen() );
assert( query.objsize() + toupdate.objsize() < m.data->dataLen() );
bool upsert = flags & Option_Upsert;
@@ -480,7 +480,7 @@ namespace mongo {
DbMessage d(m);
const char *ns = d.getns();
assert(*ns);
- uassert( "not master", isMasterNs( ns ) );
+ uassert( 10056 , "not master", isMasterNs( ns ) );
setClient(ns);
Client& client = cc();
client.top.setWrite();
@@ -514,7 +514,7 @@ namespace mongo {
QueryResult* msgdata;
try {
AuthenticationInfo *ai = currentClient.get()->ai;
- uassert("unauthorized", ai->isAuthorized(cc().database()->name.c_str()));
+ uassert( 10057 , "unauthorized", ai->isAuthorized(cc().database()->name.c_str()));
msgdata = getMore(ns, ntoreturn, cursorid, ss);
}
catch ( AssertionException& e ) {
@@ -536,14 +536,14 @@ namespace mongo {
DbMessage d(m);
const char *ns = d.getns();
assert(*ns);
- uassert( "not master", isMasterNs( ns ) );
+ uassert( 10058 , "not master", isMasterNs( ns ) );
setClient(ns);
cc().top.setWrite();
ss << ns;
while ( d.moreJSObjs() ) {
BSONObj js = d.nextJsObj();
- uassert("object to insert too large", js.objsize() <= MaxBSONObjectSize);
+ uassert( 10059 , "object to insert too large", js.objsize() <= MaxBSONObjectSize);
theDataFileMgr.insert(ns, js, false);
logOp("i", ns, js);
}
@@ -720,8 +720,8 @@ namespace mongo {
#if !defined(_WIN32) && !defined(__sunos__)
string name = ( boost::filesystem::path( dbpath ) / "mongod.lock" ).native_file_string();
lockFile = open( name.c_str(), O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO );
- massert( "Unable to create / open lock file for dbpath: " + name, lockFile > 0 );
- massert( "Unable to acquire lock for dbpath: " + name, flock( lockFile, LOCK_EX | LOCK_NB ) == 0 );
+ massert( 10309 , "Unable to create / open lock file for dbpath: " + name, lockFile > 0 );
+ massert( 10310 , "Unable to acquire lock for dbpath: " + name, flock( lockFile, LOCK_EX | LOCK_NB ) == 0 );
stringstream ss;
ss << getpid() << endl;
diff --git a/db/jsobj.cpp b/db/jsobj.cpp
index 8171fc45e13..88d3c62c79f 100644
--- a/db/jsobj.cpp
+++ b/db/jsobj.cpp
@@ -221,7 +221,7 @@ namespace mongo {
stringstream ss;
ss << "Number " << number() << " cannot be represented in JSON";
string message = ss.str();
- massert( message.c_str(), false );
+ massert( 10311 , message.c_str(), false );
}
break;
case Bool:
@@ -332,7 +332,7 @@ namespace mongo {
ss << "Cannot create a properly formatted JSON string with "
<< "element: " << toString() << " of type: " << type();
string message = ss.str();
- massert( message.c_str(), false );
+ massert( 10312 , message.c_str(), false );
}
return s.str();
}
@@ -369,42 +369,42 @@ namespace mongo {
case Symbol:
case Code:
case String:
- massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ massert( 10313 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
x = valuestrsize() + 4;
break;
case CodeWScope:
- massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ massert( 10314 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
x = objsize();
break;
case DBRef:
- massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ massert( 10315 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
x = valuestrsize() + 4 + 12;
break;
case Object:
case Array:
- massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ massert( 10316 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
x = objsize();
break;
case BinData:
- massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ massert( 10317 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
x = valuestrsize() + 4 + 1/*subtype*/;
break;
case RegEx:
{
const char *p = value();
int len1 = ( maxLen == -1 ) ? strlen( p ) : strnlen( p, remain );
- massert( "Invalid regex string", len1 != -1 );
+ massert( 10318 , "Invalid regex string", len1 != -1 );
p = p + len1 + 1;
int len2 = ( maxLen == -1 ) ? strlen( p ) : strnlen( p, remain - len1 - 1 );
- massert( "Invalid regex options string", len2 != -1 );
+ massert( 10319 , "Invalid regex options string", len2 != -1 );
x = len1 + 1 + len2 + 1;
}
break;
default: {
stringstream ss;
ss << "BSONElement: bad type " << (int) type();
- massert(ss.str().c_str(),false);
+ massert( 10320 , ss.str().c_str(),false);
}
}
totalSize = x + fieldNameSize() + 1; // BSONType
@@ -610,21 +610,21 @@ namespace mongo {
case Code:
case Symbol:
case String:
- massert( "Invalid dbref/code/string/symbol size",
+ massert( 10321 , "Invalid dbref/code/string/symbol size",
valuestrsize() > 0 &&
valuestrsize() - 1 == strnlen( valuestr(), valuestrsize() ) );
break;
case CodeWScope: {
int totalSize = *( int * )( value() );
- massert( "Invalid CodeWScope size", totalSize >= 8 );
+ massert( 10322 , "Invalid CodeWScope size", totalSize >= 8 );
int strSizeWNull = *( int * )( value() + 4 );
- massert( "Invalid CodeWScope string size", totalSize >= strSizeWNull + 4 + 4 );
- massert( "Invalid CodeWScope string size",
+ massert( 10323 , "Invalid CodeWScope string size", totalSize >= strSizeWNull + 4 + 4 );
+ massert( 10324 , "Invalid CodeWScope string size",
strSizeWNull > 0 &&
strSizeWNull - 1 == strnlen( codeWScopeCode(), strSizeWNull ) );
- massert( "Invalid CodeWScope size", totalSize >= strSizeWNull + 4 + 4 + 4 );
+ massert( 10325 , "Invalid CodeWScope size", totalSize >= strSizeWNull + 4 + 4 + 4 );
int objSize = *( int * )( value() + 4 + 4 + strSizeWNull );
- massert( "Invalid CodeWScope object size", totalSize == 4 + 4 + strSizeWNull + objSize );
+ massert( 10326 , "Invalid CodeWScope object size", totalSize == 4 + 4 + strSizeWNull + objSize );
// Subobject validation handled elsewhere.
}
case Object:
@@ -707,17 +707,17 @@ namespace mongo {
BSONObjIterator i(*this);
bool first = true;
while ( 1 ) {
- massert( "Object does not end with EOO", i.moreWithEOO() );
+ massert( 10327 , "Object does not end with EOO", i.moreWithEOO() );
BSONElement e = i.next( true );
- massert( "Invalid element size", e.size() > 0 );
- massert( "Element too large", e.size() < ( 1 << 30 ) );
+ massert( 10328 , "Invalid element size", e.size() > 0 );
+ massert( 10329 , "Element too large", e.size() < ( 1 << 30 ) );
int offset = e.rawdata() - this->objdata();
- massert( "Element extends past end of object",
+ massert( 10330 , "Element extends past end of object",
e.size() + offset <= this->objsize() );
e.validate();
bool end = ( e.size() + offset == this->objsize() );
if ( e.eoo() ) {
- massert( "EOO Before end of object", end );
+ massert( 10331 , "EOO Before end of object", end );
break;
}
if ( first )
@@ -814,7 +814,7 @@ namespace mongo {
if ( other.isEmpty() )
return 1;
- uassert( "woSortOrder needs a non-empty sortKey" , ! sortKey.isEmpty() );
+ uassert( 10060 , "woSortOrder needs a non-empty sortKey" , ! sortKey.isEmpty() );
BSONObjIterator i(sortKey);
while ( 1 ){
@@ -1481,7 +1481,7 @@ namespace mongo {
Labeler::Label SIZE( "$size" );
void BSONElementManipulator::initTimestamp() {
- massert( "Expected CurrentTime type", element_.type() == Timestamp );
+ massert( 10332 , "Expected CurrentTime type", element_.type() == Timestamp );
unsigned long long &timestamp = *( reinterpret_cast< unsigned long long* >( value() ) );
if ( timestamp == 0 )
timestamp = OpTime::now().asDate();
@@ -1529,7 +1529,7 @@ namespace mongo {
};
log() << "type not support for appendMinElementForType: " << t << endl;
- uassert( "type not supported for appendMinElementForType" , false );
+ uassert( 10061 , "type not supported for appendMinElementForType" , false );
}
void BSONObjBuilder::appendMaxForType( const string& field , int t ){
diff --git a/db/jsobj.h b/db/jsobj.h
index 92517bc1d8d..3f07521f70c 100644
--- a/db/jsobj.h
+++ b/db/jsobj.h
@@ -494,7 +494,7 @@ namespace mongo {
default:
log() << "can't convert type: " << (int)(type()) << " to code" << endl;
}
- uassert( "not code" , 0 );
+ uassert( 10062 , "not code" , 0 );
return "";
}
@@ -594,12 +594,12 @@ namespace mongo {
}
const char * dbrefNS() const {
- uassert( "not a dbref" , type() == DBRef );
+ uassert( 10063 , "not a dbref" , type() == DBRef );
return value() + 4;
}
const OID& dbrefOID() const {
- uassert( "not a dbref" , type() == DBRef );
+ uassert( 10064 , "not a dbref" , type() == DBRef );
const char * start = value();
start += 4 + *reinterpret_cast< const int* >( start );
return *reinterpret_cast< const OID* >( start );
@@ -620,7 +620,7 @@ namespace mongo {
else {
if ( maxLen != -1 ) {
int size = strnlen( fieldName(), maxLen - 1 );
- massert( "Invalid field name", size != -1 );
+ massert( 10333 , "Invalid field name", size != -1 );
fieldNameSize_ = size + 1;
}
}
@@ -707,7 +707,7 @@ namespace mongo {
stringstream ss;
ss << "Invalid BSONObj spec size: " << objsize();
string s = ss.str();
- massert( s , 0 );
+ massert( 10334 , s , 0 );
}
}
#pragma pack(1)
@@ -1434,7 +1434,7 @@ namespace mongo {
/** The returned BSONObj will free the buffer when it is finished. */
BSONObj obj() {
- massert( "builder does not own memory", owned() );
+ massert( 10335 , "builder does not own memory", owned() );
int l;
return BSONObj(decouple(l), true);
}
@@ -1491,7 +1491,7 @@ namespace mongo {
}
Labeler operator<<( const Labeler::Label &l ) {
- massert( "No subobject started", s_.subobjStarted() );
+ massert( 10336 , "No subobject started", s_.subobjStarted() );
return s_ << l;
}
@@ -1645,13 +1645,13 @@ namespace mongo {
extern JSObj1 js1;
#ifdef _DEBUG
-#define CHECK_OBJECT( o , msg ) massert( (string)"object not valid" + (msg) , (o).isValid() )
+#define CHECK_OBJECT( o , msg ) massert( 10337 , (string)"object not valid" + (msg) , (o).isValid() )
#else
#define CHECK_OBJECT( o , msg )
#endif
inline BSONObj BSONElement::embeddedObjectUserCheck() {
- uassert( "invalid parameter: expected an object", type()==Object || type()==Array );
+ uassert( 10065 , "invalid parameter: expected an object", type()==Object || type()==Array );
return BSONObj(value());
}
diff --git a/db/json.cpp b/db/json.cpp
index 6879298a814..924d84bbc90 100644
--- a/db/json.cpp
+++ b/db/json.cpp
@@ -211,7 +211,7 @@ namespace mongo {
fieldNameEnd( ObjectBuilder &_b ) : b( _b ) {}
void operator() ( const char *start, const char *end ) const {
string name = b.popString();
- massert( "Invalid use of reserved field name",
+ massert( 10338 , "Invalid use of reserved field name",
name != "$oid" &&
name != "$binary" &&
name != "$type" &&
@@ -343,7 +343,7 @@ namespace mongo {
struct binDataBinary {
binDataBinary( ObjectBuilder &_b ) : b( _b ) {}
void operator() ( const char *start, const char *end ) const {
- massert( "Badly formatted bindata", ( end - start ) % 4 == 0 );
+ massert( 10339 , "Badly formatted bindata", ( end - start ) % 4 == 0 );
string encoded( start, end );
b.binData = base64::decode( encoded );
}
@@ -550,7 +550,7 @@ public:
len = 10;
stringstream ss;
ss << "Failure parsing JSON string near: " << string( result.stop, len );
- massert( ss.str(), false );
+ massert( 10340 , ss.str(), false );
}
return b.pop();
}
diff --git a/db/matcher.cpp b/db/matcher.cpp
index 53ebce3bf54..6bdd5f7ea04 100644
--- a/db/matcher.cpp
+++ b/db/matcher.cpp
@@ -55,7 +55,7 @@ namespace mongo {
BSONObj *jsScope;
void setFunc(const char *code) {
- massert( "scope has to be created first!" , scope.get() );
+ massert( 10341 , "scope has to be created first!" , scope.get() );
func = scope->createFunction( code );
}
@@ -127,8 +127,8 @@ namespace mongo {
if ( ( e.type() == CodeWScope || e.type() == Code || e.type() == String ) && strcmp(e.fieldName(), "$where")==0 ) {
// $where: function()...
- uassert( "$where occurs twice?", where == 0 );
- uassert( "$where query, but no script engine", globalScriptEngine );
+ uassert( 10066 , "$where occurs twice?", where == 0 );
+ uassert( 10067 , "$where query, but no script engine", globalScriptEngine );
where = new Where();
where->scope = globalScriptEngine->getPooledScope( cc().ns() );
where->scope->localConnect( cc().database()->name.c_str() );
@@ -183,7 +183,7 @@ namespace mongo {
if ( fn[1] == 'r' && fn[2] == 'e' && fn[3] == 'f' && fn[4] == 0 ){
break; // { $ref : xxx } - treat as normal object
}
- uassert( (string)"invalid operator: " + fn , op != -1 );
+ uassert( 10068 , (string)"invalid operator: " + fn , op != -1 );
}
isOperator = true;
@@ -241,7 +241,7 @@ namespace mongo {
break;
}
default:
- uassert( (string)"BUG - can't operator for: " + fn , 0 );
+ uassert( 10069 , (string)"BUG - can't operator for: " + fn , 0 );
}
}
@@ -520,7 +520,7 @@ namespace mongo {
if ( where ) {
if ( where->func == 0 ) {
- uassert("$where compile error", false);
+ uassert( 10070 , "$where compile error", false);
return false; // didn't compile
}
@@ -537,10 +537,10 @@ namespace mongo {
stringstream ss;
ss << "error on invocation of $where function:\n"
<< where->scope->getError();
- uassert(ss.str(), false);
+ uassert( 10071 , ss.str(), false);
return false;
} else if ( err != 0 ) { // ! INVOKE_SUCCESS
- uassert("unknown error in invocation of $where function", false);
+ uassert( 10072 , "unknown error in invocation of $where function", false);
return false;
}
return where->scope->getBoolean( "return" ) != 0;
@@ -609,7 +609,7 @@ namespace mongo {
int ret = 0;
pcre_config( PCRE_CONFIG_UTF8 , &ret );
- massert( "pcre not compiled with utf8 support" , ret );
+ massert( 10342 , "pcre not compiled with utf8 support" , ret );
pcrecpp::RE re1(")({a}h.*o");
pcrecpp::RE re("h.llo");
diff --git a/db/matcher.h b/db/matcher.h
index 2c198cb1f53..7924b0c3ca3 100644
--- a/db/matcher.h
+++ b/db/matcher.h
@@ -63,7 +63,7 @@ namespace mongo {
mod = o["0"].numberInt();
modm = o["1"].numberInt();
- uassert( "mod can't be 0" , mod );
+ uassert( 10073 , "mod can't be 0" , mod );
}
else if ( _op == BSONObj::opTYPE ){
type = (BSONType)(_e.embeddedObject().firstElement().numberInt());
diff --git a/db/mr.cpp b/db/mr.cpp
index dfefa8e5bc1..e03b542b619 100644
--- a/db/mr.cpp
+++ b/db/mr.cpp
@@ -41,7 +41,7 @@ namespace mongo {
typedef map< BSONObj,list<BSONObj>,MyCmp > InMemory;
BSONObj reduceValues( list<BSONObj>& values , Scope * s , ScriptingFunction reduce , bool final , ScriptingFunction finalize ){
- uassert( "need values" , values.size() );
+ uassert( 10074 , "need values" , values.size() );
int sizeEstimate = ( values.size() * values.begin()->getField( "value" ).size() ) + 128;
BSONObj key;
@@ -68,7 +68,7 @@ namespace mongo {
s->invokeSafe( reduce , args );
if ( s->type( "return" ) == Array ){
- uassert("reduce -> multiple not supported yet",0);
+ uassert( 10075 , "reduce -> multiple not supported yet",0);
return BSONObj();
}
@@ -159,7 +159,7 @@ namespace mongo {
db.dropCollection( finalLong );
if ( db.count( tempLong ) ){
BSONObj info;
- uassert( "rename failed" , db.runCommand( "admin" , BSON( "renameCollection" << tempLong << "to" << finalLong ) , info ) );
+ uassert( 10076 , "rename failed" , db.runCommand( "admin" , BSON( "renameCollection" << tempLong << "to" << finalLong ) , info ) );
}
}
return db.count( finalLong );
@@ -343,7 +343,7 @@ namespace mongo {
boost::thread_specific_ptr<MRTL> _tlmr;
BSONObj fast_emit( const BSONObj& args ){
- uassert( "fast_emit takes 2 args" , args.nFields() == 2 );
+ uassert( 10077 , "fast_emit takes 2 args" , args.nFields() == 2 );
_tlmr->insert( args );
_tlmr->numEmits++;
return BSONObj();
@@ -515,7 +515,7 @@ namespace mongo {
BSONObj res = e.embeddedObjectUserCheck();
- uassert( "something bad happened" , shardedOutputCollection == res["result"].valuestrsafe() );
+ uassert( 10078 , "something bad happened" , shardedOutputCollection == res["result"].valuestrsafe() );
servers.insert( shard );
shardCounts.appendAs( res["counts"] , shard.c_str() );
diff --git a/db/namespace.cpp b/db/namespace.cpp
index cb412b21792..fac072293ce 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -74,13 +74,13 @@ namespace mongo {
len = f.length();
if ( len % (1024*1024) != 0 ){
log() << "bad .ns file: " << pathString << endl;
- uassert( "bad .ns file length, cannot open database", len % (1024*1024) == 0 );
+ uassert( 10079 , "bad .ns file length, cannot open database", len % (1024*1024) == 0 );
}
}
}
else {
// use lenForNewNsFiles, we are making a new database
- massert( "bad lenForNewNsFiles", lenForNewNsFiles >= 1024*1024 );
+ massert( 10343 , "bad lenForNewNsFiles", lenForNewNsFiles >= 1024*1024 );
long l = lenForNewNsFiles;
p = f.map(pathString.c_str(), l);
if( p ) {
@@ -492,14 +492,14 @@ namespace mongo {
continue;
}
- massert( "Capped collection full and delete not allowed", cappedMayDelete() );
+ massert( 10344 , "Capped collection full and delete not allowed", cappedMayDelete() );
DiskLoc fr = theCapExtent()->firstRecord;
theDataFileMgr.deleteRecord(ns, fr.rec(), fr, true);
compact();
if( ++passes >= 5000 ) {
log() << "passes ns:" << ns << " len:" << len << '\n';
log() << "passes max:" << max << " nrecords:" << nrecords << " datasize: " << datasize << endl;
- massert( "passes >= 5000 in capped collection alloc", false );
+ massert( 10345 , "passes >= 5000 in capped collection alloc", false );
}
}
@@ -537,7 +537,7 @@ namespace mongo {
(aug08 - this method not currently used)
*/
int NamespaceDetails::fieldIsIndexed(const char *fieldName) {
- massert("not implemented", false);
+ massert( 10346 , "not implemented", false);
/*
for ( int i = 0; i < nIndexes; i++ ) {
IndexDetails& idx = indexes[i];
@@ -607,7 +607,7 @@ namespace mongo {
spec << "{size:" << logSizeMb * 1024 * 1024 << ",capped:true,autoIndexId:false}";
setClient( _cll_ns.c_str() );
string err;
- massert( "Could not create log ns", userCreateNS( _cll_ns.c_str(), fromjson( spec.str() ), err, false ) );
+ massert( 10347 , "Could not create log ns", userCreateNS( _cll_ns.c_str(), fromjson( spec.str() ), err, false ) );
NamespaceDetails *d = nsdetails( _cll_ns.c_str() );
d->cappedDisallowDelete();
}
diff --git a/db/namespace.h b/db/namespace.h
index c537a9893f8..3007091d8a8 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -88,7 +88,7 @@ namespace mongo {
*this = ns;
}
Namespace& operator=(const char *ns) {
- uassert("ns name too long, max size is 128", strlen(ns) < MaxNsLen);
+ uassert( 10080 , "ns name too long, max size is 128", strlen(ns) < MaxNsLen);
//memset(buf, 0, MaxNsLen); /* this is just to keep stuff clean in the files for easy dumping and reading */
strcpy_s(buf, MaxNsLen, ns);
return *this;
@@ -97,7 +97,7 @@ namespace mongo {
/* for more than 10 indexes -- see NamespaceDetails::Extra */
string extraName() {
string s = string(buf) + "$extra";
- massert("ns name too long", s.size() < MaxNsLen);
+ massert( 10348 , "ns name too long", s.size() < MaxNsLen);
return s;
}
@@ -386,7 +386,7 @@ namespace mongo {
if( &i.next() == &idx )
return i.pos()-1;
}
- massert("E12000 idxNo fails", false);
+ massert( 10349 , "E12000 idxNo fails", false);
return -1;
}
@@ -634,7 +634,7 @@ namespace mongo {
void add_ns( const char *ns, const NamespaceDetails &details ) {
init();
Namespace n(ns);
- uassert("too many namespaces/collections", ht->put(n, details));
+ uassert( 10081 , "too many namespaces/collections", ht->put(n, details));
}
/* just for diagnostics */
@@ -649,12 +649,12 @@ namespace mongo {
Namespace n(ns);
Namespace extra(n.extraName().c_str()); // throws userexception if ns name too long
NamespaceDetails *d = details(ns);
- massert( "allocExtra: base ns missing?", d );
+ massert( 10350 , "allocExtra: base ns missing?", d );
assert( d->extraOffset == 0 );
- massert( "allocExtra: extra already exists", ht->get(extra) == 0 );
+ massert( 10351 , "allocExtra: extra already exists", ht->get(extra) == 0 );
NamespaceDetails::Extra temp;
memset(&temp, 0, sizeof(temp));
- uassert( "allocExtra: too many namespaces/collections", ht->put(extra, (NamespaceDetails&) temp));
+ uassert( 10082 , "allocExtra: too many namespaces/collections", ht->put(extra, (NamespaceDetails&) temp));
NamespaceDetails::Extra *e = (NamespaceDetails::Extra *) ht->get(extra);
d->extraOffset = ((char *) e) - ((char *) d);
assert( d->extra() == e );
diff --git a/db/nonce.cpp b/db/nonce.cpp
index 7fdde052234..4c677bef342 100644
--- a/db/nonce.cpp
+++ b/db/nonce.cpp
@@ -24,7 +24,7 @@ namespace mongo {
Security::Security() {
static int n;
- massert("Security is a singleton class", ++n == 1);
+ massert( 10352 , "Security is a singleton class", ++n == 1);
init();
}
@@ -34,7 +34,7 @@ namespace mongo {
#if defined(__linux__)
_devrandom = new ifstream("/dev/urandom", ios::binary|ios::in);
- massert( "can't open dev/urandom", _devrandom->is_open() );
+ massert( 10353 , "can't open dev/urandom", _devrandom->is_open() );
#elif defined(_WIN32)
srand(curTimeMicros());
#else
@@ -44,7 +44,7 @@ namespace mongo {
#ifndef NDEBUG
if ( do_md5_test() )
- massert("md5 unit test fails", false);
+ massert( 10354 , "md5 unit test fails", false);
#endif
}
@@ -59,7 +59,7 @@ namespace mongo {
nonce n;
#if defined(__linux__)
_devrandom->read((char*)&n, sizeof(n));
- massert("devrandom failed", !_devrandom->fail());
+ massert( 10355 , "devrandom failed", !_devrandom->fail());
#elif defined(_WIN32)
n = (((unsigned long long)rand())<<32) | rand();
#else
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 1d9e2972b84..cc26c012a50 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -111,7 +111,7 @@ namespace mongo {
size &= 0xffffffffffffff00LL;
}
- uassert( "invalid size spec", size > 0 );
+ uassert( 10083 , "invalid size spec", size > 0 );
bool newCapped = false;
int mx = 0;
@@ -172,7 +172,7 @@ namespace mongo {
// returns true if successful
bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication) {
const char *coll = strchr( ns, '.' ) + 1;
- massert( "invalid ns", coll && *coll );
+ massert( 10356 , "invalid ns", coll && *coll );
char cl[ 256 ];
nsToClient( ns, cl );
bool ok = _userCreateNS(ns, j, err);
@@ -237,7 +237,7 @@ namespace mongo {
Database *database = cc().database();
if ( database )
s += database->name;
- uasserted(s);
+ uasserted(12501,s);
}
}
@@ -265,9 +265,9 @@ namespace mongo {
header = (MDFHeader *) mmf.map(filename, size);
if( sizeof(char *) == 4 )
- uassert("can't map file memory - mongo requires 64 bit build for larger datasets", header);
+ uassert( 10084 , "can't map file memory - mongo requires 64 bit build for larger datasets", header);
else
- uassert("can't map file memory", header);
+ uassert( 10085 , "can't map file memory", header);
header->init(fileNo, size);
}
@@ -294,9 +294,9 @@ namespace mongo {
}
Extent* MongoDataFile::createExtent(const char *ns, int approxSize, bool newCapped, int loops) {
- massert( "shutdown in progress", !goingAway );
- massert( "bad new extent size", approxSize >= 0 && approxSize <= 0x7ff00000 );
- massert( "header==0 on new extent: 32 bit mmap space exceeded?", header ); // null if file open failed
+ massert( 10357 , "shutdown in progress", !goingAway );
+ massert( 10358 , "bad new extent size", approxSize >= 0 && approxSize <= 0x7ff00000 );
+ massert( 10359 , "header==0 on new extent: 32 bit mmap space exceeded?", header ); // null if file open failed
int ExtentSize = approxSize <= header->unusedLength ? approxSize : header->unusedLength;
DiskLoc loc;
if ( ExtentSize <= 0 ) {
@@ -389,7 +389,7 @@ namespace mongo {
DiskLoc Extent::reuse(const char *nsname) {
log(3) << "reset extent was:" << nsDiagnostic.buf << " now:" << nsname << '\n';
- massert( "Extent::reset bad magic value", magic == 0x41424344 );
+ massert( 10360 , "Extent::reset bad magic value", magic == 0x41424344 );
xnext.Null();
xprev.Null();
nsDiagnostic = nsname;
@@ -577,15 +577,15 @@ namespace mongo {
/* drop a collection/namespace */
void dropNS(const string& nsToDrop) {
NamespaceDetails* d = nsdetails(nsToDrop.c_str());
- uassert( (string)"ns not found: " + nsToDrop , d );
+ uassert( 10086 , (string)"ns not found: " + nsToDrop , d );
NamespaceString s(nsToDrop);
assert( s.db == cc().database()->name );
if( s.isSystem() ) {
if( s.coll == "system.profile" )
- uassert( "turn off profiling before dropping system.profile collection", cc().database()->profile == 0 );
+ uassert( 10087 , "turn off profiling before dropping system.profile collection", cc().database()->profile == 0 );
else
- uasserted( "can't drop system ns" );
+ uasserted( 12502, "can't drop system ns" );
}
{
@@ -604,7 +604,7 @@ namespace mongo {
string err;
_userCreateNS(s.c_str(), BSONObj(), err);
freeExtents = nsdetails(s.c_str());
- massert("can't create .$freelist", freeExtents);
+ massert( 10361 , "can't create .$freelist", freeExtents);
}
if( freeExtents->firstExtent.isNull() ) {
freeExtents->firstExtent = d->firstExtent;
@@ -635,7 +635,7 @@ namespace mongo {
assert( deleteIndexes(d, name.c_str(), "*", errmsg, result, true) );
}
catch( DBException& ) {
- uasserted("drop: deleteIndexes for collection failed - consider trying repair");
+ uasserted(12503,"drop: deleteIndexes for collection failed - consider trying repair");
}
assert( d->nIndexes == 0 );
}
@@ -697,7 +697,7 @@ namespace mongo {
arrElt = e;
}
// enforce single array path here
- uassert( "cannot index parallel arrays", e.type() != Array || e.rawdata() == arrElt.rawdata() );
+ uassert( 10088 , "cannot index parallel arrays", e.type() != Array || e.rawdata() == arrElt.rawdata() );
}
bool allFound = true; // have we found elements for all field names in the key spec?
for( vector< const char * >::const_iterator i = fieldNames.begin(); allFound && i != fieldNames.end(); ++i )
@@ -877,7 +877,7 @@ namespace mongo {
NamespaceDetails* d = nsdetails(ns);
if ( d->capped && !cappedOK ) {
out() << "failing remove on a capped ns " << ns << endl;
- uassert( "can't remove from a capped collection E00051" , 0 );
+ uassert( 10089 , "can't remove from a capped collection" , 0 );
return;
}
@@ -916,7 +916,7 @@ namespace mongo {
if( added.empty() || !idx.unique() )
return;
for( vector<BSONObj*>::iterator i = added.begin(); i != added.end(); i++ )
- uassert("E11001 duplicate key on update", !idx.hasKey(**i));
+ uassert( 11001 , "E11001 duplicate key on update", !idx.hasKey(**i));
}
};
@@ -982,7 +982,7 @@ namespace mongo {
if ( toupdate->netLength() < objNew.objsize() ) {
// doesn't fit. reallocate -----------------------------------------------------
- uassert("E10003 failing update: objects in a capped ns cannot grow", !(d && d->capped));
+ uassert( 10003 , "E10003 failing update: objects in a capped ns cannot grow", !(d && d->capped));
d->paddingTooSmall();
if ( cc().database()->profile )
ss << " moved ";
@@ -1165,7 +1165,7 @@ namespace mongo {
keep in ram and have a limit.
*/
dupsToDrop.push_back(d.second);
- uassert("too may dups on index build with dropDups=true", dupsToDrop.size() < 1000000 );
+ uassert( 10092 , "too many dups on index build with dropDups=true", dupsToDrop.size() < 1000000 );
}
pm2.hit();
}
@@ -1329,11 +1329,11 @@ namespace mongo {
*/
DiskLoc DataFileMgr::insert(const char *ns, const void *obuf, int len, bool god, const BSONElement &writeId, bool mayAddIndex) {
bool wouldAddIndex = false;
- uassert("cannot insert into reserved $ collection", god || strchr(ns, '$') == 0 );
- uassert("invalid ns", strchr( ns , '.' ) > 0 );
+ uassert( 10093 , "cannot insert into reserved $ collection", god || strchr(ns, '$') == 0 );
+ uassert( 10094 , "invalid ns", strchr( ns , '.' ) > 0 );
const char *sys = strstr(ns, "system.");
if ( sys ) {
- uassert("attempt to insert in reserved database name 'system'", sys != ns);
+ uassert( 10095 , "attempt to insert in reserved database name 'system'", sys != ns);
if ( strstr(ns, ".system.") ) {
// later:check for dba-type permissions here if have that at some point separate
if ( strstr(ns, ".system.indexes" ) )
@@ -1372,23 +1372,23 @@ namespace mongo {
BSONObj io((const char *) obuf);
const char *name = io.getStringField("name"); // name of the index
tabletoidxns = io.getStringField("ns"); // table it indexes
- uassert( "invalid ns to index" , tabletoidxns.size() && tabletoidxns.find( '.' ) != string::npos );
+ uassert( 10096 , "invalid ns to index" , tabletoidxns.size() && tabletoidxns.find( '.' ) != string::npos );
if ( cc().database()->name != nsToClient(tabletoidxns.c_str()) ) {
- uassert("bad table to index name on add index attempt", false);
+ uassert( 10097 , "bad table to index name on add index attempt", false);
return DiskLoc();
}
BSONObj key = io.getObjectField("key");
if( !validKeyPattern(key) ) {
string s = string("bad index key pattern ") + key.toString();
- uassert(s.c_str(), false);
+ uassert( 10098 , s.c_str(), false);
}
if ( *name == 0 || tabletoidxns.empty() || key.isEmpty() || key.objsize() > 2048 ) {
out() << "user warning: bad add index attempt name:" << (name?name:"") << "\n ns:" <<
tabletoidxns << "\n ourns:" << ns;
out() << "\n idxobj:" << io.toString() << endl;
string s = "bad add index attempt " + tabletoidxns + " key:" + key.toString();
- uasserted(s);
+ uasserted(12504, s);
}
tableToIndex = nsdetails(tabletoidxns.c_str());
if ( tableToIndex == 0 ) {
@@ -1411,7 +1411,7 @@ namespace mongo {
ss << "add index fails, too many indexes for " << tabletoidxns << " key:" << key.toString();
string s = ss.str();
log() << s << '\n';
- uasserted(s);
+ uasserted(12505,s);
}
if ( !god && IndexDetails::isIdIndexPattern( key ) ) {
ensureHaveIdIndex( tabletoidxns.c_str() );
@@ -1430,7 +1430,7 @@ namespace mongo {
*/
BSONObj io((const char *) obuf);
BSONElement idField = io.getField( "_id" );
- uassert( "_id cannot be an array", idField.type() != Array );
+ uassert( 10099 , "_id cannot be an array", idField.type() != Array );
if( idField.eoo() && !wouldAddIndex && strstr(ns, ".local.") == 0 ) {
addID = len;
if ( writeId.eoo() ) {
diff --git a/db/query.cpp b/db/query.cpp
index c5fcda00229..e8a93d33a2c 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -111,21 +111,18 @@ namespace mongo {
if done here, as there are pointers into those objects in
NamespaceDetails.
*/
- if( ! legalClientSystemNS( ns , true ) ){
- uasserted("cannot delete from system namespace");
- return -1;
- }
+ uassert(12050, "cannot delete from system namespace", legalClientSystemNS( ns , true ) );
}
if ( strchr( ns , '$' ) ){
log() << "cannot delete from collection with reserved $ in name: " << ns << endl;
- uassert( "cannot delete from collection with reserved $ in name", strchr(ns, '$') == 0 );
+ uassert( 10100 , "cannot delete from collection with reserved $ in name", strchr(ns, '$') == 0 );
}
}
NamespaceDetails *d = nsdetails( ns );
if ( ! d )
return 0;
- uassert( "can't remove from a capped collection E00052" , ! d->capped );
+ uassert( 10101 , "can't remove from a capped collection" , ! d->capped );
int nDeleted = 0;
QueryPlanSet s( ns, pattern, BSONObj() );
@@ -252,11 +249,11 @@ namespace mongo {
if ( j.isEmpty() )
break;
BSONElement e = j.firstElement();
- uassert("bad order array", !e.eoo());
- uassert("bad order array [2]", e.isNumber());
+ uassert( 10102 , "bad order array", !e.eoo());
+ uassert( 10103 , "bad order array [2]", e.isNumber());
b.append(e);
(*p)++;
- uassert("too many ordering elements", *p <= '9');
+ uassert( 10104 , "too many ordering elements", *p <= '9');
}
return b.obj();
@@ -497,7 +494,7 @@ namespace mongo {
findingStart_( (queryOptions & Option_OplogReplay) != 0 ),
findingStartCursor_()
{
- uassert("bad skip value in query", ntoskip >= 0);
+ uassert( 10105 , "bad skip value in query", ntoskip >= 0);
}
virtual void init() {
@@ -723,13 +720,13 @@ namespace mongo {
else {
AuthenticationInfo *ai = currentClient.get()->ai;
- uassert("unauthorized", ai->isAuthorized(cc().database()->name.c_str()));
+ uassert( 10106 , "unauthorized", ai->isAuthorized(cc().database()->name.c_str()));
/* we allow queries to SimpleSlave's -- but not to the slave (nonmaster) member of a replica pair
so that queries to a pair are realtime consistent as much as possible. use setSlaveOk() to
query the nonmaster member of a replica pair.
*/
- uassert( "not master", isMaster() || (queryOptions & Option_SlaveOk) || slave == SimpleSlave );
+ uassert( 10107 , "not master", isMaster() || (queryOptions & Option_SlaveOk) || slave == SimpleSlave );
BSONElement hint;
BSONObj min;
@@ -769,8 +766,8 @@ namespace mongo {
BSONElement e = jsobj.getField("$snapshot");
snapshot = !e.eoo() && e.trueValue();
if( snapshot ) {
- uassert("E12001 can't sort with $snapshot", order.isEmpty());
- uassert("E12002 can't use hint with $snapshot", hint.eoo());
+ uassert( 12001 , "E12001 can't sort with $snapshot", order.isEmpty());
+ uassert( 12002 , "E12002 can't use hint with $snapshot", hint.eoo());
NamespaceDetails *d = nsdetails(ns);
if ( d ){
int i = d->findIdIndex();
@@ -800,7 +797,7 @@ namespace mongo {
out() << "Bad query object?\n jsobj:";
out() << jsobj.toString() << "\n query:";
out() << query.toString() << endl;
- uassert("bad query object", false);
+ uassert( 10110 , "bad query object", false);
}
if ( strcmp( query.firstElement().fieldName() , "_id" ) == 0 && query.nFields() == 1 && query.firstElement().isSimpleType() ){
@@ -833,7 +830,7 @@ namespace mongo {
UserQueryOp original( ntoskip, ntoreturn, order, wantMore, explain, filter.get(), queryOptions );
shared_ptr< UserQueryOp > o = qps.runOp( original );
UserQueryOp &dqo = *o;
- massert( dqo.exceptionMessage(), dqo.complete() );
+ massert( 10362 , dqo.exceptionMessage(), dqo.complete() );
n = dqo.n();
nscanned = dqo.nscanned();
if ( dqo.scanAndOrderRequired() )
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index 3745c268cab..499417a0f1c 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -37,7 +37,7 @@ namespace mongo {
if ( ! nsdetails( ns ) )
return;
- uassert( (string)"table scans not allowed:" + ns , ! cmdLine.notablescan );
+ uassert( 10111 , (string)"table scans not allowed:" + ns , ! cmdLine.notablescan );
}
double elementDirection( const BSONElement &e ) {
@@ -175,7 +175,7 @@ namespace mongo {
return findTableScan( fbs_.ns(), order_, startLoc );
}
- massert( "newCursor() with start location not implemented for indexed plans", startLoc.isNull() );
+ massert( 10363 , "newCursor() with start location not implemented for indexed plans", startLoc.isNull() );
if ( indexBounds_.size() < 2 ) {
// we are sure to spec endKeyInclusive_
@@ -194,7 +194,7 @@ namespace mongo {
orderSpec = 1;
return findTableScan( fbs_.ns(), BSON( "$natural" << -orderSpec ) );
}
- massert( "newReverseCursor() not implemented for indexed plans", false );
+ massert( 10364 , "newReverseCursor() not implemented for indexed plans", false );
return auto_ptr< Cursor >( 0 );
}
@@ -235,7 +235,7 @@ namespace mongo {
string errmsg;
BSONObj keyPattern = id.keyPattern();
// This reformats min_ and max_ to be used for index lookup.
- massert( errmsg, indexDetailsForRange( fbs_.ns(), errmsg, min_, max_, keyPattern ) );
+ massert( 10365 , errmsg, indexDetailsForRange( fbs_.ns(), errmsg, min_, max_, keyPattern ) );
}
NamespaceDetails *d = nsdetails(ns);
plans_.push_back( PlanPtr( new QueryPlan( d, d->idxNo(id), fbs_, order_, min_, max_ ) ) );
@@ -270,9 +270,9 @@ namespace mongo {
}
else if( hint.type() == Object ) {
BSONObj hintobj = hint.embeddedObject();
- uassert( "bad hint", !hintobj.isEmpty() );
+ uassert( 10112 , "bad hint", !hintobj.isEmpty() );
if ( !strcmp( hintobj.firstElement().fieldName(), "$natural" ) ) {
- massert( "natural order cannot be specified with $min/$max", min_.isEmpty() && max_.isEmpty() );
+ massert( 10366 , "natural order cannot be specified with $min/$max", min_.isEmpty() && max_.isEmpty() );
// Table scan plan
plans_.push_back( PlanPtr( new QueryPlan( d, -1, fbs_, order_ ) ) );
return;
@@ -286,14 +286,14 @@ namespace mongo {
}
}
}
- uassert( "bad hint", false );
+ uassert( 10113 , "bad hint", false );
}
if ( !min_.isEmpty() || !max_.isEmpty() ) {
string errmsg;
BSONObj keyPattern;
IndexDetails *idx = indexDetailsForRange( ns, errmsg, min_, max_, keyPattern );
- massert( errmsg, idx );
+ massert( 10367 , errmsg, idx );
plans_.push_back( PlanPtr( new QueryPlan( d, d->idxNo(*idx), fbs_, order_, min_, max_ ) ) );
return;
}
@@ -321,7 +321,7 @@ namespace mongo {
return;
}
}
- massert( "Unable to locate previously recorded index", false );
+ massert( 10368 , "Unable to locate previously recorded index", false );
}
}
@@ -393,7 +393,7 @@ namespace mongo {
}
shared_ptr< QueryOp > QueryPlanSet::Runner::run() {
- massert( "no plans", plans_.plans_.size() > 0 );
+ massert( 10369 , "no plans", plans_.plans_.size() > 0 );
if ( plans_.plans_.size() > 1 )
log(1) << " running multiple plans" << endl;
diff --git a/db/queryutil.cpp b/db/queryutil.cpp
index f06d11966b5..dc0bf5790ca 100644
--- a/db/queryutil.cpp
+++ b/db/queryutil.cpp
@@ -89,7 +89,7 @@ namespace mongo {
lower = e;
break;
case BSONObj::opALL: {
- massert( "$all requires array", e.type() == Array );
+ massert( 10370 , "$all requires array", e.type() == Array );
BSONObjIterator i( e.embeddedObject() );
if ( i.more() )
lower = upper = i.next();
@@ -337,7 +337,7 @@ namespace mongo {
///////////////////
void FieldMatcher::add( const BSONObj& o ){
- massert("can only add to FieldMatcher once", source_.isEmpty());
+ massert( 10371 , "can only add to FieldMatcher once", source_.isEmpty());
source_ = o;
BSONObjIterator i( o );
diff --git a/db/rec.h b/db/rec.h
index 13b91f5f0c6..b749dd844be 100644
--- a/db/rec.h
+++ b/db/rec.h
@@ -65,7 +65,7 @@ public:
static char* get(DiskLoc d, unsigned len) {
assert( d.a() == INMEMFILE );
#ifdef __LP64__
- massert("64 bit not done", false);
+ massert( 10372 , "64 bit not done", false);
return 0;
#else
return (char *) d.getOfs();
@@ -93,7 +93,7 @@ public:
}
virtual void rename(const char *fromNs, const char *toNs) {
- massert( "rename not yet implemented for InMem_RecStore", false );
+ massert( 10373 , "rename not yet implemented for InMem_RecStore", false );
}
};
#endif
diff --git a/db/reccache.cpp b/db/reccache.cpp
index 9442c320a56..66dd4e31ccc 100644
--- a/db/reccache.cpp
+++ b/db/reccache.cpp
@@ -16,7 +16,7 @@ unsigned RecCache::MAXNODES = 50000;
void setRecCacheSize(unsigned mb) {
unsigned long long MB = mb;
log(2) << "reccache size: " << MB << "MB\n";
- uassert( "bad cache size", MB > 0 && MB < 1000000 );
+ uassert( 10114 , "bad cache size", MB > 0 && MB < 1000000 );
RecCache::MAXNODES = (unsigned) MB * 1024 * 1024 / 8192;
log(3) << "RecCache::MAXNODES=" << RecCache::MAXNODES << '\n';
}
@@ -103,7 +103,7 @@ BasicRecStore* RecCache::_initStore(string fname) {
// arbitrary limit. if you are hitting, we should use fewer files and put multiple
// indexes in a single file (which is easy to do)
- massert( "too many index files", n < 10000 );
+ massert( 10374 , "too many index files", n < 10000 );
if( stores.size() < (unsigned)n+1 )
stores.resize(n+1);
@@ -148,11 +148,11 @@ BasicRecStore* RecCache::initStore(int n) {
}
catch (...) {
string s = string("i/o error looking for .idx file in ") + directory();
- massert(s, false);
+ massert( 10375 , s, false);
}
stringstream ss;
ss << "index datafile missing? n=" << n;
- uasserted(ss.str());
+ uasserted(12500,ss.str());
return 0;
}
@@ -196,7 +196,7 @@ string RecCache::findStoreFilename(const char *_ns, bool& found) {
}
catch (...) {
string s = string("i/o error looking for .idx file in ") + directory();
- massert(s, false);
+ massert( 10376 , s, false);
}
// DNE. return a name that would work.
diff --git a/db/reccache.h b/db/reccache.h
index 6516e0a2632..42943c52dd0 100644
--- a/db/reccache.h
+++ b/db/reccache.h
@@ -177,7 +177,7 @@ public:
fileofs o = rs.insert((const char *) obuf, len);
assert( o % recsize == 0 );
fileofs recnum = o / recsize;
- massert( "RecCache file too large?", recnum <= 0x7fffffff );
+ massert( 10377 , "RecCache file too large?", recnum <= 0x7fffffff );
Node *n = mkNode();
memcpy(n->data, obuf, len);
DiskLoc d(rs.fileNumber + Base, (int) recnum);
@@ -214,7 +214,7 @@ public:
}
virtual void rename(const char *fromNs, const char *toNs) {
- massert( "rename not yet implemented for CachedBasicRecStore", false );
+ massert( 10378 , "rename not yet implemented for CachedBasicRecStore", false );
}
/* close datafiles associated with the db specified. */
diff --git a/db/reci.h b/db/reci.h
index ead396339ce..295388c57ce 100644
--- a/db/reci.h
+++ b/db/reci.h
@@ -25,7 +25,7 @@ public:
/* insert specified data as a record */
virtual DiskLoc insert(const char *ns, const void *obuf, int len, bool god) = 0;
- virtual void deleteRecord(const char *ns, DiskLoc d) { massert("not implemented RecStoreInterface::deleteRecord", false); }
+ virtual void deleteRecord(const char *ns, DiskLoc d) { massert( 10379 , "not implemented RecStoreInterface::deleteRecord", false); }
/* drop the collection */
virtual void drop(const char *ns) = 0;
diff --git a/db/recstore.h b/db/recstore.h
index 8bec7f8d683..2e6a90a74bf 100644
--- a/db/recstore.h
+++ b/db/recstore.h
@@ -50,7 +50,7 @@ private:
RecStoreHeader h; // h.reserved is wasteful here; fix later.
void write(fileofs ofs, const char *data, unsigned len) {
f.write(ofs, data, len);
- massert("basicrecstore write io error", !f.bad());
+ massert( 10380 , "basicrecstore write io error", !f.bad());
}
};
@@ -66,25 +66,25 @@ inline BasicRecStore::~BasicRecStore() {
inline void BasicRecStore::writeHeader() {
write(0, (const char *) &h, 28); // update header in file for new leof
- uassert("file io error in BasicRecStore [1]", !f.bad());
+ uassert( 10115 , "file io error in BasicRecStore [1]", !f.bad());
}
inline fileofs BasicRecStore::insert(const char *buf, unsigned reclen) {
if( h.firstDeleted ) {
- uasserted("deleted not yet implemented recstoreinsert");
+ uasserted(11500, "deleted not yet implemented recstoreinsert");
}
- massert("bad len", reclen == h.recsize);
+ massert( 10381 , "bad len", reclen == h.recsize);
fileofs ofs = h.leof;
h.leof += reclen;
if( h.leof > len ) {
// grow the file. we grow quite a bit to avoid excessive file system fragmentations
len += (len / 8) + h.recsize;
- uassert( "recstore file too big for 32 bit", len <= 0x7fffffff || sizeof(std::streamoff) > 4 );
+ uassert( 10116 , "recstore file too big for 32 bit", len <= 0x7fffffff || sizeof(std::streamoff) > 4 );
write(len, "", 0);
}
writeHeader();
write(ofs, buf, reclen);
- uassert("file io error in BasicRecStore [2]", !f.bad());
+ uassert( 10117 , "file io error in BasicRecStore [2]", !f.bad());
return ofs;
}
@@ -98,11 +98,11 @@ inline void BasicRecStore::update(fileofs o, const char *buf, unsigned len) {
inline void BasicRecStore::get(fileofs o, char *buf, unsigned len) {
assert(o <= h.leof && o >= sizeof(RecStoreHeader));
f.read(o, buf, len);
- massert("basicrestore::get I/O error", !f.bad());
+ massert( 10382 , "basicrecstore::get I/O error", !f.bad());
}
inline void BasicRecStore::remove(fileofs o, unsigned len) {
- uasserted("not yet implemented recstoreremove");
+ uasserted(11501, "not yet implemented recstoreremove");
}
}
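[editor's note] Two message styles recur in the converted sites: a literal message guarded by a condition (uassert/massert), and a message built at runtime and thrown unconditionally (uasserted). A minimal usage sketch of both forms, mirroring the reccache call sites earlier in this diff; the error codes below are placeholders, not values assigned by this patch.

    // Placeholder codes 99998/99999 for illustration only.
    uassert( 99999 , "bad cache size", mb > 0 && mb < 1000000 );

    std::stringstream ss;
    ss << "index datafile missing? n=" << n;
    uasserted( 99998 , ss.str() );   // no condition: always throws with the built message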
diff --git a/db/repl.cpp b/db/repl.cpp
index 4ade7f14e0e..73a4e763d8b 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -336,7 +336,7 @@ namespace mongo {
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
if ( replPair == 0 ) {
- massert( "Another mongod instance believes incorrectly that this node is its peer", !cmdObj.getBoolField( "fromArbiter" ) );
+ massert( 10383 , "Another mongod instance believes incorrectly that this node is its peer", !cmdObj.getBoolField( "fromArbiter" ) );
// assume that we are an arbiter and should forward the request
string host = cmdObj.getStringField("your_name");
int port = cmdObj.getIntField( "your_port" );
@@ -462,11 +462,11 @@ namespace mongo {
only = o.getStringField("only");
hostName = o.getStringField("host");
_sourceName = o.getStringField("source");
- uassert( "'host' field not set in sources collection object", !hostName.empty() );
- uassert( "only source='main' allowed for now with replication", sourceName() == "main" );
+ uassert( 10118 , "'host' field not set in sources collection object", !hostName.empty() );
+ uassert( 10119 , "only source='main' allowed for now with replication", sourceName() == "main" );
BSONElement e = o.getField("syncedTo");
if ( !e.eoo() ) {
- uassert( "bad sources 'syncedTo' field value", e.type() == Date || e.type() == Timestamp );
+ uassert( 10120 , "bad sources 'syncedTo' field value", e.type() == Date || e.type() == Timestamp );
OpTime tmp( e.date() );
syncedTo = tmp;
}
@@ -594,20 +594,20 @@ namespace mongo {
n++;
ReplSource tmp(c->current());
if ( tmp.hostName != cmdLine.source ) {
- log() << "E10000 --source " << cmdLine.source << " != " << tmp.hostName << " from local.sources collection" << endl;
+ log() << "--source " << cmdLine.source << " != " << tmp.hostName << " from local.sources collection" << endl;
log() << "terminating after 30 seconds" << endl;
sleepsecs(30);
dbexit( EXIT_REPLICATION_ERROR );
}
if ( tmp.only != cmdLine.only ) {
- log() << "E10001 --only " << cmdLine.only << " != " << tmp.only << " from local.sources collection" << endl;
+ log() << "--only " << cmdLine.only << " != " << tmp.only << " from local.sources collection" << endl;
log() << "terminating after 30 seconds" << endl;
sleepsecs(30);
dbexit( EXIT_REPLICATION_ERROR );
}
c->advance();
}
- uassert( "E10002 local.sources collection corrupt?", n<2 );
+ uassert( 10002 , "local.sources collection corrupt?", n<2 );
if ( n == 0 ) {
// source missing. add.
ReplSource s;
@@ -618,7 +618,7 @@ namespace mongo {
}
else {
try {
- massert("--only requires use of --source", cmdLine.only.empty());
+ massert( 10384 , "--only requires use of --source", cmdLine.only.empty());
} catch ( ... ) {
dbexit( EXIT_BADOPTIONS );
}
@@ -636,14 +636,14 @@ namespace mongo {
n++;
ReplSource tmp(c->current());
if ( tmp.hostName != remote ) {
- log() << "E10003 pairwith " << remote << " != " << tmp.hostName << " from local.sources collection" << endl;
+ log() << "pairwith " << remote << " != " << tmp.hostName << " from local.sources collection" << endl;
log() << "terminating after 30 seconds" << endl;
sleepsecs(30);
dbexit( EXIT_REPLICATION_ERROR );
}
c->advance();
}
- uassert( "E10002 local.sources collection corrupt?", n<2 );
+ uassert( 10122 , "local.sources collection corrupt?", n<2 );
if ( n == 0 ) {
// source missing. add.
ReplSource s;
@@ -708,7 +708,7 @@ namespace mongo {
dbtemprelease t;
connect();
bool ok = conn->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
- massert( "Unable to get database list", ok );
+ massert( 10385 , "Unable to get database list", ok );
}
BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
while( i.moreWithEOO() ) {
@@ -984,7 +984,7 @@ namespace mongo {
BSONObj last = conn->findOne( _ns.c_str(), Query().sort( BSON( "$natural" << -1 ) ) );
if ( !last.isEmpty() ) {
BSONElement ts = last.findElement( "ts" );
- massert( "non Date ts found", ts.type() == Date || ts.type() == Timestamp );
+ massert( 10386 , "non Date ts found", ts.type() == Date || ts.type() == Timestamp );
syncedTo = OpTime( ts.date() );
}
}
@@ -1003,7 +1003,7 @@ namespace mongo {
}
void ReplSource::resetSlave() {
- massert( "request to kill slave replication falied",
+ massert( 10387 , "request to kill slave replication failed",
conn->simpleCommand( "admin", 0, "forcedead" ) );
syncToTailOfRemoteLog();
{
@@ -1038,7 +1038,7 @@ namespace mongo {
idTracker.reset();
dbtemprelease t;
resetSlave();
- massert( "local master log filled, forcing slave resync", false );
+ massert( 10388 , "local master log filled, forcing slave resync", false );
}
if ( !newTail.isNull() )
localLogTail = newTail;
@@ -1071,7 +1071,7 @@ namespace mongo {
syncToTailOfRemoteLog();
BSONObj info;
bool ok = conn->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
- massert( "Unable to get database list", ok );
+ massert( 10389 , "Unable to get database list", ok );
BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
while( i.moreWithEOO() ) {
BSONElement e = i.next();
@@ -1156,11 +1156,11 @@ namespace mongo {
string err = op.getStringField("$err");
if ( !err.empty() ) {
problem() << "repl: $err reading remote oplog: " + err << '\n';
- massert( "got $err reading remote oplog", false );
+ massert( 10390 , "got $err reading remote oplog", false );
}
else {
problem() << "repl: bad object read from remote oplog: " << op.toString() << '\n';
- massert("repl: bad object read from remote oplog", false);
+ massert( 10391 , "repl: bad object read from remote oplog", false);
}
}
@@ -1264,7 +1264,7 @@ namespace mongo {
nextOpTime = tmp;
if ( !( last < nextOpTime ) ) {
problem() << "sync error: last " << last.toString() << " >= nextOpTime " << nextOpTime.toString() << endl;
- uassert("bad 'ts' value in sources", false);
+ uassert( 10123 , "bad 'ts' value in sources", false);
}
sync_pullOpLog_applyOperation(op, &localLogTail);
@@ -1302,8 +1302,8 @@ namespace mongo {
string u = user.getStringField("user");
string p = user.getStringField("pwd");
- massert("bad user object? [1]", !u.empty());
- massert("bad user object? [2]", !p.empty());
+ massert( 10392 , "bad user object? [1]", !u.empty());
+ massert( 10393 , "bad user object? [2]", !p.empty());
string err;
if( !conn->auth("local", u.c_str(), p.c_str(), err, false) ) {
log() << "replauthenticate: can't authenticate to master server, user:" << u << endl;
@@ -1371,7 +1371,7 @@ namespace mongo {
log() << " " << o.toString() << endl;
return false;
}
- uassert( e.type() == Date );
+ uassert( 10124 , e.type() == Date );
OpTime serverCurTime;
serverCurTime.asDate() = e.date();
*/
diff --git a/db/replset.h b/db/replset.h
index 23226db29ce..8d3ec65ffbd 100644
--- a/db/replset.h
+++ b/db/replset.h
@@ -155,14 +155,14 @@ namespace mongo {
if ( p ) {
remoteHost = string(remoteEnd, p-remoteEnd);
remotePort = atoi(p+1);
- uassert("bad port #", remotePort > 0 && remotePort < 0x10000 );
+ uassert( 10125 , "bad port #", remotePort > 0 && remotePort < 0x10000 );
if ( remotePort == CmdLine::DefaultDBPort )
remote = remoteHost; // don't include ":27017" as it is default; in case ran in diff ways over time to normalizke the hostname format in sources collection
}
- uassert("arbiter parm is missing, use '-' for none", arb);
+ uassert( 10126 , "arbiter parm is missing, use '-' for none", arb);
arbHost = arb;
- uassert("arbiter parm is empty", !arbHost.empty());
+ uassert( 10127 , "arbiter parm is empty", !arbHost.empty());
}
/* This is set to true if we have EVER been up to date -- this way a new pair member
diff --git a/db/scanandorder.h b/db/scanandorder.h
index 688117836bf..3f414334e6a 100644
--- a/db/scanandorder.h
+++ b/db/scanandorder.h
@@ -110,7 +110,7 @@ namespace mongo {
BSONObj k = order.getKeyFromObject(o);
if ( (int) best.size() < limit ) {
approxSize += k.objsize();
- uassert( "too much key data for sort() with no index. add an index or specify a smaller limit", approxSize < 1 * 1024 * 1024 );
+ uassert( 10128 , "too much key data for sort() with no index. add an index or specify a smaller limit", approxSize < 1 * 1024 * 1024 );
_add(k, o);
return;
}
@@ -133,7 +133,7 @@ namespace mongo {
nFilled++;
if ( nFilled >= limit )
break;
- uassert( "too much data for sort() with no index", b.len() < 4000000 ); // appserver limit
+ uassert( 10129 , "too much data for sort() with no index", b.len() < 4000000 ); // appserver limit
}
nout = nFilled;
}
diff --git a/db/storage.cpp b/db/storage.cpp
index a93b53a4ecb..4da2d823e24 100644
--- a/db/storage.cpp
+++ b/db/storage.cpp
@@ -24,10 +24,10 @@ RecStoreInterface *btreeStore = new MongoMemMapped_RecStore();
void BasicRecStore::init(const char *fn, unsigned recsize)
{
- massert( "compile packing problem recstore?", sizeof(RecStoreHeader) == 8192);
+ massert( 10394 , "compile packing problem recstore?", sizeof(RecStoreHeader) == 8192);
filename = fn;
f.open(fn);
- uassert( string("couldn't open file:")+fn, f.is_open() );
+ uassert( 10130 , string("couldn't open file:")+fn, f.is_open() );
len = f.len();
if( len == 0 ) {
log() << "creating recstore file " << fn << '\n';
@@ -37,13 +37,13 @@ void BasicRecStore::init(const char *fn, unsigned recsize)
}
else {
f.read(0, (char *) &h, sizeof(RecStoreHeader));
- massert(string("recstore was not closed cleanly: ")+fn, h.cleanShutdown==0);
- massert(string("recstore recsize mismatch, file:")+fn, h.recsize == recsize);
- massert(string("bad recstore [1], file:")+fn, (h.leof-sizeof(RecStoreHeader)) % recsize == 0);
+ massert( 10395 , string("recstore was not closed cleanly: ")+fn, h.cleanShutdown==0);
+ massert( 10396 , string("recstore recsize mismatch, file:")+fn, h.recsize == recsize);
+ massert( 10397 , string("bad recstore [1], file:")+fn, (h.leof-sizeof(RecStoreHeader)) % recsize == 0);
if( h.leof > len ) {
stringstream ss;
ss << "bad recstore, file:" << fn << " leof:" << h.leof << " len:" << len;
- massert(ss.str(), false);
+ massert( 10398 , ss.str(), false);
}
if( h.cleanShutdown )
log() << "warning: non-clean shutdown for file " << fn << '\n';
diff --git a/db/update.cpp b/db/update.cpp
index 793c1f7e4f8..33129511319 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -67,7 +67,7 @@ namespace mongo {
}
case PUSH: {
- uassert( "$push can only be applied to an array" , in.type() == Array );
+ uassert( 10131 , "$push can only be applied to an array" , in.type() == Array );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
BSONObjIterator i( in.embeddedObject() );
int n=0;
@@ -84,8 +84,8 @@ namespace mongo {
}
case PUSH_ALL: {
- uassert( "$pushAll can only be applied to an array" , in.type() == Array );
- uassert( "$pushAll has to be passed an array" , elt.type() );
+ uassert( 10132 , "$pushAll can only be applied to an array" , in.type() == Array );
+ uassert( 10133 , "$pushAll has to be passed an array" , elt.type() );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
@@ -109,7 +109,7 @@ namespace mongo {
case PULL:
case PULL_ALL: {
- uassert( "$pull/$pullAll can only be applied to an array" , in.type() == Array );
+ uassert( 10134 , "$pull/$pullAll can only be applied to an array" , in.type() == Array );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
int n = 0;
@@ -142,7 +142,7 @@ namespace mongo {
}
case POP: {
- uassert( "$pop can only be applied to an array" , in.type() == Array );
+ uassert( 10135 , "$pop can only be applied to an array" , in.type() == Array );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
int n = 0;
@@ -178,9 +178,9 @@ namespace mongo {
}
case BIT: {
- uassert( "$bit needs an array" , elt.type() == Object );
- uassert( "$bit can only be applied to numbers" , in.isNumber() );
- uassert( "$bit can't use a double" , in.type() != NumberDouble );
+ uassert( 10136 , "$bit needs an array" , elt.type() == Object );
+ uassert( 10137 , "$bit can only be applied to numbers" , in.isNumber() );
+ uassert( 10138 , "$bit can't use a double" , in.type() != NumberDouble );
int x = in.numberInt();
long long y = in.numberLong();
@@ -188,7 +188,7 @@ namespace mongo {
BSONObjIterator it( elt.embeddedObject() );
while ( it.more() ){
BSONElement e = it.next();
- uassert( "$bit field must be number" , e.isNumber() );
+ uassert( 10139 , "$bit field must be number" , e.isNumber() );
if ( strcmp( e.fieldName() , "and" ) == 0 ){
switch( in.type() ){
case NumberInt: x = x&e.numberInt(); break;
@@ -239,7 +239,7 @@ namespace mongo {
else {
switch( m.op ) {
case Mod::INC:
- uassert( "Cannot apply $inc modifier to non-number", e.isNumber() || e.eoo() );
+ uassert( 10140 , "Cannot apply $inc modifier to non-number", e.isNumber() || e.eoo() );
if ( !e.isNumber() )
inPlacePossible = false;
break;
@@ -250,12 +250,12 @@ namespace mongo {
break;
case Mod::PUSH:
case Mod::PUSH_ALL:
- uassert( "Cannot apply $push/$pushAll modifier to non-array", e.type() == Array || e.eoo() );
+ uassert( 10141 , "Cannot apply $push/$pushAll modifier to non-array", e.type() == Array || e.eoo() );
inPlacePossible = false;
break;
case Mod::PULL:
case Mod::PULL_ALL: {
- uassert( "Cannot apply $pull/$pullAll modifier to non-array", e.type() == Array || e.eoo() );
+ uassert( 10142 , "Cannot apply $pull/$pullAll modifier to non-array", e.type() == Array || e.eoo() );
BSONObjIterator i( e.embeddedObject() );
while( inPlacePossible && i.more() ) {
BSONElement arrI = i.next();
@@ -278,7 +278,7 @@ namespace mongo {
break;
}
case Mod::POP: {
- uassert( "Cannot apply $pop modifier to non-array", e.type() == Array || e.eoo() );
+ uassert( 10143 , "Cannot apply $pop modifier to non-array", e.type() == Array || e.eoo() );
if ( ! e.embeddedObject().isEmpty() )
inPlacePossible = false;
break;
@@ -318,7 +318,7 @@ namespace mongo {
}
break;
default:
- uassert( "can't apply mod in place - shouldn't have gotten here" , 0 );
+ uassert( 10144 , "can't apply mod in place - shouldn't have gotten here" , 0 );
}
}
}
@@ -377,7 +377,7 @@ namespace mongo {
switch ( cmp ){
case LEFT_SUBFIELD: { // Mod is embeddeed under this element
- uassert( "LEFT_SUBFIELD only supports Object" , e.type() == Object || e.type() == Array );
+ uassert( 10145 , "LEFT_SUBFIELD only supports Object" , e.type() == Object || e.type() == Array );
if ( onedownseen.count( e.fieldName() ) == 0 ){
onedownseen.insert( e.fieldName() );
BSONObjBuilder bb ( e.type() == Object ? b.subobjStart( e.fieldName() ) : b.subarrayStart( e.fieldName() ) );
@@ -404,10 +404,10 @@ namespace mongo {
e = es.next();
continue;
case RIGHT_SUBFIELD:
- massert( "ModSet::createNewFromMods - RIGHT_SUBFIELD should be impossible" , 0 );
+ massert( 10399 , "ModSet::createNewFromMods - RIGHT_SUBFIELD should be impossible" , 0 );
break;
default:
- massert( "unhandled case" , 0 );
+ massert( 10400 , "unhandled case" , 0 );
}
}
@@ -443,7 +443,7 @@ namespace mongo {
continue;
}
- uassert( "upsert with foo.bar type queries not supported yet" , strchr( e.fieldName() , '.' ) == 0 );
+ uassert( 10146 , "upsert with foo.bar type queries not supported yet" , strchr( e.fieldName() , '.' ) == 0 );
bb.append( e );
@@ -473,7 +473,7 @@ namespace mongo {
while ( it.more() ) {
BSONElement e = it.next();
const char *fn = e.fieldName();
- uassert( "Invalid modifier specified" + string( fn ), e.type() == Object );
+ uassert( 10147 , "Invalid modifier specified" + string( fn ), e.type() == Object );
BSONObj j = e.embeddedObject();
BSONObjIterator jt(j);
Mod::Op op = opFromStr( fn );
@@ -484,12 +484,12 @@ namespace mongo {
const char * fieldName = f.fieldName();
- uassert( "Mod on _id not allowed", strcmp( fieldName, "_id" ) != 0 );
- uassert( "Invalid mod field name, may not end in a period", fieldName[ strlen( fieldName ) - 1 ] != '.' );
- uassert( "Field name duplication not allowed with modifiers", ! haveModForField( fieldName ) );
- uassert( "have conflict mod" , ! haveConflictingMod( fieldName ) );
- uassert( "Modifier $inc allowed for numbers only", f.isNumber() || op != Mod::INC );
- uassert( "Modifier $pushAll/pullAll allowed for arrays only", f.type() == Array || ( op != Mod::PUSH_ALL && op != Mod::PULL_ALL ) );
+ uassert( 10148 , "Mod on _id not allowed", strcmp( fieldName, "_id" ) != 0 );
+ uassert( 10149 , "Invalid mod field name, may not end in a period", fieldName[ strlen( fieldName ) - 1 ] != '.' );
+ uassert( 10150 , "Field name duplication not allowed with modifiers", ! haveModForField( fieldName ) );
+ uassert( 10151 , "have conflict mod" , ! haveConflictingMod( fieldName ) );
+ uassert( 10152 , "Modifier $inc allowed for numbers only", f.isNumber() || op != Mod::INC );
+ uassert( 10153 , "Modifier $pushAll/pullAll allowed for arrays only", f.type() == Array || ( op != Mod::PUSH_ALL && op != Mod::PULL_ALL ) );
Mod m;
m.init( op , f );
@@ -520,7 +520,7 @@ namespace mongo {
BSONElement e = i.next();
if ( e.eoo() )
break;
- uassert( "Modifiers and non-modifiers cannot be mixed", e.fieldName()[ 0 ] != '$' );
+ uassert( 10154 , "Modifiers and non-modifiers cannot be mixed", e.fieldName()[ 0 ] != '$' );
}
}
@@ -566,10 +566,10 @@ namespace mongo {
UpdateResult updateObjects(const char *ns, BSONObj updateobjOrig, BSONObj patternOrig, bool upsert, bool multi, stringstream& ss, bool logop ) {
int profile = cc().database()->profile;
- uassert("cannot update reserved $ collection", strchr(ns, '$') == 0 );
+ uassert( 10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0 );
if ( strstr(ns, ".system.") ) {
/* dm: it's very important that system.indexes is never updated as IndexDetails has pointers into it */
- uassert("cannot update system collection", legalClientSystemNS( ns , true ) );
+ uassert( 10156 , "cannot update system collection", legalClientSystemNS( ns , true ) );
}
set<DiskLoc> seenObjects;
@@ -577,7 +577,7 @@ namespace mongo {
QueryPlanSet qps( ns, patternOrig, BSONObj() );
UpdateOp original;
shared_ptr< UpdateOp > u = qps.runOp( original );
- massert( u->exceptionMessage(), u->complete() );
+ massert( 10401 , u->exceptionMessage(), u->complete() );
shared_ptr< Cursor > c = u->c();
int numModded = 0;
while ( c->ok() ) {
@@ -610,7 +610,7 @@ namespace mongo {
pattern = idPattern.obj();
}
else {
- uassert( "multi-update requires all modified objects to have an _id" , ! multi );
+ uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
}
}
@@ -685,7 +685,7 @@ namespace mongo {
continue;
}
- uassert( "multi update only works with $ operators" , ! multi );
+ uassert( 10158 , "multi update only works with $ operators" , ! multi );
BSONElementManipulator::lookForTimestamps( updateobj );
checkNoMods( updateobj );
@@ -719,7 +719,7 @@ namespace mongo {
logOp( "i", ns, newObj );
return UpdateResult( 0 , 1 , 1 );
}
- uassert( "multi update only works with $ operators" , ! multi );
+ uassert( 10159 , "multi update only works with $ operators" , ! multi );
checkNoMods( updateobjOrig );
if ( profile )
ss << " upsert ";
diff --git a/db/update.h b/db/update.h
index df0f8e029e7..5cca6248d62 100644
--- a/db/update.h
+++ b/db/update.h
@@ -62,7 +62,7 @@ namespace mongo {
/* [dm] why is this const? (or rather, why was setn const?) i see why but think maybe clearer if were not. */
void inc(BSONElement& n) const {
- uassert( "$inc value is not a number", n.isNumber() );
+ uassert( 10160 , "$inc value is not a number", n.isNumber() );
if( ndouble )
*ndouble += n.numberDouble();
else if( nint )
@@ -225,7 +225,7 @@ namespace mongo {
if ( strcmp( fn, Mod::modNames[ i ] ) == 0 )
return Mod::Op( i );
- uassert( "Invalid modifier specified " + string( fn ), false );
+ uassert( 10161 , "Invalid modifier specified " + string( fn ), false );
return Mod::INC;
}
diff --git a/dbtests/btreetests.cpp b/dbtests/btreetests.cpp
index eca89bfdf77..5a0b15dfca1 100644
--- a/dbtests/btreetests.cpp
+++ b/dbtests/btreetests.cpp
@@ -32,7 +32,7 @@ namespace BtreeTests {
{
bool f = false;
assert( f = true );
- massert("assert is misdefined", f);
+ massert( 10402 , "assert is misdefined", f);
}
setClient( ns() );
diff --git a/dbtests/framework.cpp b/dbtests/framework.cpp
index 94d923ce4d5..6ed5e723319 100644
--- a/dbtests/framework.cpp
+++ b/dbtests/framework.cpp
@@ -307,7 +307,7 @@ namespace mongo {
if ( ! _suites )
_suites = new map<string,Suite*>();
Suite*& m = (*_suites)[name];
- uassert( "already have suite with that name" , ! m );
+ uassert( 10162 , "already have suite with that name" , ! m );
m = s;
}
diff --git a/dbtests/jsobjtests.cpp b/dbtests/jsobjtests.cpp
index 28e247a31f3..1d89537b8c3 100644
--- a/dbtests/jsobjtests.cpp
+++ b/dbtests/jsobjtests.cpp
@@ -795,11 +795,11 @@ namespace JsobjTests {
stringstream ss;
ss << "type: " << t;
string s = ss.str();
- massert( s , min( t ).woCompare( max( t ) ) < 0 );
- massert( s , max( t ).woCompare( min( t ) ) > 0 );
- massert( s , min( t ).woCompare( min( t ) ) == 0 );
- massert( s , max( t ).woCompare( max( t ) ) == 0 );
- massert( s , abs( min( t ).firstElement().canonicalType() - max( t ).firstElement().canonicalType() ) <= 10 );
+ massert( 10403 , s , min( t ).woCompare( max( t ) ) < 0 );
+ massert( 10404 , s , max( t ).woCompare( min( t ) ) > 0 );
+ massert( 10405 , s , min( t ).woCompare( min( t ) ) == 0 );
+ massert( 10406 , s , max( t ).woCompare( max( t ) ) == 0 );
+ massert( 10407 , s , abs( min( t ).firstElement().canonicalType() - max( t ).firstElement().canonicalType() ) <= 10 );
}
}
diff --git a/dbtests/queryoptimizertests.cpp b/dbtests/queryoptimizertests.cpp
index bfe4d3e091d..6dba2f56641 100644
--- a/dbtests/queryoptimizertests.cpp
+++ b/dbtests/queryoptimizertests.cpp
@@ -779,7 +779,7 @@ namespace QueryOptimizerTests {
virtual void next() {
if ( iThrow_ )
threw_ = true;
- massert( "throw", !iThrow_ );
+ massert( 10408 , "throw", !iThrow_ );
if ( ++i_ > 10 )
setComplete();
}
@@ -815,7 +815,7 @@ namespace QueryOptimizerTests {
public:
virtual void init() {}
virtual void next() {
- massert( "throw", false );
+ massert( 10409 , "throw", false );
}
virtual QueryOp *clone() const {
return new TestOp();
@@ -916,7 +916,7 @@ namespace QueryOptimizerTests {
virtual void init() {}
virtual void next() {
if ( qp().indexKey().firstElement().fieldName() == string( "$natural" ) )
- massert( "throw", false );
+ massert( 10410 , "throw", false );
setComplete();
}
virtual QueryOp *clone() const {
@@ -928,7 +928,7 @@ namespace QueryOptimizerTests {
virtual void next() {
if ( qp().indexKey().firstElement().fieldName() == string( "$natural" ) )
setComplete();
- massert( "throw", false );
+ massert( 10411 , "throw", false );
}
virtual QueryOp *clone() const {
return new ScanOnlyTestOp();
diff --git a/s/chunk.cpp b/s/chunk.cpp
index c8da28ed672..9b757a77368 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -69,7 +69,7 @@ namespace mongo {
BSONObjIterator i(k);
while( i.more() ) {
BSONElement e = i.next();
- uassert( "can only handle numbers here - which i think is correct" , e.isNumber() );
+ uassert( 10163 , "can only handle numbers here - which i think is correct" , e.isNumber() );
r.append( e.fieldName() , -1 * e.number() );
}
@@ -91,7 +91,7 @@ namespace mongo {
) , result ) ){
stringstream ss;
ss << "medianKey command failed: " << result;
- uassert( ss.str() , 0 );
+ uassert( 10164 , ss.str() , 0 );
}
conn.done();
@@ -103,12 +103,12 @@ namespace mongo {
}
Chunk * Chunk::split( const BSONObj& m ){
- uassert( "can't split as shard that doesn't have a manager" , _manager );
+ uassert( 10165 , "can't split as shard that doesn't have a manager" , _manager );
log(1) << " before split on: " << m << "\n"
<< "\t self : " << toString() << endl;
- uassert( "locking namespace on server failed" , lockNamespaceOnServer( getShard() , _ns ) );
+ uassert( 10166 , "locking namespace on server failed" , lockNamespaceOnServer( getShard() , _ns ) );
Chunk * s = new Chunk( _manager );
s->_ns = _ns;
@@ -134,7 +134,7 @@ namespace mongo {
}
bool Chunk::moveAndCommit( const string& to , string& errmsg ){
- uassert( "can't move shard to its current location!" , to != getShard() );
+ uassert( 10167 , "can't move shard to its current location!" , to != getShard() );
log() << "moving chunk ns: " << _ns << " moving chunk: " << toString() << " " << _shard << " -> " << to << endl;
@@ -187,7 +187,7 @@ namespace mongo {
}
else if ( newVersion <= oldVersion ){
log() << "newVersion: " << newVersion << " oldVersion: " << oldVersion << endl;
- uassert( "version has to be higher" , newVersion > oldVersion );
+ uassert( 10168 , "version has to be higher" , newVersion > oldVersion );
}
BSONObjBuilder b;
@@ -262,7 +262,7 @@ namespace mongo {
log() << "moving chunk (auto): " << toMove->toString() << " to: " << newLocation << " #objcets: " << toMove->countObjects() << endl;
string errmsg;
- massert( (string)"moveAndCommit failed: " + errmsg ,
+ massert( 10412 , (string)"moveAndCommit failed: " + errmsg ,
toMove->moveAndCommit( newLocation , errmsg ) );
return true;
@@ -272,7 +272,7 @@ namespace mongo {
ScopedDbConnection conn( getShard() );
BSONObj result;
- uassert( "datasize failed!" , conn->runCommand( "admin" , BSON( "datasize" << _ns
+ uassert( 10169 , "datasize failed!" , conn->runCommand( "admin" , BSON( "datasize" << _ns
<< "keyPattern" << _manager->getShardKey().key()
<< "min" << getMin()
<< "max" << getMax()
@@ -335,11 +335,11 @@ namespace mongo {
_max = from.getObjectField( "maxDotted" ).getOwned();
}
- uassert( "Chunk needs a ns" , ! _ns.empty() );
- uassert( "Chunk needs a server" , ! _ns.empty() );
+ uassert( 10170 , "Chunk needs a ns" , ! _ns.empty() );
+ uassert( 10171 , "Chunk needs a server" , ! _ns.empty() );
- uassert( "Chunk needs a min" , ! _min.isEmpty() );
- uassert( "Chunk needs a max" , ! _max.isEmpty() );
+ uassert( 10172 , "Chunk needs a min" , ! _min.isEmpty() );
+ uassert( 10173 , "Chunk needs a max" , ! _max.isEmpty() );
}
string Chunk::modelServer() {
@@ -358,15 +358,15 @@ namespace mongo {
Model::save( check );
if ( reload ){
// need to do this so that we get the new _lastMod and therefore version number
- massert( "_id has to be filled in already" , ! _id.isEmpty() );
+ massert( 10413 , "_id has to be filled in already" , ! _id.isEmpty() );
string b = toString();
BSONObj q = _id.copy();
- massert( "how could load fail?" , load( q ) );
+ massert( 10414 , "how could load fail?" , load( q ) );
log(2) << "before: " << q << "\t" << b << endl;
log(2) << "after : " << _id << "\t" << toString() << endl;
- massert( "chunk reload changed content!" , b == toString() );
- massert( "id changed!" , q["_id"] == _id["_id"] );
+ massert( 10415 , "chunk reload changed content!" , b == toString() );
+ massert( 10416 , "id changed!" , q["_id"] == _id["_id"] );
}
}
@@ -492,7 +492,7 @@ namespace mongo {
}
void ChunkManager::drop(){
- uassert( "config servers not all up" , configServer.allUp() );
+ uassert( 10174 , "config servers not all up" , configServer.allUp() );
map<string,ShardChunkVersion> seen;
@@ -509,7 +509,7 @@ namespace mongo {
continue;
// rollback
- uassert( "don't know how to rollback locks b/c drop can't lock all shards" , 0 );
+ uassert( 10175 , "don't know how to rollback locks b/c drop can't lock all shards" , 0 );
}
log(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl;
@@ -529,7 +529,7 @@ namespace mongo {
log(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl;
// clean up database meta-data
- uassert( "no sharding data?" , _config->removeSharding( _ns ) );
+ uassert( 10176 , "no sharding data?" , _config->removeSharding( _ns ) );
_config->save();
@@ -567,7 +567,7 @@ namespace mongo {
withRealChunks.insert( c->getShard() );
}
- massert( "how did version get smalled" , getVersion() >= a );
+ massert( 10417 , "how did version get smaller" , getVersion() >= a );
ensureIndex(); // TODO: this is too aggressive - but not really sooo bad
}
diff --git a/s/commands_public.cpp b/s/commands_public.cpp
index db28829cc00..2d3de7aaa23 100644
--- a/s/commands_public.cpp
+++ b/s/commands_public.cpp
@@ -98,7 +98,7 @@ namespace mongo {
}
ChunkManager * cm = conf->getChunkManager( fullns );
- massert( "how could chunk manager be null!" , cm );
+ massert( 10418 , "how could chunk manager be null!" , cm );
cm->drop();
@@ -157,7 +157,7 @@ namespace mongo {
}
ChunkManager * cm = conf->getChunkManager( fullns );
- massert( "how could chunk manager be null!" , cm );
+ massert( 10419 , "how could chunk manager be null!" , cm );
vector<Chunk*> chunks;
cm->getChunksForQuery( chunks , filter );
@@ -213,7 +213,7 @@ namespace mongo {
}
ChunkManager * cm = conf->getChunkManager( fullns );
- massert( "how could chunk manager be null!" , cm );
+ massert( 10420 , "how could chunk manager be null!" , cm );
vector<Chunk*> chunks;
cm->getChunksForQuery( chunks , BSONObj() );
@@ -286,7 +286,7 @@ namespace mongo {
// we don't want to copy these
}
else {
- uassert( (string)"don't know mr field: " + fn , 0 );
+ uassert( 10177 , (string)"don't know mr field: " + fn , 0 );
}
}
b.append( "out" , output );
diff --git a/s/config.cpp b/s/config.cpp
index 36461aea34a..18e998739e2 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -48,7 +48,7 @@ namespace mongo {
if ( isSharded( ns ) )
return "";
- uassert( "no primary!" , _primary.size() );
+ uassert( 10178 , "no primary!" , _primary.size() );
return _primary;
}
@@ -89,8 +89,8 @@ namespace mongo {
cout << "BBBB" << endl;
return false;
}
- uassert( "_sharded but no info" , info );
- uassert( "info but no sharded" , i != _sharded.end() );
+ uassert( 10179 , "_sharded but no info" , info );
+ uassert( 10180 , "info but no sharded" , i != _sharded.end() );
_sharded.erase( i );
_shards.erase( ns ); // TODO: clean this up, maybe switch to shared_ptr
@@ -102,7 +102,7 @@ namespace mongo {
if ( m && ! reload )
return m;
- uassert( (string)"not sharded:" + ns , isSharded( ns ) );
+ uassert( 10181 , (string)"not sharded:" + ns , isSharded( ns ) );
if ( m && reload )
log() << "reloading shard info for: " << ns << endl;
m = new ChunkManager( this , ns , _sharded[ ns ].key , _sharded[ns].unique );
@@ -138,9 +138,9 @@ namespace mongo {
BSONObjIterator i(sharded);
while ( i.more() ){
BSONElement e = i.next();
- uassert( "sharded things have to be objects" , e.type() == Object );
+ uassert( 10182 , "sharded things have to be objects" , e.type() == Object );
BSONObj c = e.embeddedObject();
- uassert( "key has to be an object" , c["key"].type() == Object );
+ uassert( 10183 , "key has to be an object" , c["key"].type() == Object );
_sharded[e.fieldName()] = CollectionInfo( c["key"].embeddedObject() ,
c["unique"].trueValue() );
}
@@ -252,7 +252,7 @@ namespace mongo {
i->second->drop();
num++;
- uassert( "_dropShardedCollections too many collections - bailing" , num < 100000 );
+ uassert( 10184 , "_dropShardedCollections too many collections - bailing" , num < 100000 );
log(2) << "\t\t dropped " << num << " so far" << endl;
}
return true;
@@ -318,7 +318,7 @@ namespace mongo {
}
else {
log() << "\t can't find a shard to put new db on" << endl;
- uassert( "can't find a shard to put new db on" , 0 );
+ uassert( 10185 , "can't find a shard to put new db on" , 0 );
}
}
else {
@@ -332,7 +332,7 @@ namespace mongo {
}
void Grid::removeDB( string database ){
- uassert( "removeDB expects db name" , database.find( '.' ) == string::npos );
+ uassert( 10186 , "removeDB expects db name" , database.find( '.' ) == string::npos );
boostlock l( _lock );
_databases.erase( database );
@@ -342,7 +342,7 @@ namespace mongo {
ScopedDbConnection conn( configServer.getPrimary() );
BSONObj result;
- massert( "getoptime failed" , conn->simpleCommand( "admin" , &result , "getoptime" ) );
+ massert( 10421 , "getoptime failed" , conn->simpleCommand( "admin" , &result , "getoptime" ) );
conn.done();
return result["optime"]._numberLong();
@@ -360,7 +360,7 @@ namespace mongo {
}
bool ConfigServer::init( vector<string> configHosts ){
- uassert( "need configdbs" , configHosts.size() );
+ uassert( 10187 , "need configdbs" , configHosts.size() );
string hn = getHostName();
if ( hn.empty() ) {
@@ -391,7 +391,7 @@ namespace mongo {
return false;
}
- uassert( "can only hand 1 config db right now" , configHosts.size() == 1 );
+ uassert( 10188 , "can only handle 1 config db right now" , configHosts.size() == 1 );
_primary = configHosts[0];
return true;
@@ -430,7 +430,7 @@ namespace mongo {
if ( c->more() ){
BSONObj o = c->next();
version = o["version"].numberInt();
- uassert( "should only have 1 thing in config.version" , ! c->more() );
+ uassert( 10189 , "should only have 1 thing in config.version" , ! c->more() );
}
else {
if ( conn.count( "config.shard" ) || conn.count( "config.databases" ) ){
diff --git a/s/config.h b/s/config.h
index eade0265220..14d76b71067 100644
--- a/s/config.h
+++ b/s/config.h
@@ -166,7 +166,7 @@ namespace mongo {
}
virtual string modelServer(){
- uassert( "ConfigServer not setup" , _primary.size() );
+ uassert( 10190 , "ConfigServer not setup" , _primary.size() );
return _primary;
}
diff --git a/s/cursors.cpp b/s/cursors.cpp
index 478f4013151..3354b96b8e4 100644
--- a/s/cursors.cpp
+++ b/s/cursors.cpp
@@ -33,7 +33,7 @@ namespace mongo {
}
bool ShardedClientCursor::sendNextBatch( Request& r , int ntoreturn ){
- uassert( "cursor already done" , ! _done );
+ uassert( 10191 , "cursor already done" , ! _done );
int maxSize = 1024 * 1024;
if ( _totalSent > 0 )
diff --git a/s/d_logic.cpp b/s/d_logic.cpp
index d89999e9d25..c5008dcfb2d 100644
--- a/s/d_logic.cpp
+++ b/s/d_logic.cpp
@@ -525,7 +525,7 @@ namespace mongo {
}
OID * clientID = clientServerIds.get();
- massert( "write with bad shard config and no server id!" , clientID );
+ massert( 10422 , "write with bad shard config and no server id!" , clientID );
log() << "got write with an old config - writing back" << endl;
diff --git a/s/request.cpp b/s/request.cpp
index 1ce0174648e..ba48d6bd7af 100644
--- a/s/request.cpp
+++ b/s/request.cpp
@@ -47,11 +47,11 @@ namespace mongo {
void Request::reset( bool reload ){
_config = grid.getDBConfig( getns() );
if ( reload )
- uassert( "db config reload failed!" , _config->reload() );
+ uassert( 10192 , "db config reload failed!" , _config->reload() );
if ( _config->isSharded( getns() ) ){
_chunkManager = _config->getChunkManager( getns() , reload );
- uassert( (string)"no shard info for: " + getns() , _chunkManager );
+ uassert( 10193 , (string)"no shard info for: " + getns() , _chunkManager );
}
else {
_chunkManager = 0;
@@ -68,7 +68,7 @@ namespace mongo {
return _chunkManager->findChunk( _chunkManager->getShardKey().globalMin() ).getShard();
}
string s = _config->getShard( getns() );
- uassert( "can't call singleServerName on a sharded collection!" , s.size() > 0 );
+ uassert( 10194 , "can't call singleServerName on a sharded collection!" , s.size() > 0 );
return s;
}
@@ -93,7 +93,7 @@ namespace mongo {
}
catch ( StaleConfigException& staleConfig ){
log() << staleConfig.what() << " attempt: " << attempt << endl;
- uassert( "too many attempts to update config, failing" , attempt < 5 );
+ uassert( 10195 , "too many attempts to update config, failing" , attempt < 5 );
sleepsecs( attempt );
reset( true );
diff --git a/s/s_only.cpp b/s/s_only.cpp
index 89d358f117f..f0bc6bfa9c0 100644
--- a/s/s_only.cpp
+++ b/s/s_only.cpp
@@ -23,7 +23,7 @@
namespace mongo {
auto_ptr<CursorIterator> Helpers::find( const char *ns , BSONObj query , bool requireIndex ){
- uassert( "Helpers::find can't be used in mongos" , 0 );
+ uassert( 10196 , "Helpers::find can't be used in mongos" , 0 );
auto_ptr<CursorIterator> i;
return i;
}
diff --git a/s/server.cpp b/s/server.cpp
index 30d0aea0e1c..4868caf157b 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -104,7 +104,7 @@ namespace mongo {
}
DBClientBase *createDirectClient(){
- uassert( "createDirectClient not implemented for sharding yet" , 0 );
+ uassert( 10197 , "createDirectClient not implemented for sharding yet" , 0 );
return 0;
}
diff --git a/s/shardkey.cpp b/s/shardkey.cpp
index 0792d193605..15cf7b96af8 100644
--- a/s/shardkey.cpp
+++ b/s/shardkey.cpp
@@ -63,9 +63,9 @@ namespace mongo {
int ShardKeyPattern::compare( const BSONObj& lObject , const BSONObj& rObject ) {
BSONObj L = extractKey(lObject);
- uassert("left object doesn't have shard key", !L.isEmpty());
+ uassert( 10198 , "left object doesn't have shard key", !L.isEmpty());
BSONObj R = extractKey(rObject);
- uassert("right object doesn't have shard key", !R.isEmpty());
+ uassert( 10199 , "right object doesn't have shard key", !R.isEmpty());
return L.woCompare(R);
}
@@ -129,11 +129,11 @@ namespace mongo {
case BSONObj::opIN:
case BSONObj::NE:
case BSONObj::opSIZE:
- massert("not implemented yet relevant()", false);
+ massert( 10423 , "not implemented yet relevant()", false);
case BSONObj::Equality:
goto normal;
default:
- massert("bad operator in relevant()?", false);
+ massert( 10424 , "bad operator in relevant()?", false);
}
}
return true;
@@ -143,7 +143,7 @@ normal:
}
bool ShardKeyPattern::relevantForQuery( const BSONObj& query , Chunk * chunk ){
- massert("not done for compound patterns", patternfields.size() == 1);
+ massert( 10425 , "not done for compound patterns", patternfields.size() == 1);
bool rel = relevant(query, chunk->getMin(), chunk->getMax());
if( ! hasShardKey( query ) )
@@ -157,7 +157,7 @@ normal:
{ $gte : keyval(min), $lt : keyval(max) }
*/
void ShardKeyPattern::getFilter( BSONObjBuilder& b , const BSONObj& min, const BSONObj& max ){
- massert("not done for compound patterns", patternfields.size() == 1);
+ massert( 10426 , "not done for compound patterns", patternfields.size() == 1);
BSONObjBuilder temp;
temp.appendAs( extractKey(min).firstElement(), "$gte" );
temp.appendAs( extractKey(max).firstElement(), "$lt" );
diff --git a/s/strategy.cpp b/s/strategy.cpp
index ea86884e17c..b485bd27e39 100644
--- a/s/strategy.cpp
+++ b/s/strategy.cpp
@@ -42,7 +42,7 @@ namespace mongo {
}
}
- uassert("mongos: error calling db", ok);
+ uassert( 10200 , "mongos: error calling db", ok);
r.reply( response );
dbcon.done();
}
@@ -98,7 +98,7 @@ namespace mongo {
int len;
Message m( (void*)data["msg"].binData( len ) , false );
- massert( "invalid writeback message" , m.data->valid() );
+ massert( 10427 , "invalid writeback message" , m.data->valid() );
grid.getDBConfig( ns )->getChunkManager( ns , true );
@@ -178,7 +178,7 @@ namespace mongo {
log(1) << " setShardVersion failed!\n" << result << endl;
if ( result.getBoolField( "need_authoritative" ) )
- massert( "need_authoritative set but in authoritative mode already" , ! authoritative );
+ massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
if ( ! authoritative ){
checkShardVersion( conn , ns , 1 );
@@ -186,7 +186,7 @@ namespace mongo {
}
log(1) << " setShardVersion failed: " << result << endl;
- massert( "setShardVersion failed!" , 0 );
+ massert( 10429 , "setShardVersion failed!" , 0 );
}
bool setShardVersion( DBClientBase & conn , const string& ns , ShardChunkVersion version , bool authoritative , BSONObj& result ){
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index ee3b80295b7..9cc29ef7795 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -128,7 +128,7 @@ namespace mongo {
int flags = d.pullInt();
BSONObj query = d.nextJsObj();
- uassert( "invalid update" , d.moreJSObjs() );
+ uassert( 10201 , "invalid update" , d.moreJSObjs() );
BSONObj toupdate = d.nextJsObj();
BSONObj chunkFinder = query;
@@ -137,7 +137,7 @@ namespace mongo {
bool multi = flags & Option_Multi;
if ( multi )
- uassert( "can't mix multi and upsert and sharding" , ! upsert );
+ uassert( 10202 , "can't mix multi and upsert and sharding" , ! upsert );
if ( upsert && ! manager->hasShardKey( toupdate ) )
throw UserException( "can't upsert something without shard key" );
@@ -190,7 +190,7 @@ namespace mongo {
int flags = d.pullInt();
bool justOne = flags & 1;
- uassert( "bad delete message" , d.moreJSObjs() );
+ uassert( 10203 , "bad delete message" , d.moreJSObjs() );
BSONObj pattern = d.nextJsObj();
vector<Chunk*> chunks;
diff --git a/s/strategy_single.cpp b/s/strategy_single.cpp
index 0e1740648d1..c9ab50af34a 100644
--- a/s/strategy_single.cpp
+++ b/s/strategy_single.cpp
@@ -65,7 +65,7 @@ namespace mongo {
Message response;
bool ok = c.port().call( r.m() , response);
- uassert("dbgrid: getmore: error calling db", ok);
+ uassert( 10204 , "dbgrid: getmore: error calling db", ok);
r.reply( response );
dbcon.done();
@@ -81,7 +81,7 @@ namespace mongo {
BSONObj o = d.nextJsObj();
const char * ns = o["ns"].valuestr();
if ( r.getConfig()->isSharded( ns ) ){
- uassert( (string)"can't use unique indexes with sharding ns:" + ns +
+ uassert( 10205 , (string)"can't use unique indexes with sharding ns:" + ns +
" key: " + o["key"].embeddedObjectUserCheck().toString() ,
IndexDetails::isIdIndexPattern( o["key"].embeddedObjectUserCheck() ) ||
! o["unique"].trueValue() );
diff --git a/scripting/engine.cpp b/scripting/engine.cpp
index b7fd209544e..5e3be4ccd89 100644
--- a/scripting/engine.cpp
+++ b/scripting/engine.cpp
@@ -78,14 +78,14 @@ namespace mongo {
stringstream temp;
temp << "can't append type from:";
temp << t;
- uassert( temp.str() , 0 );
+ uassert( 10206 , temp.str() , 0 );
}
}
int Scope::invoke( const char* code , const BSONObj& args, int timeoutMs ){
ScriptingFunction func = createFunction( code );
- uassert( "compile failed" , func );
+ uassert( 10207 , "compile failed" , func );
return invoke( func , args, timeoutMs );
}
@@ -124,7 +124,7 @@ namespace mongo {
}
void Scope::validateObjectIdString( const string &str ) {
- massert ( "invalid object id: length", str.size() == 24 );
+ massert( 10448 , "invalid object id: length", str.size() == 24 );
for ( string::size_type i=0; i<str.size(); i++ ){
char c = str[i];
@@ -133,7 +133,7 @@ namespace mongo {
( c >= 'A' && c <= 'F' ) ){
continue;
}
- massert( "invalid object id: not hex", false );
+ massert( 10430 , "invalid object id: not hex", false );
}
}
@@ -141,7 +141,7 @@ namespace mongo {
if ( _localDBName.size() == 0 ){
if ( ignoreNotConnected )
return;
- uassert( "need to have locallyConnected already" , _localDBName.size() );
+ uassert( 10208 , "need to have locallyConnected already" , _localDBName.size() );
}
if ( _loadedVersion == _lastVersion )
return;
@@ -158,8 +158,8 @@ namespace mongo {
BSONElement n = o["_id"];
BSONElement v = o["value"];
- uassert( "name has to be a string" , n.type() == String );
- uassert( "value has to be set" , v.type() != EOO );
+ uassert( 10209 , "name has to be a string" , n.type() == String );
+ uassert( 10210 , "value has to be set" , v.type() != EOO );
setElement( n.valuestr() , v );
}
diff --git a/scripting/engine_java.h b/scripting/engine_java.h
index b4d71d022b3..ae11cc14f97 100644
--- a/scripting/engine_java.h
+++ b/scripting/engine_java.h
@@ -195,7 +195,7 @@ namespace mongo {
JavaJS->scopeSetString(s,field,val);
}
void setObject(const char *field, const BSONObj& obj , bool readOnly ) {
- uassert( "only readOnly setObject supported in java" , readOnly );
+ uassert( 10211 , "only readOnly setObject supported in java" , readOnly );
JavaJS->scopeSetObject(s,field,&obj);
}
void setBoolean(const char *field, bool val ) {
diff --git a/scripting/engine_spidermonkey.cpp b/scripting/engine_spidermonkey.cpp
index 62cc0e1ec06..29c718f9d30 100644
--- a/scripting/engine_spidermonkey.cpp
+++ b/scripting/engine_spidermonkey.cpp
@@ -61,7 +61,7 @@ namespace mongo {
}
void check(){
- uassert( "holder magic value is wrong" , _magic == 17 && _obj.isValid() );
+ uassert( 10212 , "holder magic value is wrong" , _magic == 17 && _obj.isValid() );
}
BSONFieldIterator * it();
@@ -144,7 +144,7 @@ namespace mongo {
free( dst );
if ( !JS_CStringsAreUTF8() )
for( string::const_iterator i = ss.begin(); i != ss.end(); ++i )
- uassert( "non ascii character detected", (unsigned char)(*i) <= 127 );
+ uassert( 10213 , "non ascii character detected", (unsigned char)(*i) <= 127 );
return ss;
}
@@ -154,7 +154,7 @@ namespace mongo {
double toNumber( jsval v ){
double d;
- uassert( "not a number" , JS_ValueToNumber( _context , v , &d ) );
+ uassert( 10214 , "not a number" , JS_ValueToNumber( _context , v , &d ) );
return d;
}
@@ -228,7 +228,7 @@ namespace mongo {
JSVAL_IS_VOID( v ) )
return BSONObj();
- uassert( "not an object" , JSVAL_IS_OBJECT( v ) );
+ uassert( 10215 , "not an object" , JSVAL_IS_OBJECT( v ) );
return toObject( JSVAL_TO_OBJECT( v ) );
}
@@ -237,7 +237,7 @@ namespace mongo {
}
string getFunctionCode( jsval v ){
- uassert( "not a function" , JS_TypeOfValue( _context , v ) == JSTYPE_FUNCTION );
+ uassert( 10216 , "not a function" , JS_TypeOfValue( _context , v ) == JSTYPE_FUNCTION );
return getFunctionCode( JS_ValueToFunction( _context , v ) );
}
@@ -294,7 +294,7 @@ namespace mongo {
break;
}
- default: uassert( (string)"can't append field. name:" + name + " type: " + typeString( val ) , 0 );
+ default: uassert( 10217 , (string)"can't append field. name:" + name + " type: " + typeString( val ) , 0 );
}
}
@@ -575,7 +575,7 @@ namespace mongo {
}
cout << "toval: unknown type: " << e.type() << endl;
- uassert( "not done: toval" , 0 );
+ uassert( 10218 , "not done: toval" , 0 );
return 0;
}
@@ -602,7 +602,7 @@ namespace mongo {
}
jsval getProperty( JSObject * o , const char * field ){
- uassert( "object passed to getPropery is null" , o );
+ uassert( 10219 , "object passed to getPropery is null" , o );
jsval v;
assert( JS_GetProperty( _context , o , field , &v ) );
return v;
@@ -693,7 +693,7 @@ namespace mongo {
return JS_TRUE;
}
- uassert( "don't know what to do with this op" , 0 );
+ uassert( 10220 , "don't know what to do with this op" , 0 );
return JS_FALSE;
}
@@ -916,7 +916,7 @@ namespace mongo {
#endif
_runtime = JS_NewRuntime(8L * 1024L * 1024L);
- uassert( "JS_NewRuntime failed" , _runtime );
+ uassert( 10221 , "JS_NewRuntime failed" , _runtime );
if ( ! utf8Ok() ){
log() << "*** warning: spider monkey build without utf8 support. consider rebuilding with utf8 support" << endl;
@@ -924,7 +924,7 @@ namespace mongo {
int x = 0;
assert( x = 1 );
- uassert( "assert not being executed" , x == 1 );
+ uassert( 10222 , "assert not being executed" , x == 1 );
}
~SMEngine(){
@@ -975,16 +975,16 @@ namespace mongo {
smlock;
_context = JS_NewContext( globalSMEngine->_runtime , 8192 );
_convertor = new Convertor( _context );
- massert( "JS_NewContext failed" , _context );
+ massert( 10431 , "JS_NewContext failed" , _context );
JS_SetOptions( _context , JSOPTION_VAROBJFIX);
//JS_SetVersion( _context , JSVERSION_LATEST); TODO
JS_SetErrorReporter( _context , errorReporter );
_global = JS_NewObject( _context , &global_class, NULL, NULL);
- massert( "JS_NewObject failed for global" , _global );
+ massert( 10432 , "JS_NewObject failed for global" , _global );
JS_SetGlobalObject( _context , _global );
- massert( "js init failed" , JS_InitStandardClasses( _context , _global ) );
+ massert( 10433 , "js init failed" , JS_InitStandardClasses( _context , _global ) );
JS_SetOptions( _context , JS_GetOptions( _context ) | JSOPTION_VAROBJFIX );
@@ -999,7 +999,7 @@ namespace mongo {
~SMScope(){
smlock;
- uassert( "deleted SMScope twice?" , _convertor );
+ uassert( 10223 , "deleted SMScope twice?" , _convertor );
for ( list<void*>::iterator i=_roots.begin(); i != _roots.end(); i++ ){
JS_RemoveRoot( _context , *i );
@@ -1056,7 +1056,7 @@ namespace mongo {
void externalSetup(){
smlock;
- uassert( "already local connected" , ! _localConnect );
+ uassert( 10224 , "already local connected" , ! _localConnect );
if ( _externalSetup )
return;
initMongoJS( this , _context , _global , false );
@@ -1065,9 +1065,9 @@ namespace mongo {
void localConnect( const char * dbName ){
smlock;
- uassert( "already setup for external db" , ! _externalSetup );
+ uassert( 10225 , "already setup for external db" , ! _externalSetup );
if ( _localConnect ){
- uassert( "connected to different db" , _localDBName == dbName );
+ uassert( 10226 , "connected to different db" , _localDBName == dbName );
return;
}
@@ -1135,7 +1135,7 @@ namespace mongo {
case JSTYPE_NUMBER: return NumberDouble;
case JSTYPE_BOOLEAN: return Bool;
default:
- uassert( "unknown type" , 0 );
+ uassert( 10227 , "unknown type" , 0 );
}
return 0;
}
@@ -1259,7 +1259,7 @@ namespace mongo {
uninstallCheckTimeout( timeoutMs );
if ( assertOnError )
- uassert( name + " exec failed" , worked );
+ uassert( 10228 , name + " exec failed" , worked );
if ( reportError && ! _error.empty() ){
// cout << "exec error: " << _error << endl;
@@ -1446,7 +1446,7 @@ namespace mongo {
return;
SMScope * scope = currentScope.get();
- uassert( "need a scope" , scope );
+ uassert( 10229 , "need a scope" , scope );
JSObject * o = JS_GetFunctionObject( f );
assert( o );
diff --git a/scripting/engine_v8.cpp b/scripting/engine_v8.cpp
index 1eda3d51667..3c736b32501 100644
--- a/scripting/engine_v8.cpp
+++ b/scripting/engine_v8.cpp
@@ -166,7 +166,7 @@ namespace mongo {
if ( v->IsNumber() )
return NumberDouble;
if ( v->IsExternal() ){
- uassert( "can't handle external yet" , 0 );
+ uassert( 10230 , "can't handle external yet" , 0 );
return -1;
}
if ( v->IsDate() )
@@ -205,7 +205,7 @@ namespace mongo {
Handle<Value> v = get( field );
if ( v->IsNull() || v->IsUndefined() )
return BSONObj();
- uassert( "not an object" , v->IsObject() );
+ uassert( 10231 , "not an object" , v->IsObject() );
return v8ToMongo( v->ToObject() );
}
@@ -252,7 +252,7 @@ namespace mongo {
}
Handle<Value> f = _global->Get( v8::String::New( fn.c_str() ) );
- uassert( "not a func" , f->IsFunction() );
+ uassert( 10232 , "not a func" , f->IsFunction() );
_funcs.push_back( f );
return num;
}
@@ -325,7 +325,7 @@ namespace mongo {
if (reportError)
log() << _error << endl;
if ( assertOnError )
- uassert( _error , 0 );
+ uassert( 10233 , _error , 0 );
return false;
}
@@ -335,7 +335,7 @@ namespace mongo {
if ( reportError )
log() << _error << endl;
if ( assertOnError )
- uassert( _error , 0 );
+ uassert( 10234 , _error , 0 );
return false;
}
diff --git a/scripting/sm_db.cpp b/scripting/sm_db.cpp
index 1c1a0b7bff1..62f88ed1ac7 100644
--- a/scripting/sm_db.cpp
+++ b/scripting/sm_db.cpp
@@ -65,12 +65,12 @@ namespace mongo {
DBClientCursor *getCursor( JSContext *cx, JSObject *obj ) {
CursorHolder * holder = (CursorHolder*)JS_GetPrivate( cx , obj );
- uassert( "no cursor!" , holder );
+ uassert( 10235 , "no cursor!" , holder );
return holder->get();
}
JSBool internal_cursor_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
- uassert( "no args to internal_cursor_constructor" , argc == 0 );
+ uassert( 10236 , "no args to internal_cursor_constructor" , argc == 0 );
assert( JS_SetPrivate( cx , obj , 0 ) ); // just for safety
return JS_TRUE;
}
@@ -120,7 +120,7 @@ namespace mongo {
// ------ mongo stuff ------
JSBool mongo_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
- uassert( "mongo_constructor not implemented yet" , 0 );
+ uassert( 10237 , "mongo_constructor not implemented yet" , 0 );
throw -1;
}
@@ -139,7 +139,7 @@ namespace mongo {
JSBool mongo_external_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
Convertor c( cx );
- uassert( "0 or 1 args to Mongo" , argc <= 1 );
+ uassert( 10238 , "0 or 1 args to Mongo" , argc <= 1 );
string host = "127.0.0.1";
if ( argc > 0 )
@@ -191,7 +191,7 @@ namespace mongo {
DBClientWithCommands *getConnection( JSContext *cx, JSObject *obj ) {
shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
- uassert( "no connection!" , connHolder && connHolder->get() );
+ uassert( 10239 , "no connection!" , connHolder && connHolder->get() );
return connHolder->get();
}
@@ -211,9 +211,9 @@ namespace mongo {
};
JSBool mongo_find(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
- uassert( "mongo_find neesd 5 args" , argc == 5 );
+ uassert( 10240 , "mongo_find neesd 5 args" , argc == 5 );
shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
- uassert( "no connection!" , connHolder && connHolder->get() );
+ uassert( 10241 , "no connection!" , connHolder && connHolder->get() );
DBClientWithCommands *conn = connHolder->get();
Convertor c( cx );
@@ -246,9 +246,9 @@ namespace mongo {
}
JSBool mongo_update(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
- uassert( "mongo_find needs at elast 3 args" , argc >= 3 );
- uassert( "2nd param to update has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
- uassert( "3rd param to update has to be an object" , JSVAL_IS_OBJECT( argv[2] ) );
+ uassert( 10242 , "mongo_find needs at elast 3 args" , argc >= 3 );
+ uassert( 10243 , "2nd param to update has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
+ uassert( 10244 , "3rd param to update has to be an object" , JSVAL_IS_OBJECT( argv[2] ) );
Convertor c( cx );
if ( c.getBoolean( obj , "readOnly" ) ){
@@ -257,7 +257,7 @@ namespace mongo {
}
DBClientWithCommands * conn = getConnection( cx, obj );
- uassert( "no connection!" , conn );
+ uassert( 10245 , "no connection!" , conn );
string ns = c.toString( argv[0] );
@@ -275,8 +275,8 @@ namespace mongo {
}
JSBool mongo_insert(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
- uassert( "mongo_insert needs 2 args" , argc == 2 );
- uassert( "2nd param to insert has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
+ uassert( 10246 , "mongo_insert needs 2 args" , argc == 2 );
+ uassert( 10247 , "2nd param to insert has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
Convertor c( cx );
if ( c.getBoolean( obj , "readOnly" ) ){
@@ -285,7 +285,7 @@ namespace mongo {
}
DBClientWithCommands * conn = getConnection( cx, obj );
- uassert( "no connection!" , conn );
+ uassert( 10248 , "no connection!" , conn );
string ns = c.toString( argv[0] );
@@ -311,8 +311,8 @@ namespace mongo {
}
JSBool mongo_remove(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
- uassert( "mongo_remove needs 2 arguments" , argc == 2 );
- uassert( "2nd param to insert has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
+ uassert( 10249 , "mongo_remove needs 2 arguments" , argc == 2 );
+ uassert( 10250 , "2nd param to insert has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
Convertor c( cx );
if ( c.getBoolean( obj , "readOnly" ) ){
@@ -321,7 +321,7 @@ namespace mongo {
}
DBClientWithCommands * conn = getConnection( cx, obj );
- uassert( "no connection!" , conn );
+ uassert( 10251 , "no connection!" , conn );
string ns = c.toString( argv[0] );
BSONObj o = c.toObject( argv[1] );
@@ -349,7 +349,7 @@ namespace mongo {
// ------------- db_collection -------------
JSBool db_collection_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
- uassert( "db_collection_constructor wrong args" , argc == 4 );
+ uassert( 10252 , "db_collection_constructor wrong args" , argc == 4 );
assert( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
assert( JS_SetProperty( cx , obj , "_db" , &(argv[1]) ) );
assert( JS_SetProperty( cx , obj , "_shortName" , &(argv[2]) ) );
@@ -432,7 +432,7 @@ namespace mongo {
JSBool db_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
- uassert( "wrong number of arguments to DB" , argc == 2 );
+ uassert( 10253 , "wrong number of arguments to DB" , argc == 2 );
assert( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
assert( JS_SetProperty( cx , obj , "_name" , &(argv[1]) ) );
@@ -484,7 +484,7 @@ namespace mongo {
oid.init();
}
else {
- uassert( "object_id_constructor can't take more than 1 param" , argc == 1 );
+ uassert( 10254 , "object_id_constructor can't take more than 1 param" , argc == 1 );
string s = c.toString( argv[0] );
try {
@@ -674,7 +674,7 @@ namespace mongo {
// dbquery
JSBool dbquery_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
- uassert( "DDQuery needs at least 4 args" , argc >= 4 );
+ uassert( 10255 , "DDQuery needs at least 4 args" , argc >= 4 );
Convertor c(cx);
c.setProperty( obj , "_mongo" , argv[0] );
diff --git a/shell/dbshell.cpp b/shell/dbshell.cpp
index 0baa168973e..28bcc3fa74a 100644
--- a/shell/dbshell.cpp
+++ b/shell/dbshell.cpp
@@ -498,7 +498,7 @@ int main(int argc, char* argv[]) {
namespace mongo {
DBClientBase * createDirectClient(){
- uassert( "no createDirectClient in shell" , 0 );
+ uassert( 10256 , "no createDirectClient in shell" , 0 );
return 0;
}
}
diff --git a/shell/utils.cpp b/shell/utils.cpp
index ae0ab41df6a..ca082ef66d3 100644
--- a/shell/utils.cpp
+++ b/shell/utils.cpp
@@ -69,7 +69,7 @@ namespace mongo {
}
BSONObj listFiles(const BSONObj& args){
- uassert( "need to specify 1 argument to listFiles" , args.nFields() == 1 );
+ uassert( 10257 , "need to specify 1 argument to listFiles" , args.nFields() == 1 );
BSONObjBuilder lst;
@@ -118,7 +118,7 @@ namespace mongo {
BSONObj JSGetMemInfo( const BSONObj& args ){
ProcessInfo pi;
- uassert( "processinfo not supported" , pi.supported() );
+ uassert( 10258 , "processinfo not supported" , pi.supported() );
BSONObjBuilder e;
e.append( "virtual" , pi.getVirtualMemorySize() );
@@ -144,8 +144,8 @@ namespace mongo {
#include <sys/wait.h>
BSONObj AllocatePorts( const BSONObj &args ) {
- uassert( "allocatePorts takes exactly 1 argument", args.nFields() == 1 );
- uassert( "allocatePorts needs to be passed an integer", args.firstElement().isNumber() );
+ uassert( 10259 , "allocatePorts takes exactly 1 argument", args.nFields() == 1 );
+ uassert( 10260 , "allocatePorts needs to be passed an integer", args.firstElement().isNumber() );
int n = int( args.firstElement().number() );
@@ -173,7 +173,7 @@ namespace mongo {
sort( ports.begin(), ports.end() );
for( unsigned i = 1; i < ports.size(); ++i )
- massert( "duplicate ports allocated", ports[ i - 1 ] != ports[ i ] );
+ massert( 10434 , "duplicate ports allocated", ports[ i - 1 ] != ports[ i ] );
BSONObjBuilder b;
b.append( "", ports );
return b.obj();
@@ -220,7 +220,7 @@ namespace mongo {
assert( !program.empty() );
boost::filesystem::path programPath = ( boost::filesystem::path( argv0 ) ).branch_path() / program;
- massert( "couldn't find " + programPath.native_file_string(), boost::filesystem::exists( programPath ) );
+ massert( 10435 , "couldn't find " + programPath.native_file_string(), boost::filesystem::exists( programPath ) );
port_ = -1;
argv_ = new char *[ args.nFields() + 1 ];
@@ -276,7 +276,7 @@ namespace mongo {
assert( dup2( pipeEnds[ 1 ], STDOUT_FILENO ) != -1 );
assert( dup2( pipeEnds[ 1 ], STDERR_FILENO ) != -1 );
execvp( argv_[ 0 ], argv_ );
- massert( "Unable to start program" , 0 );
+ massert( 10436 , "Unable to start program" , 0 );
}
cout << "shell: started mongo program";
@@ -466,7 +466,7 @@ namespace mongo {
#endif
BSONObj jsmd5( const BSONObj &a ){
- uassert( "js md5 needs a string" , a.firstElement().type() == String );
+ uassert( 10261 , "js md5 needs a string" , a.firstElement().type() == String );
const char * s = a.firstElement().valuestrsafe();
md5digest d;
diff --git a/tools/dump.cpp b/tools/dump.cpp
index 6fe8ae2234c..201cfc93ead 100644
--- a/tools/dump.cpp
+++ b/tools/dump.cpp
@@ -39,7 +39,7 @@ public:
ofstream out;
out.open( outputFile.string().c_str() , ios_base::out | ios_base::binary );
- uassert( "couldn't open file" , out.good() );
+ uassert( 10262 , "couldn't open file" , out.good() );
ProgressMeter m( conn( true ).count( coll.c_str() , BSONObj() , Option_SlaveOk ) );
diff --git a/tools/import.cpp b/tools/import.cpp
index 7a05cf095c8..9b43e4fa7fc 100644
--- a/tools/import.cpp
+++ b/tools/import.cpp
@@ -204,7 +204,7 @@ public:
char line[ (1024 * 1024 * 4) + 128];
while ( *in ){
in->getline( line , BUF_SIZE );
- uassert( "unknown error reading file" , ( in->rdstate() & ios_base::badbit ) == 0 );
+ uassert( 10263 , "unknown error reading file" , ( in->rdstate() & ios_base::badbit ) == 0 );
log(1) << "got line:" << line << endl;
char * buf = line;
diff --git a/tools/restore.cpp b/tools/restore.cpp
index 82dd32743a0..19e3a26a9d3 100644
--- a/tools/restore.cpp
+++ b/tools/restore.cpp
@@ -151,7 +151,7 @@ public:
if ( size >= BUF_SIZE ){
cerr << "got an object of size: " << size << " terminating..." << endl;
}
- uassert( "invalid object size" , size < BUF_SIZE );
+ uassert( 10264 , "invalid object size" , size < BUF_SIZE );
file.read( buf + 4 , size - 4 );
@@ -166,7 +166,7 @@ public:
free( buf );
- uassert( "counts don't match" , m.done() == fileLength );
+ uassert( 10265 , "counts don't match" , m.done() == fileLength );
out() << "\t " << m.hits() << " objects" << endl;
}
};
diff --git a/tools/sniffer.cpp b/tools/sniffer.cpp
index 26830d7d29d..9590d8fc8e9 100644
--- a/tools/sniffer.cpp
+++ b/tools/sniffer.cpp
@@ -370,8 +370,8 @@ int main(int argc, char **argv){
} else if ( arg == string( "--forward" ) ) {
forwardAddress = args[ ++i ];
} else if ( arg == string( "--source" ) ) {
- uassert( "can't use --source twice" , source == false );
- uassert( "source needs more args" , args.size() > i + 2);
+ uassert( 10266 , "can't use --source twice" , source == false );
+ uassert( 10267 , "source needs more args" , args.size() > i + 2);
source = true;
replay = ( args[ ++i ] == string( "FILE" ) );
if ( replay )
diff --git a/util/assert_util.cpp b/util/assert_util.cpp
index d3178833e42..060df8d80a3 100644
--- a/util/assert_util.cpp
+++ b/util/assert_util.cpp
@@ -53,9 +53,9 @@ namespace mongo {
}
int uacount = 0;
- void uasserted(const char *msg) {
+ void uasserted(int msgid, const char *msg) {
if ( ++uacount < 100 )
- log() << "User Exception " << msg << endl;
+ log() << "User Exception " << msgid << ":" << msg << endl;
else
RARELY log() << "User Exception " << msg << endl;
lastAssert[3].set(msg, getDbContext().c_str(), "", 0);
@@ -63,8 +63,8 @@ namespace mongo {
throw UserException(msg);
}
- void msgasserted(const char *msg) {
- log() << "Assertion: " << msg << endl;
+ void msgasserted(int msgid, const char *msg) {
+ log() << "Assertion: " << msgid << ":" << msg << endl;
lastAssert[2].set(msg, getDbContext().c_str(), "", 0);
raiseError(msg && *msg ? msg : "massert failure");
breakpoint();
@@ -100,7 +100,7 @@ namespace mongo {
}
void start( const string& lp , bool append ){
- uassert( "LoggingManager already started" , ! _enabled );
+ uassert( 10268 , "LoggingManager already started" , ! _enabled );
// test path
FILE * test = fopen( lp.c_str() , _append ? "a" : "w" );
diff --git a/util/assert_util.h b/util/assert_util.h
index 7edda75759a..656cfccd325 100644
--- a/util/assert_util.h
+++ b/util/assert_util.h
@@ -125,11 +125,11 @@ namespace mongo {
void asserted(const char *msg, const char *file, unsigned line);
void wasserted(const char *msg, const char *file, unsigned line);
- void uasserted(const char *msg);
- inline void uasserted(string msg) { uasserted(msg.c_str()); }
+ void uasserted(int msgid, const char *msg);
+ inline void uasserted(int msgid , string msg) { uasserted(msgid, msg.c_str()); }
void uassert_nothrow(const char *msg); // reported via lasterror, but don't throw exception
- void msgasserted(const char *msg);
- inline void msgasserted(string msg) { msgasserted(msg.c_str()); }
+ void msgasserted(int msgid, const char *msg);
+ inline void msgasserted(int msgid, string msg) { msgasserted(msgid, msg.c_str()); }
#ifdef assert
#undef assert
@@ -138,8 +138,8 @@ namespace mongo {
#define assert(_Expression) (void)( (!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
/* "user assert". if asserts, user did something wrong, not our code */
-//#define uassert(_Expression) (void)( (!!(_Expression)) || (uasserted(#_Expression, __FILE__, __LINE__), 0) )
-#define uassert(msg,_Expression) (void)( (!!(_Expression)) || (mongo::uasserted(msg), 0) )
+//#define uassert( 10269 , _Expression) (void)( (!!(_Expression)) || (uasserted(#_Expression, __FILE__, __LINE__), 0) )
+#define uassert(msgid, msg,_Expression) (void)( (!!(_Expression)) || (mongo::uasserted(msgid, msg), 0) )
#define xassert(_Expression) (void)( (!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
@@ -153,7 +153,7 @@ namespace mongo {
easy way to throw an exception and log something without our stack trace
display happening.
*/
-#define massert(msg,_Expression) (void)( (!!(_Expression)) || (mongo::msgasserted(msg), 0) )
+#define massert(msgid, msg,_Expression) (void)( (!!(_Expression)) || (mongo::msgasserted(msgid, msg), 0) )
/* dassert is 'debug assert' -- might want to turn off for production as these
could be slow.
@@ -164,6 +164,9 @@ namespace mongo {
#define dassert(x)
#endif
+ // some special ids that we want to duplicate
+#define ASSERT_ID_DUPKEY 11000
+
} // namespace mongo
#define BOOST_CHECK_EXCEPTION( expression ) \
@@ -173,5 +176,5 @@ namespace mongo {
problem() << "caught boost exception: " << e.what() << endl; \
assert( false ); \
} catch ( ... ) { \
- massert( "unknown boost failed" , false ); \
+ massert( 10437 , "unknown boost failed" , false ); \
}
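
Taken together, the assert_util.h hunks above define the convention the rest of this patch applies mechanically: uassert and massert now take a numeric error id as their first argument, and uasserted/msgasserted log that id next to the message. A minimal sketch of a call site under the new macros (illustrative only; the helper function and the ids 10999/10998 are placeholders, not codes assigned anywhere in this patch):

    #include "util/assert_util.h"   // new uassert(msgid, msg, expr) / massert(msgid, msg, expr)

    namespace mongo {
        // hypothetical caller showing the id-first form used throughout this patch
        void openConfig( const std::string& primary , bool isDup ){
            // before: uassert( "ConfigServer not setup" , primary.size() );
            // after:  a unique numeric id precedes the message
            uassert( 10999 , "ConfigServer not setup" , primary.size() );

            // massert ("message assert", for internal errors) takes the same form
            massert( 10998 , "unexpected internal state" , ! primary.empty() );

            // ids meant to be recognized programmatically can be shared, e.g. the
            // reserved duplicate-key id ASSERT_ID_DUPKEY (11000) defined above
            uassert( ASSERT_ID_DUPKEY , "duplicate key error" , ! isDup );
        }
    }
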
diff --git a/util/base64.cpp b/util/base64.cpp
index e6a3865dc3b..cf2f485e762 100644
--- a/util/base64.cpp
+++ b/util/base64.cpp
@@ -111,7 +111,7 @@ namespace mongo {
void decode( stringstream& ss , const string& s ){
- uassert( "invalid base64" , s.size() % 4 == 0 );
+ uassert( 10270 , "invalid base64" , s.size() % 4 == 0 );
const unsigned char * data = (const unsigned char*)s.c_str();
int size = s.size();
diff --git a/util/file.h b/util/file.h
index 3074461afaf..347e2d63a37 100644
--- a/util/file.h
+++ b/util/file.h
@@ -94,7 +94,7 @@ public:
if( !ok )
err(ok);
else
- massert("ReadFile error - truncated file?", read == len);
+ massert( 10438 , "ReadFile error - truncated file?", read == len);
}
bool bad() { return _bad; }
bool is_open() { return fd != INVALID_HANDLE_VALUE; }
diff --git a/util/file_allocator.h b/util/file_allocator.h
index b53ebc32fe0..2643f270aad 100644
--- a/util/file_allocator.h
+++ b/util/file_allocator.h
@@ -152,7 +152,7 @@ namespace mongo {
if ( fd <= 0 ) {
stringstream ss;
ss << "couldn't open " << name << ' ' << OUTPUT_ERRNO;
- massert( ss.str(), fd <= 0 );
+ massert( 10439 , ss.str(), fd <= 0 );
}
#if defined(POSIX_FADV_DONTNEED)
@@ -164,11 +164,11 @@ namespace mongo {
/* make sure the file is the full desired length */
off_t filelen = lseek(fd, 0, SEEK_END);
if ( filelen < size ) {
- massert( "failure creating new datafile", filelen == 0 );
+ massert( 10440 , "failure creating new datafile", filelen == 0 );
// Check for end of disk.
- massert( "Unable to allocate file of desired size",
+ massert( 10441 , "Unable to allocate file of desired size",
size - 1 == lseek(fd, size - 1, SEEK_SET) );
- massert( "Unable to allocate file of desired size",
+ massert( 10442 , "Unable to allocate file of desired size",
1 == write(fd, "", 1) );
lseek(fd, 0, SEEK_SET);
log() << "allocating new datafile " << name << ", filling with zeroes..." << endl;
@@ -179,10 +179,10 @@ namespace mongo {
long left = size;
while ( 1 ) {
if ( left <= z ) {
- massert( "write failed", left == write(fd, buf, left) );
+ massert( 10443 , "write failed", left == write(fd, buf, left) );
break;
}
- massert( "write failed", z == write(fd, buf, z) );
+ massert( 10444 , "write failed", z == write(fd, buf, z) );
left -= z;
}
log() << "done allocating datafile " << name << ", size: " << size/1024/1024 << "MB, took " << ((double)t.millis())/1000.0 << " secs" << endl;
diff --git a/util/httpclient.cpp b/util/httpclient.cpp
index 4a2f7cd1ff6..284bb63e50d 100644
--- a/util/httpclient.cpp
+++ b/util/httpclient.cpp
@@ -21,7 +21,7 @@
namespace mongo {
int HttpClient::get( string url , map<string,string>& headers, stringstream& data ){
- uassert( "invalid url" , url.find( "http://" ) == 0 );
+ uassert( 10271 , "invalid url" , url.find( "http://" ) == 0 );
url = url.substr( 7 );
string host , path;
@@ -35,7 +35,7 @@ namespace mongo {
}
int port = 80;
- uassert( "non standard port not supported yet" , host.find( ":" ) == string::npos );
+ uassert( 10272 , "non standard port not supported yet" , host.find( ":" ) == string::npos );
cout << "host [" << host << "]" << endl;
cout << "path [" << path << "]" << endl;
diff --git a/util/message.cpp b/util/message.cpp
index 5db0f1e83b2..fa0186e77fb 100644
--- a/util/message.cpp
+++ b/util/message.cpp
@@ -444,7 +444,7 @@ again:
void setClientId( int id ){
usingClientIds = true;
id = id & 0xFFFF0000;
- massert( "invalid id" , id );
+ massert( 10445 , "invalid id" , id );
clientId.set( id );
}
diff --git a/util/message_server_asio.cpp b/util/message_server_asio.cpp
index 710ab1e8b58..4d5fab04f7a 100644
--- a/util/message_server_asio.cpp
+++ b/util/message_server_asio.cpp
@@ -72,7 +72,7 @@ namespace mongo {
memcpy( data , &_inHeader , sizeof( _inHeader ) );
assert( data->len == _inHeader.len );
- uassert( "_cur not empty! pipelining requests not supported" , ! _cur.data );
+ uassert( 10273 , "_cur not empty! pipelining requests not supported" , ! _cur.data );
_cur.setData( data , true );
async_read( _socket ,
@@ -112,7 +112,7 @@ namespace mongo {
_reply.data->id = nextMessageId();
_reply.data->responseTo = responseTo;
- uassert( "pipelining requests doesn't work yet" , query.data->id == _cur.data->id );
+ uassert( 10274 , "pipelining requests doesn't work yet" , query.data->id == _cur.data->id );
}
diff --git a/util/message_server_port.cpp b/util/message_server_port.cpp
index e788302cafd..e5becc9d32a 100644
--- a/util/message_server_port.cpp
+++ b/util/message_server_port.cpp
@@ -61,7 +61,7 @@ namespace mongo {
MessageServer( port , handler ) ,
Listener( "", port ){
- uassert( "multiple PortMessageServer not supported" , ! pms::handler );
+ uassert( 10275 , "multiple PortMessageServer not supported" , ! pms::handler );
pms::handler = handler;
}
diff --git a/util/mmap_posix.cpp b/util/mmap_posix.cpp
index 87de388e188..2d6441c928c 100644
--- a/util/mmap_posix.cpp
+++ b/util/mmap_posix.cpp
@@ -54,7 +54,7 @@ namespace mongo {
theFileAllocator().allocateAsap( filename, length );
len = length;
- massert( "mmap() can't map area of size 0" , length > 0 );
+ massert( 10446 , "mmap() can't map area of size 0" , length > 0 );
fd = open(filename, O_RDWR | O_NOATIME);
@@ -67,7 +67,7 @@ namespace mongo {
if ( filelen != length ){
cout << "wanted length: " << length << " filelen: " << filelen << endl;
cout << sizeof(size_t) << endl;
- massert( "file size allocation failed", filelen == length );
+ massert( 10447 , "file size allocation failed", filelen == length );
}
lseek( fd, 0, SEEK_SET );