Diffstat (limited to 'src/mongo/client/parallel.cpp')
-rw-r--r--  src/mongo/client/parallel.cpp  88
1 file changed, 44 insertions(+), 44 deletions(-)
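
This commit is a mechanical rename of `assert(...)` to `verify(...)` at 44 call sites in parallel.cpp. As a rough sketch of the distinction, assuming semantics along the lines of MongoDB's assert_util macros (the real definitions live in src/mongo/util/assert_util.h and differ in detail, e.g. they throw MongoDB's own exception type): `assert` collides with the C standard macro and is compiled out under NDEBUG, whereas `verify` is evaluated in every build and raises a catchable exception instead of aborting.

    // Hedged sketch, not MongoDB's actual macro; the name mongo_verify is
    // hypothetical and used only to illustrate the always-on behavior.
    #include <sstream>
    #include <stdexcept>

    #define mongo_verify(expression)                                \
        do {                                                        \
            if (!(expression)) {                                    \
                std::ostringstream ss;                              \
                ss << "verify failed: " << #expression              \
                   << " @ " << __FILE__ << ":" << __LINE__;         \
                throw std::runtime_error(ss.str());                 \
            }                                                       \
        } while (false)

    // Unlike <cassert>'s assert(), this check does not disappear when
    // NDEBUG is defined, so a condition with side effects still runs
    // in release builds, and a failure unwinds rather than aborting.
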
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 91ee6f6d624..eab7d6c3be9 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -83,7 +83,7 @@ namespace mongo {
}
void ClusteredCursor::_checkCursor( DBClientCursor * cursor ) {
- assert( cursor );
+ verify( cursor );
if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ) {
BSONObj error;
@@ -99,7 +99,7 @@ namespace mongo {
auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra , int skipLeft , bool lazy ) {
uassert( 10017 , "cursor already done" , ! _done );
- assert( _didInit );
+ verify( _didInit );
BSONObj q = _query;
if ( ! extra.isEmpty() ) {
@@ -131,7 +131,7 @@ namespace mongo {
massert( 13633 , str::stream() << "error querying server: " << server , cursor.get() );
cursor->attach( &conn ); // this calls done on conn
- assert( ! conn.ok() );
+ verify( ! conn.ok() );
_checkCursor( cursor.get() );
return cursor;
}
@@ -205,9 +205,9 @@ namespace mongo {
if( ( isVersioned() && ! isSharded() ) || _qShards.size() == 1 ){
map<string,list<BSONObj> > out;
_explain( out );
- assert( out.size() == 1 );
+ verify( out.size() == 1 );
list<BSONObj>& l = out.begin()->second;
- assert( l.size() == 1 );
+ verify( l.size() == 1 );
b.appendElements( *(l.begin()) );
return;
}
@@ -338,8 +338,8 @@ namespace mongo {
}
BSONObj FilteringClientCursor::next() {
- assert( ! _next.isEmpty() );
- assert( ! _done );
+ verify( ! _next.isEmpty() );
+ verify( ! _done );
BSONObj ret = _next;
_next = BSONObj();
@@ -354,7 +354,7 @@ namespace mongo {
}
void FilteringClientCursor::_advance() {
- assert( _next.isEmpty() );
+ verify( _next.isEmpty() );
if ( ! _cursor.get() || _done )
return;
@@ -469,7 +469,7 @@ namespace mongo {
_sortKey = _qSpec.sort();
_fields = _qSpec.fields();
- if( ! isVersioned() ) assert( _cInfo.isEmpty() );
+ if( ! isVersioned() ) verify( _cInfo.isEmpty() );
}
if ( ! _sortKey.isEmpty() && ! _fields.isEmpty() ) {
@@ -532,8 +532,8 @@ namespace mongo {
}
else if( initialized ){
- assert( pcState->cursor );
- assert( pcState->conn );
+ verify( pcState->cursor );
+ verify( pcState->conn );
if( ! finished && pcState->conn->ok() ){
try{
@@ -557,7 +557,7 @@ namespace mongo {
pcState.reset();
}
- else assert( finished || ! initialized );
+ else verify( finished || ! initialized );
initialized = false;
finished = false;
@@ -729,7 +729,7 @@ namespace mongo {
}
- assert( todo.size() );
+ verify( todo.size() );
log( pc ) << "initializing over " << todo.size() << " shards required by " << vinfo << endl;
@@ -749,7 +749,7 @@ namespace mongo {
if( mdata.initialized ){
- assert( mdata.pcState );
+ verify( mdata.pcState );
PCStatePtr state = mdata.pcState;
@@ -794,7 +794,7 @@ namespace mongo {
if( manager ) state->manager = manager;
else if( primary ) state->primary = primary;
- assert( ! primary || shard == *primary || ! isVersioned() );
+ verify( ! primary || shard == *primary || ! isVersioned() );
// Setup conn
if( ! state->conn ) state->conn.reset( new ShardConnection( shard, ns, manager ) );
@@ -927,17 +927,17 @@ namespace mongo {
if( ! mdata.pcState ) continue;
// Make sure all state is in shards
- assert( todo.find( shard ) != todo.end() );
- assert( mdata.initialized = true );
- if( ! mdata.completed ) assert( mdata.pcState->conn->ok() );
- assert( mdata.pcState->cursor );
- if( isVersioned() ) assert( mdata.pcState->primary || mdata.pcState->manager );
- else assert( ! mdata.pcState->primary || ! mdata.pcState->manager );
- assert( ! mdata.retryNext );
-
- if( mdata.completed ) assert( mdata.finished );
- if( mdata.finished ) assert( mdata.initialized );
- if( ! returnPartial ) assert( mdata.initialized );
+ verify( todo.find( shard ) != todo.end() );
+ verify( mdata.initialized = true );
+ if( ! mdata.completed ) verify( mdata.pcState->conn->ok() );
+ verify( mdata.pcState->cursor );
+ if( isVersioned() ) verify( mdata.pcState->primary || mdata.pcState->manager );
+ else verify( ! mdata.pcState->primary || ! mdata.pcState->manager );
+ verify( ! mdata.retryNext );
+
+ if( mdata.completed ) verify( mdata.finished );
+ if( mdata.finished ) verify( mdata.initialized );
+ if( ! returnPartial ) verify( mdata.initialized );
}
}
@@ -968,13 +968,13 @@ namespace mongo {
try {
// Sanity checks
- if( ! mdata.completed ) assert( state->conn && state->conn->ok() );
- assert( state->cursor );
+ if( ! mdata.completed ) verify( state->conn && state->conn->ok() );
+ verify( state->cursor );
if( isVersioned() ){
- assert( state->manager || state->primary );
- assert( ! state->manager || ! state->primary );
+ verify( state->manager || state->primary );
+ verify( ! state->manager || ! state->primary );
}
- else assert( ! state->manager && ! state->primary );
+ else verify( ! state->manager && ! state->primary );
// If we weren't init'ing lazily, ignore this
@@ -1095,13 +1095,13 @@ namespace mongo {
else ++i;
// Make sure all state is in shards
- assert( mdata.initialized = true );
- assert( mdata.finished = true );
- assert( mdata.completed = true );
- assert( ! mdata.pcState->conn->ok() );
- assert( mdata.pcState->cursor );
- if( isVersioned() ) assert( mdata.pcState->primary || mdata.pcState->manager );
- else assert( ! mdata.pcState->primary && ! mdata.pcState->manager );
+ verify( mdata.initialized = true );
+ verify( mdata.finished = true );
+ verify( mdata.completed = true );
+ verify( ! mdata.pcState->conn->ok() );
+ verify( mdata.pcState->cursor );
+ if( isVersioned() ) verify( mdata.pcState->primary || mdata.pcState->manager );
+ else verify( ! mdata.pcState->primary && ! mdata.pcState->manager );
}
// TODO : More cleanup of metadata?
@@ -1179,7 +1179,7 @@ namespace mongo {
// log() << "Starting parallel search..." << endl;
// make sure we're not already initialized
- assert( ! _cursors );
+ verify( ! _cursors );
_cursors = new FilteringClientCursor[_numServers];
bool returnPartial = ( _options & QueryOption_PartialResults );
@@ -1302,7 +1302,7 @@ namespace mongo {
continue;
}
- assert( conns[i] );
+ verify( conns[i] );
retryQueries.erase( i );
bool retry = false;
@@ -1374,7 +1374,7 @@ namespace mongo {
}
// Don't exceed our max retries, should not happen
- assert( retries < 5 );
+ verify( retries < 5 );
}
while( retryQueries.size() > 0 /* something to retry */ &&
( socketExs.size() == 0 || returnPartial ) /* no conn issues */ &&
@@ -1383,7 +1383,7 @@ namespace mongo {
// Assert that our conns are all closed!
for( vector< shared_ptr<ShardConnection> >::iterator i = conns.begin(); i < conns.end(); ++i ){
- assert( ! (*i) || ! (*i)->ok() );
+ verify( ! (*i) || ! (*i)->ok() );
}
// Handle errors we got during initialization.
@@ -1560,7 +1560,7 @@ namespace mongo {
}
catch ( RecvStaleConfigException& e ){
- assert( versionManager.isVersionableCB( _conn ) );
+ verify( versionManager.isVersionableCB( _conn ) );
if( i >= maxRetries ){
error() << "Future::spawnComand (part 2) stale config exception" << causedBy( e ) << endl;
@@ -1578,7 +1578,7 @@ namespace mongo {
LOG( i > 1 ? 0 : 1 ) << "retrying lazy command" << causedBy( e ) << endl;
- assert( _conn->lazySupported() );
+ verify( _conn->lazySupported() );
_done = false;
init();
continue;
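
One detail worth noting in the hunks above: several of the converted checks, e.g. `verify( mdata.initialized = true )` in the "Make sure all state is in shards" blocks, use `=` rather than `==`. The expression assigns `true` and therefore always passes, silently overwriting the flag it was meant to inspect. If the intent is a pure sanity check (an assumption, not confirmed by the commit), the comparison-free form avoids the trap entirely:

    // Assumed intent; mdata.initialized/finished/completed are the
    // bool fields seen in the diff above.
    verify( mdata.initialized );   // checks the flag without assigning to it
    verify( mdata.finished );
    verify( mdata.completed );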